From 561301f5789cb6de8932b4cacf2d620b5493da99 Mon Sep 17 00:00:00 2001 From: Tomas Dvorak Date: Sat, 30 Nov 2024 10:15:22 +0100 Subject: [PATCH] tests: update model Signed-off-by: Tomas Dvorak --- .../huggingface/huggingface_agent.py | 2 +- .../extensions/langchain/langchain_agent.py | 2 +- .../langchain/langchain_chat_generate.py | 2 +- .../langchain/langchain_chat_stream.py | 2 +- .../langchain/langchain_sql_agent.py | 2 +- .../extensions/llama_index/llama_index_llm.py | 2 +- examples/text/chat.py | 2 +- src/genai/extensions/langchain/chat_llm.py | 2 +- src/genai/extensions/langchain/llm.py | 2 +- .../text/chat/chat_generation_service.py | 4 ++-- tests/e2e/test_examples.py | 3 +++ ...TestLangChainChat.test_async_generate.yaml | 4 ++-- .../TestLangChainChat.test_generate.yaml | 4 ++-- .../extensions/test_langchain_chat.py | 2 +- tests/integration/extensions/test_lm_eval.py | 1 + .../TestChatService.test_create_history.yaml | 20 ++++++++--------- .../TestChatService.test_create_stream.yaml | 22 +++++++++---------- tests/integration/text/test_chat_service.py | 2 +- 18 files changed, 42 insertions(+), 38 deletions(-) diff --git a/examples/extensions/huggingface/huggingface_agent.py b/examples/extensions/huggingface/huggingface_agent.py index 4259bd89..be94876a 100644 --- a/examples/extensions/huggingface/huggingface_agent.py +++ b/examples/extensions/huggingface/huggingface_agent.py @@ -49,7 +49,7 @@ def __call__(self): agent = IBMGenAIAgent( client=client, - model="meta-llama/llama-3-70b-instruct", + model="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters(min_new_tokens=10, max_new_tokens=200, random_seed=777, temperature=0), additional_tools=[BitcoinPriceFetcher()], ) diff --git a/examples/extensions/langchain/langchain_agent.py b/examples/extensions/langchain/langchain_agent.py index 1bd79e98..7f10a51a 100644 --- a/examples/extensions/langchain/langchain_agent.py +++ b/examples/extensions/langchain/langchain_agent.py @@ -78,7 +78,7 @@ def _run(self, word: str, run_manager: Optional[CallbackManagerForToolRun] = Non client = Client(credentials=Credentials.from_env()) llm = LangChainChatInterface( client=client, - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters( max_new_tokens=250, min_new_tokens=20, temperature=0, stop_sequences=["\nObservation"] ), diff --git a/examples/extensions/langchain/langchain_chat_generate.py b/examples/extensions/langchain/langchain_chat_generate.py index c7d07076..651f3088 100644 --- a/examples/extensions/langchain/langchain_chat_generate.py +++ b/examples/extensions/langchain/langchain_chat_generate.py @@ -27,7 +27,7 @@ def heading(text: str) -> str: llm = LangChainChatInterface( client=Client(credentials=Credentials.from_env()), - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters( decoding_method=DecodingMethod.SAMPLE, max_new_tokens=100, diff --git a/examples/extensions/langchain/langchain_chat_stream.py b/examples/extensions/langchain/langchain_chat_stream.py index 169336cd..37a025f0 100644 --- a/examples/extensions/langchain/langchain_chat_stream.py +++ b/examples/extensions/langchain/langchain_chat_stream.py @@ -21,7 +21,7 @@ def heading(text: str) -> str: print(heading("Stream chat with langchain")) llm = LangChainChatInterface( - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", client=Client(credentials=Credentials.from_env()), parameters=TextGenerationParameters( decoding_method=DecodingMethod.SAMPLE, diff --git a/examples/extensions/langchain/langchain_sql_agent.py b/examples/extensions/langchain/langchain_sql_agent.py index 4e212372..11afc223 100644 --- a/examples/extensions/langchain/langchain_sql_agent.py +++ b/examples/extensions/langchain/langchain_sql_agent.py @@ -75,7 +75,7 @@ def create_llm(): client = Client(credentials=Credentials.from_env()) return LangChainChatInterface( client=client, - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters( max_new_tokens=250, min_new_tokens=20, temperature=0, stop_sequences=["\nObservation"] ), diff --git a/examples/extensions/llama_index/llama_index_llm.py b/examples/extensions/llama_index/llama_index_llm.py index 8703ae7f..68e70040 100644 --- a/examples/extensions/llama_index/llama_index_llm.py +++ b/examples/extensions/llama_index/llama_index_llm.py @@ -22,7 +22,7 @@ def heading(text: str) -> str: llm = IBMGenAILlamaIndex( client=client, - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters( decoding_method=DecodingMethod.SAMPLE, max_new_tokens=100, diff --git a/examples/text/chat.py b/examples/text/chat.py index d14de0d9..5dcd543d 100644 --- a/examples/text/chat.py +++ b/examples/text/chat.py @@ -31,7 +31,7 @@ def heading(text: str) -> str: ) client = Client(credentials=Credentials.from_env()) -model_id = "meta-llama/llama-3-70b-instruct" +model_id = "meta-llama/llama-3-1-70b-instruct" prompt = "What is NLP and how it has evolved over the years?" print(heading("Generating a chat response")) diff --git a/src/genai/extensions/langchain/chat_llm.py b/src/genai/extensions/langchain/chat_llm.py index 38d25678..a1bfd343 100644 --- a/src/genai/extensions/langchain/chat_llm.py +++ b/src/genai/extensions/langchain/chat_llm.py @@ -93,7 +93,7 @@ class LangChainChatInterface(BaseChatModel): client = Client(credentials=Credentials.from_env()) llm = LangChainChatInterface( client=client, - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters( max_new_tokens=250, ) diff --git a/src/genai/extensions/langchain/llm.py b/src/genai/extensions/langchain/llm.py index b3c73ea1..f0ba77df 100644 --- a/src/genai/extensions/langchain/llm.py +++ b/src/genai/extensions/langchain/llm.py @@ -62,7 +62,7 @@ class LangChainInterface(LLM): client = Client(credentials=Credentials.from_env()) llm = LangChainInterface( client=client, - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", parameters=TextGenerationParameters(max_new_tokens=50) ) diff --git a/src/genai/text/chat/chat_generation_service.py b/src/genai/text/chat/chat_generation_service.py index 9080131c..716a52cc 100644 --- a/src/genai/text/chat/chat_generation_service.py +++ b/src/genai/text/chat/chat_generation_service.py @@ -79,7 +79,7 @@ def create( # Create a new conversation response = client.text.chat.create( - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", messages=[HumanMessage(content="Describe the game Chess?")], parameters=TextGenerationParameters(max_token_limit=100) ) @@ -150,7 +150,7 @@ def create_stream( # Create a new conversation for response in client.text.chat.create_stream( - model_id="meta-llama/llama-3-70b-instruct", + model_id="meta-llama/llama-3-1-70b-instruct", messages=[HumanMessage(content="Describe the game Chess?")], parameters=TextGenerationParameters(max_token_limit=100) ): diff --git a/tests/e2e/test_examples.py b/tests/e2e/test_examples.py index 58e7275f..5841f388 100644 --- a/tests/e2e/test_examples.py +++ b/tests/e2e/test_examples.py @@ -16,6 +16,9 @@ "parallel_processing.py", "chroma_db_embedding.py", "deployment.py", + # no available model + "lm_eval_cli.py", + "lm_eval_model.py", } skip_for_python_3_12 = { # These files are skipped for python >= 3.12 because transformers library cannot be installed diff --git a/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_async_generate.yaml b/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_async_generate.yaml index 12d88294..209ec42d 100644 --- a/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_async_generate.yaml +++ b/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_async_generate.yaml @@ -8,7 +8,7 @@ interactions: explain why instead of answering something incorrectly.\n If you don''t know the answer to a question, please don''t share false information.\n ", "role": "system"}, {"content": "What is NLP and how it has evolved over the years?", - "role": "user"}], "model_id": "meta-llama/llama-3-70b-instruct", "parameters": + "role": "user"}], "model_id": "meta-llama/llama-3-1-70b-instruct", "parameters": {}}' headers: accept: @@ -25,7 +25,7 @@ interactions: uri: https://api.com/v2/text/chat?version=2024-03-19 response: body: - string: '{"id":"01f28b95-5ecd-4a01-9a2b-7803c6db824f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:28.750Z","results":[{"generated_text":"Natural + string: '{"id":"01f28b95-5ecd-4a01-9a2b-7803c6db824f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:28.750Z","results":[{"generated_text":"Natural Language Processing (NLP) is a subfield of artificial intelligence (AI) that deals with","generated_token_count":20,"input_token_count":134,"stop_reason":"max_tokens","seed":1025128500}],"conversation_id":"90edb70b-c4e7-45f5-81c3-fab231227b7a"}' headers: diff --git a/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_generate.yaml b/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_generate.yaml index cd1c860f..7d5a69b5 100644 --- a/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_generate.yaml +++ b/tests/integration/extensions/cassettes/test_langchain_chat/TestLangChainChat.test_generate.yaml @@ -8,7 +8,7 @@ interactions: explain why instead of answering something incorrectly.\n If you don''t know the answer to a question, please don''t share false information.\n ", "role": "system"}, {"content": "What is NLP and how it has evolved over the years?", - "role": "user"}], "model_id": "meta-llama/llama-3-70b-instruct", "parameters": + "role": "user"}], "model_id": "meta-llama/llama-3-1-70b-instruct", "parameters": {}}' headers: accept: @@ -25,7 +25,7 @@ interactions: uri: https://api.com/v2/text/chat?version=2024-03-19 response: body: - string: '{"id":"d8160c0f-36a3-49a0-b001-a3f9cbdb9643","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:27.354Z","results":[{"generated_text":"NLP, + string: '{"id":"d8160c0f-36a3-49a0-b001-a3f9cbdb9643","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:27.354Z","results":[{"generated_text":"NLP, or Natural Language Processing, is a subfield of artificial intelligence (AI) that deals","generated_token_count":20,"input_token_count":134,"stop_reason":"max_tokens","seed":2032949123}],"conversation_id":"bcc5d917-5e64-48e0-9047-ac5e032e8f21"}' headers: diff --git a/tests/integration/extensions/test_langchain_chat.py b/tests/integration/extensions/test_langchain_chat.py index 038f7e00..01c1e09d 100644 --- a/tests/integration/extensions/test_langchain_chat.py +++ b/tests/integration/extensions/test_langchain_chat.py @@ -18,7 +18,7 @@ @pytest.mark.integration class TestLangChainChat: def setup_method(self): - self.model_id = "meta-llama/llama-3-70b-instruct" + self.model_id = "meta-llama/llama-3-1-70b-instruct" @pytest.fixture def parameters(self): diff --git a/tests/integration/extensions/test_lm_eval.py b/tests/integration/extensions/test_lm_eval.py index 463e0201..e1c69acc 100644 --- a/tests/integration/extensions/test_lm_eval.py +++ b/tests/integration/extensions/test_lm_eval.py @@ -7,6 +7,7 @@ @pytest.mark.integration +@pytest.mark.skip class TestLMEval: @pytest.fixture(autouse=True) def load_credentials(self): diff --git a/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_history.yaml b/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_history.yaml index 92abb615..e04ecd06 100644 --- a/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_history.yaml +++ b/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_history.yaml @@ -1,7 +1,7 @@ interactions: - request: body: '{"messages": [{"content": "Do you want to destroy the world?", "role": - "user"}], "model_id": "meta-llama/llama-3-70b-instruct"}' + "user"}], "model_id": "meta-llama/llama-3-1-70b-instruct"}' headers: accept: - '*/*' @@ -17,7 +17,7 @@ interactions: uri: https://api.com/v2/text/chat?version=2024-03-19 response: body: - string: '{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","results":[{"generated_text":"No, + string: '{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","results":[{"generated_text":"No, I do not want to destroy the world. I am designed to assist and provide helpful information","generated_token_count":20,"input_token_count":18,"stop_reason":"max_tokens","seed":967002347}],"conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"}' headers: @@ -53,9 +53,9 @@ interactions: response: body: string: '{"results":[{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","duration":759,"request":{"messages":[{"role":"user","content":"Do - you want to destroy the world?"}],"model_id":"meta-llama/llama-3-70b-instruct","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:31.000Z","response":{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","results":[{"seed":967002347,"stop_reason":"max_tokens","generated_text":"No, + you want to destroy the world?"}],"model_id":"meta-llama/llama-3-1-70b-instruct","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:31.000Z","response":{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","results":[{"seed":967002347,"stop_reason":"max_tokens","generated_text":"No, I do not want to destroy the world. I am designed to assist and provide helpful - information","input_token_count":18,"generated_token_count":20}],"model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"version":{"api":"v2","date":"2024-03-19"}}]}' + information","input_token_count":18,"generated_token_count":20}],"model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"version":{"api":"v2","date":"2024-03-19"}}]}' headers: cache-control: - private @@ -86,7 +86,7 @@ interactions: - request: body: '{"conversation_id": "02593997-bb70-47dd-97d4-209aa820aa98", "messages": [{"content": "What was my previous question?", "role": "user"}], "model_id": - "meta-llama/llama-3-70b-instruct"}' + "meta-llama/llama-3-1-70b-instruct"}' headers: accept: - '*/*' @@ -102,7 +102,7 @@ interactions: uri: https://api.com/v2/text/chat?version=2024-03-19 response: body: - string: '{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:32.733Z","results":[{"generated_text":"Your + string: '{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:32.733Z","results":[{"generated_text":"Your previous question was \"Do you want to destroy the world?\"","generated_token_count":14,"input_token_count":54,"stop_reason":"eos_token","seed":934152098}],"conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"}' headers: content-length: @@ -137,11 +137,11 @@ interactions: response: body: string: '{"results":[{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","duration":759,"request":{"messages":[{"role":"user","content":"Do - you want to destroy the world?"}],"model_id":"meta-llama/llama-3-70b-instruct","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:31.000Z","response":{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","results":[{"seed":967002347,"stop_reason":"max_tokens","generated_text":"No, + you want to destroy the world?"}],"model_id":"meta-llama/llama-3-1-70b-instruct","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:31.000Z","response":{"id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","results":[{"seed":967002347,"stop_reason":"max_tokens","generated_text":"No, I do not want to destroy the world. I am designed to assist and provide helpful - information","input_token_count":18,"generated_token_count":20}],"model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"version":{"api":"v2","date":"2024-03-19"}},{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","duration":582,"request":{"messages":[{"role":"user","content":"What - was my previous question?"}],"model_id":"meta-llama/llama-3-70b-instruct","parent_id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:33.000Z","response":{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","results":[{"seed":934152098,"stop_reason":"eos_token","generated_text":"Your - previous question was \"Do you want to destroy the world?\"","input_token_count":54,"generated_token_count":14}],"model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:32.733Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"parent_id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","version":{"api":"v2","date":"2024-03-19"}}]}' + information","input_token_count":18,"generated_token_count":20}],"model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:31.312Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"version":{"api":"v2","date":"2024-03-19"}},{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","duration":582,"request":{"messages":[{"role":"user","content":"What + was my previous question?"}],"model_id":"meta-llama/llama-3-1-70b-instruct","parent_id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"status":"success","created_at":"2024-05-20T14:12:33.000Z","response":{"id":"098d2940-3e40-4b67-8239-b63e16ae53f2","results":[{"seed":934152098,"stop_reason":"eos_token","generated_text":"Your + previous question was \"Do you want to destroy the world?\"","input_token_count":54,"generated_token_count":14}],"model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:32.733Z","conversation_id":"02593997-bb70-47dd-97d4-209aa820aa98"},"parent_id":"191d90ab-e7e0-4821-a66a-64dfdc0415b0","version":{"api":"v2","date":"2024-03-19"}}]}' headers: cache-control: - private diff --git a/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_stream.yaml b/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_stream.yaml index 5ba8be13..63dbe925 100644 --- a/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_stream.yaml +++ b/tests/integration/text/cassettes/test_chat_service/TestChatService.test_create_stream.yaml @@ -1,7 +1,7 @@ interactions: - request: body: '{"messages": [{"content": "I want to kill them! There are my enemies.", - "role": "user"}], "model_id": "meta-llama/llama-3-70b-instruct", "moderations": + "role": "user"}], "model_id": "meta-llama/llama-3-1-70b-instruct", "moderations": {"hap": {"input": {"enabled": true, "send_tokens": false, "threshold": 0.7}, "output": {"enabled": true, "send_tokens": true, "threshold": 0.7}}}, "parameters": {"max_new_tokens": 10, "min_new_tokens": 3}}' @@ -25,41 +25,41 @@ interactions: string: 'retry: 3000 - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.754Z","conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52","moderations":{"hap":[{"score":0.8397554755210876,"flagged":true,"success":true,"position":{"start":0,"end":20}}]}} + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.754Z","conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52","moderations":{"hap":[{"score":0.8397554755210876,"flagged":true,"success":true,"position":{"start":0,"end":20}}]}} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.826Z","results":[{"generated_text":"I + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.826Z","results":[{"generated_text":"I understan","generated_token_count":2,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.879Z","results":[{"generated_text":"d + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.879Z","results":[{"generated_text":"d tha","generated_token_count":3,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.917Z","results":[{"generated_text":"t + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.917Z","results":[{"generated_text":"t yo","generated_token_count":4,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.956Z","results":[{"generated_text":"u''r","generated_token_count":5,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.956Z","results":[{"generated_text":"u''r","generated_token_count":5,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:33.995Z","results":[{"generated_text":"e + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:33.995Z","results":[{"generated_text":"e feelin","generated_token_count":6,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:34.035Z","results":[{"generated_text":"g + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:34.035Z","results":[{"generated_text":"g ","generated_token_count":7,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:34.075Z","results":[{"generated_text":"a + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:34.075Z","results":[{"generated_text":"a stron","generated_token_count":8,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:34.129Z","results":[{"generated_text":"g + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:34.129Z","results":[{"generated_text":"g sens","generated_token_count":9,"input_token_count":0,"stop_reason":"not_finished"}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} - data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-70b-instruct","created_at":"2024-05-20T14:12:34.171Z","results":[{"generated_text":"e + data: {"id":"3adaa5b8-38d7-41c8-bd26-2285a5ae8c9f","model_id":"meta-llama/llama-3-1-70b-instruct","created_at":"2024-05-20T14:12:34.171Z","results":[{"generated_text":"e of","generated_token_count":10,"input_token_count":0,"stop_reason":"max_tokens","seed":401923414}],"conversation_id":"6a892fa0-6a3a-46e2-9a8d-c487fa172c52"} diff --git a/tests/integration/text/test_chat_service.py b/tests/integration/text/test_chat_service.py index 30fd6b34..bd6ef522 100644 --- a/tests/integration/text/test_chat_service.py +++ b/tests/integration/text/test_chat_service.py @@ -10,7 +10,7 @@ TextGenerationParameters, ) -TEST_MODEL_ID = "meta-llama/llama-3-70b-instruct" +TEST_MODEL_ID = "meta-llama/llama-3-1-70b-instruct" @pytest.mark.integration