From af57bf4154021281f826645ec05c3801cee2fbaa Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Thu, 25 Apr 2024 15:58:20 -0700
Subject: [PATCH] sgmk

---
 docs/tracing/faq/custom_llm_token_counting.mdx | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/tracing/faq/custom_llm_token_counting.mdx b/docs/tracing/faq/custom_llm_token_counting.mdx
index 0d8f2b44..41380bbf 100644
--- a/docs/tracing/faq/custom_llm_token_counting.mdx
+++ b/docs/tracing/faq/custom_llm_token_counting.mdx
@@ -36,7 +36,7 @@ def my_chat_model(messages: list):
 my_chat_model(
     [
         {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will"},
+        {"role": "user", "content": "SolidGoldMagikarp"},
     ]
 )
 ```
@@ -60,7 +60,7 @@ def my_chat_model_with_model(messages: list, model: str):
 my_chat_model_with_model(
     [
         {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will"},
+        {"role": "user", "content": "SolidGoldMagikarp"},
     ],
     model="gpt-3.5-turbo",
 )
@@ -93,7 +93,7 @@ list(
     my_streaming_chat_model(
         [
             {"role": "system", "content": "You are a bot."},
-            {"role": "user", "content": "Will but streaming"},
+            {"role": "user", "content": "SolidGoldMagikarp but streaming"},
         ],
         model="gpt-3.5-turbo",
     )
@@ -133,7 +133,7 @@ def my_chat_model_with_usage(messages: list, model: str = "gpt-3.5-turbo"):
 my_chat_model_with_usage(
     messages=[
         {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will but with usage"},
+        {"role": "user", "content": "SolidGoldMagikarp but with usage"},
     ],
 )
 ```
@@ -192,7 +192,7 @@ list(
     my_streaming_chat_model_with_usage(
         messages=[
             {"role": "system", "content": "You are a bot."},
-            {"role": "user", "content": "Will but with usage"},
+            {"role": "user", "content": "SolidGoldMagikarp but with usage"},
         ]
     )
 )
@@ -207,7 +207,7 @@ For completion models, your inputs must contain a key `prompt` with a string val
 def my_llm(prompt: str):
     return {"choices": [{"text": "hello, " + prompt}]}
 
-my_llm("Will")
+my_llm("SolidGoldMagikarp")
 ```
 
 If you want to add additional "invocation params" such as the model name, you can just add those keys. The `model` key can be used to let the cost estimator know which model is being used.
@@ -217,7 +217,7 @@ If you want to add additional "invocation params" such as the model name, you ca
 def my_llm_with_model(prompt: str, model: str):
     return {"choices": [{"text": "hello, " + prompt}]}
 
-my_llm_with_model("Will", model="gpt-3.5-turbo-instruct")
+my_llm_with_model("SolidGoldMagikarp", model="gpt-3.5-turbo-instruct")
 ```
 
 For streaming, you can "reduce" the outputs into the same format as the non-streaming version:
@@ -232,5 +232,5 @@ def my_streaming_llm(prompt: str, model: str):
     for chunk in ["hello, " + prompt]:
         yield {"choices": [{"text": chunk}]}
 
-list(my_streaming_llm("Will but streaming", model="gpt-3.5-turbo-instruct"))
+list(my_streaming_llm("SolidGoldMagikarp but streaming", model="gpt-3.5-turbo-instruct"))
 ```
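
For anyone trying the updated examples end to end, here is a minimal runnable sketch of the chat-model pattern the patched page documents: decorate the function with `traceable(run_type="llm")` and return output in the OpenAI chat-completion shape, including a `usage` block so LangSmith can record token counts. The decorator and `run_type` are the real `langsmith` SDK API; the response shape is an assumption modeled on the OpenAI format the page's examples follow, and the hard-coded token numbers are illustrative placeholders, not real counts. Assumes `langsmith` is installed and your LangSmith API key is configured in the environment.

```python
from langsmith import traceable


@traceable(run_type="llm")
def my_chat_model_with_usage(messages: list, model: str = "gpt-3.5-turbo"):
    # A real implementation would call your model here; this stub echoes a
    # canned reply in the OpenAI chat-completion response shape, with a
    # `usage` block carrying the token counts LangSmith reads.
    return {
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": "hello, " + messages[-1]["content"],
                }
            }
        ],
        "usage": {
            "prompt_tokens": 27,      # placeholder count
            "completion_tokens": 13,  # placeholder count
            "total_tokens": 40,       # placeholder count
        },
    }


my_chat_model_with_usage(
    messages=[
        {"role": "system", "content": "You are a bot."},
        {"role": "user", "content": "SolidGoldMagikarp but with usage"},
    ],
)
```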
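
The streaming completion variant can be exercised the same way. The sketch below mirrors the `my_streaming_llm` generator from the final hunk and adds a reducer passed via the SDK's `reduce_fn` hook, which combines the streamed chunks back into the non-streaming output shape; the reducer itself is an assumption about how you might merge this page's chunk format, not necessarily the page's own implementation.

```python
from langsmith import traceable


def _reduce_chunks(chunks: list):
    # Concatenate the text of every streamed chunk into one completion so the
    # traced output matches the non-streaming format.
    all_text = "".join(chunk["choices"][0]["text"] for chunk in chunks)
    return {"choices": [{"text": all_text}]}


@traceable(run_type="llm", reduce_fn=_reduce_chunks)
def my_streaming_llm(prompt: str, model: str):
    # A real implementation would stream from your model; this stub yields a
    # single canned chunk in the OpenAI completion shape.
    for chunk in ["hello, " + prompt]:
        yield {"choices": [{"text": chunk}]}


list(my_streaming_llm("SolidGoldMagikarp but streaming", model="gpt-3.5-turbo-instruct"))
```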