sgmk
hinthornw committed Apr 25, 2024
1 parent f475ef4 commit af57bf4
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions docs/tracing/faq/custom_llm_token_counting.mdx
@@ -36,7 +36,7 @@ def my_chat_model(messages: list):
my_chat_model(
    [
        {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will"},
+        {"role": "user", "content": "SolidGoldMagikarp"},
    ]
)
```
@@ -60,7 +60,7 @@ def my_chat_model_with_model(messages: list, model: str):
my_chat_model_with_model(
    [
        {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will"},
+        {"role": "user", "content": "SolidGoldMagikarp"},
    ],
    model="gpt-3.5-turbo",
)
@@ -93,7 +93,7 @@ list(
    my_streaming_chat_model(
        [
            {"role": "system", "content": "You are a bot."},
-            {"role": "user", "content": "Will but streaming"},
+            {"role": "user", "content": "SolidGoldMagikarp but streaming"},
        ],
        model="gpt-3.5-turbo",
    )
@@ -133,7 +133,7 @@ def my_chat_model_with_usage(messages: list, model: str = "gpt-3.5-turbo"):
my_chat_model_with_usage(
    messages=[
        {"role": "system", "content": "You are a bot."},
-        {"role": "user", "content": "Will but with usage"},
+        {"role": "user", "content": "SolidGoldMagikarp but with usage"},
    ],
)
```
@@ -192,7 +192,7 @@ list(
    my_streaming_chat_model_with_usage(
        messages=[
            {"role": "system", "content": "You are a bot."},
-            {"role": "user", "content": "Will but with usage"},
+            {"role": "user", "content": "SolidGoldMagikarp but with usage"},
        ]
    )
)
@@ -207,7 +207,7 @@ For completion models, your inputs must contain a key `prompt` with a string value
def my_llm(prompt: str):
    return {"choices": [{"text": "hello, " + prompt}]}

-my_llm("Will")
+my_llm("SolidGoldMagikarp")
```

If you want to add additional "invocation params" such as the model name, you can just add those keys. The `model` key can be used to let the cost estimator know which model is being used.
@@ -217,7 +217,7 @@ If you want to add additional "invocation params" such as the model name, you can
def my_llm_with_model(prompt: str, model: str):
    return {"choices": [{"text": "hello, " + prompt}]}

-my_llm_with_model("Will", model="gpt-3.5-turbo-instruct")
+my_llm_with_model("SolidGoldMagikarp", model="gpt-3.5-turbo-instruct")
```

For streaming, you can "reduce" the outputs into the same format as the non-streaming version:
@@ -232,5 +232,5 @@ def my_streaming_llm(prompt: str, model: str):
    for chunk in ["hello, " + prompt]:
        yield {"choices": [{"text": chunk}]}

-list(my_streaming_llm("Will but streaming", model="gpt-3.5-turbo-instruct"))
+list(my_streaming_llm("SolidGoldMagikarp but streaming", model="gpt-3.5-turbo-instruct"))
```
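
For reference, the hunks above show only the changed example lines from the doc. Below is a self-contained sketch of the pattern the surrounding file describes, assuming LangSmith's `traceable` decorator with `run_type="llm"` and an OpenAI-style response carrying a `usage` block; the field names and token counts here are illustrative, not the doc's exact schema:

```python
from langsmith import traceable

# Sketch only: an OpenAI-style chat response with a `usage` block so the
# tracer can record token counts. The token numbers are hard-coded
# placeholders for illustration.
@traceable(run_type="llm")
def my_chat_model_with_usage(messages: list, model: str = "gpt-3.5-turbo"):
    return {
        "choices": [
            {"message": {"role": "assistant", "content": "SolidGoldMagikarp"}}
        ],
        "usage": {
            "prompt_tokens": 27,
            "completion_tokens": 13,
            "total_tokens": 40,
        },
        "model": model,
    }

my_chat_model_with_usage(
    messages=[
        {"role": "system", "content": "You are a bot."},
        {"role": "user", "content": "SolidGoldMagikarp but with usage"},
    ]
)
```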
