Skip to content

Commit

Permalink
feat(messages): add support for image inputs (#359)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot authored Mar 4, 2024
1 parent 35b0347 commit 579f013
Show file tree
Hide file tree
Showing 26 changed files with 756 additions and 362 deletions.
45 changes: 23 additions & 22 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,10 @@ message = client.messages.create(
messages=[
{
"role": "user",
"content": "How does a court case get to the supreme court?",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
print(message.content)
```
Expand Down Expand Up @@ -69,10 +69,10 @@ async def main() -> None:
messages=[
{
"role": "user",
"content": "How does a court case get to the supreme court?",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
print(message.content)

Expand All @@ -96,10 +96,10 @@ stream = client.messages.create(
messages=[
{
"role": "user",
"content": "your prompt here",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
stream=True,
)
for event in stream:
Expand All @@ -118,10 +118,10 @@ stream = await client.messages.create(
messages=[
{
"role": "user",
"content": "your prompt here",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
stream=True,
)
async for event in stream:
Expand All @@ -147,7 +147,7 @@ async def main() -> None:
"content": "Say hello there!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
) as stream:
async for text in stream.text_stream:
print(text, end="", flush=True)
Expand Down Expand Up @@ -190,11 +190,12 @@ For a more fully fledged example see [`examples/bedrock.py`](https://github.com/

## Token counting

You can estimate billing for a given request with the `client.count_tokens()` method, eg:
You can see the exact token usage for a given request via the `usage` property on the response, e.g.:

```py
client = Anthropic()
client.count_tokens('Hello world!') # 3
message = client.messages.create(...)
message.usage
# Usage(input_tokens=25, output_tokens=13)
```

## Using types
Expand Down Expand Up @@ -227,10 +228,10 @@ try:
messages=[
{
"role": "user",
"content": "your prompt here",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
except anthropic.APIConnectionError as e:
print("The server could not be reached")
Expand Down Expand Up @@ -279,10 +280,10 @@ client.with_options(max_retries=5).messages.create(
messages=[
{
"role": "user",
"content": "Can you help me effectively ask for a raise at work?",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
```

Expand Down Expand Up @@ -311,10 +312,10 @@ client.with_options(timeout=5 * 1000).messages.create(
messages=[
{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
```

Expand Down Expand Up @@ -374,9 +375,9 @@ response = client.messages.with_raw_response.create(
max_tokens=1024,
messages=[{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
"content": "Hello, Claude",
}],
model="claude-2.1",
model="claude-3-opus-20240229",
)
print(response.headers.get('X-My-Header'))

Expand Down Expand Up @@ -407,10 +408,10 @@ with client.messages.with_streaming_response.create(
messages=[
{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
"content": "Hello, Claude",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
) as response:
print(response.headers.get("X-My-Header"))

Expand Down
19 changes: 1 addition & 18 deletions api.md
Original file line number Diff line number Diff line change
@@ -1,21 +1,3 @@
# Anthropic

Methods:

- <code>client.<a href="./src/anthropic/_client.py">count_tokens</a>(\*args) -> int</code>

# Completions

Types:

```python
from anthropic.types import Completion
```

Methods:

- <code title="post /v1/complete">client.completions.<a href="./src/anthropic/resources/completions.py">create</a>(\*\*<a href="src/anthropic/types/completion_create_params.py">params</a>) -> <a href="./src/anthropic/types/completion.py">Completion</a></code>

# Messages

Types:
Expand All @@ -26,6 +8,7 @@ from anthropic.types import (
ContentBlockDeltaEvent,
ContentBlockStartEvent,
ContentBlockStopEvent,
ImageBlockParam,
Message,
MessageDeltaEvent,
MessageDeltaUsage,
Expand Down
30 changes: 30 additions & 0 deletions examples/images.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import base64
from pathlib import Path

from anthropic import Anthropic

client = Anthropic()

# The Messages API's base64 image source expects `data` to be a base64-encoded
# string (`"type": "base64"` + `media_type`).
# NOTE(review): the original example passed the `Path` object itself as `data`;
# the declared type of that field is `str`, so read the file and encode it
# explicitly — confirm the SDK does not auto-encode `Path` values.
image_path = Path(__file__).parent.joinpath("logo.png")
image_data = base64.standard_b64encode(image_path.read_bytes()).decode("utf-8")

response = client.messages.create(
    max_tokens=1024,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Hello!",
                },
                {
                    "type": "image",
                    "source": {
                        "type": "base64",
                        "media_type": "image/png",
                        "data": image_data,
                    },
                },
            ],
        },
    ],
    model="claude-3-opus-20240229",
)
print(response.model_dump_json(indent=2))
Binary file added examples/logo.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
4 changes: 2 additions & 2 deletions examples/messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"content": "Hello!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
print(response)

Expand All @@ -30,6 +30,6 @@
"content": "How are you?",
},
],
model="claude-2.1",
model="claude-3-opus-20240229",
)
print(response2)
2 changes: 1 addition & 1 deletion examples/messages_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ async def main() -> None:
"content": "Say hello there!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
) as stream:
async for text in stream.text_stream:
print(text, end="", flush=True)
Expand Down
2 changes: 1 addition & 1 deletion examples/messages_stream_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ async def main() -> None:
"content": "Say hello there!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
event_handler=MyStream,
) as stream:
accumulated = await stream.get_final_message()
Expand Down
20 changes: 20 additions & 0 deletions examples/text_completions_demo_async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#!/usr/bin/env -S poetry run python

import asyncio

import anthropic
from anthropic import AsyncAnthropic


async def main() -> None:
    """Demo the legacy Text Completions API using the async client.

    Sends a single HUMAN/AI-prompt completion request and prints the result.
    """
    client = AsyncAnthropic()

    res = await client.completions.create(
        model="claude-2.1",
        prompt=f"{anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic.AI_PROMPT}",
        max_tokens_to_sample=1000,
    )
    print(res.completion)


if __name__ == "__main__":
    # Guard the entry point so importing this module does not fire a request.
    asyncio.run(main())
18 changes: 18 additions & 0 deletions examples/text_completions_demo_sync.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
#!/usr/bin/env -S poetry run python

import anthropic
from anthropic import Anthropic


def main() -> None:
    """Demo the legacy Text Completions API using the sync client.

    Sends a single HUMAN/AI-prompt completion request and prints the result.
    """
    client = Anthropic()

    res = client.completions.create(
        model="claude-2.1",
        prompt=f"{anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic.AI_PROMPT}",
        max_tokens_to_sample=1000,
    )
    print(res.completion)


if __name__ == "__main__":
    # Guard the entry point so importing this module does not fire a request.
    main()
57 changes: 57 additions & 0 deletions examples/text_completions_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
#!/usr/bin/env -S poetry run python

import asyncio

from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic, APIStatusError, AsyncAnthropic

client = Anthropic()
async_client = AsyncAnthropic()

question = """
Hey Claude! How can I recursively list all files in a directory in Python?
"""


def sync_stream() -> None:
    """Stream a Text Completion synchronously, printing tokens as they arrive."""
    stream = client.completions.create(
        prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
        model="claude-2.1",
        stream=True,
        max_tokens_to_sample=300,
    )

    for completion in stream:
        print(completion.completion, end="", flush=True)

    # Terminate the streamed line once the completion finishes.
    print()


async def async_stream() -> None:
    """Stream a Text Completion asynchronously, printing tokens as they arrive."""
    stream = await async_client.completions.create(
        prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
        model="claude-2.1",
        stream=True,
        max_tokens_to_sample=300,
    )

    async for completion in stream:
        print(completion.completion, end="", flush=True)

    print()


def stream_error() -> None:
    """Show that a bad streaming request surfaces as `APIStatusError`."""
    try:
        client.completions.create(
            prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
            model="claude-unknown-model",
            stream=True,
            max_tokens_to_sample=300,
        )
    except APIStatusError as err:
        print(f"Caught API status error with response body: {err.response.text}")


if __name__ == "__main__":
    # Run the demos only when executed as a script, not on import.
    sync_stream()
    asyncio.run(async_stream())
    stream_error()
4 changes: 2 additions & 2 deletions helpers.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ async with client.messages.stream(
"content": "Say hello there!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
) as stream:
async for text in stream.text_stream:
print(text, end="", flush=True)
Expand Down Expand Up @@ -74,7 +74,7 @@ async def main() -> None:
"content": "Say hello there!",
}
],
model="claude-2.1",
model="claude-3-opus-20240229",
event_handler=MyStream,
) as stream:
message = await stream.get_final_message()
Expand Down
14 changes: 12 additions & 2 deletions src/anthropic/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,12 @@ def count_tokens(
self,
text: str,
) -> int:
"""Count the number of tokens in a given string"""
"""Count the number of tokens in a given string.
Note that this is only accurate for older models, e.g. `claude-2.1`. For newer
models this can only be used as a _very_ rough estimate, instead you should rely
on the `usage` property in the response for exact counts.
"""
# Note: tokenizer is untyped
tokenizer = self.get_tokenizer()
encoded_text = tokenizer.encode(text) # type: ignore
Expand Down Expand Up @@ -522,7 +527,12 @@ async def count_tokens(
self,
text: str,
) -> int:
"""Count the number of tokens in a given string"""
"""Count the number of tokens in a given string.
Note that this is only accurate for older models, e.g. `claude-2.1`. For newer
models this can only be used as a _very_ rough estimate, instead you should rely
on the `usage` property in the response for exact counts.
"""
# Note: tokenizer is untyped
tokenizer = await self.get_tokenizer()
encoded_text = tokenizer.encode(text) # type: ignore
Expand Down
3 changes: 2 additions & 1 deletion src/anthropic/lib/streaming/_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -421,7 +421,8 @@ def accumulate_event(*, event: MessageStreamEvent, current_snapshot: Message | N
)
elif event.type == "content_block_delta":
content = current_snapshot.content[event.index]
content.text += event.delta.text
if content.type == "text" and event.delta.type == "text_delta":
content.text += event.delta.text
elif event.type == "message_delta":
current_snapshot.stop_reason = event.delta.stop_reason
current_snapshot.stop_sequence = event.delta.stop_sequence
Expand Down
Loading

0 comments on commit 579f013

Please sign in to comment.