diff --git a/README.md b/README.md
index 571a39f..4e4e901 100644
--- a/README.md
+++ b/README.md
@@ -160,15 +160,16 @@ import pandas as pd
 import blendsql
 from blendsql.ingredients import LLMMap, LLMQA, LLMJoin
 from blendsql.db import Pandas
-from blendsql.models import TransformersLLM, OpenaiLLM
+from blendsql.models import TransformersLLM, OpenaiLLM, AnthropicLLM
 
 # Optionally set how many async calls to allow concurrently
 # This depends on your OpenAI/Anthropic/etc. rate limits
 blendsql.config.set_async_limit(10)
 
 # Load model
-model = OpenaiLLM("gpt-4o-mini") # If you have a .env present with OpenAI API keys
-# model = TransformersLLM('Qwen/Qwen1.5-0.5B')
+model = OpenaiLLM("gpt-4o-mini") # requires .env file with `OPENAI_API_KEY`
+# model = AnthropicLLM("claude-3-haiku-20240307") # requires .env file with `ANTHROPIC_API_KEY`
+# model = TransformersLLM('Qwen/Qwen1.5-0.5B') # run with any local Transformers model
 
 # Prepare our local database
 db = Pandas(
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 1f623e9..75d4e8d 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -10,15 +10,16 @@ import pandas as pd
 import blendsql
 from blendsql.ingredients import LLMMap, LLMQA, LLMJoin
 from blendsql.db import Pandas
-from blendsql.models import TransformersLLM, OpenaiLLM
+from blendsql.models import TransformersLLM, OpenaiLLM, AnthropicLLM
 
 # Optionally set how many async calls to allow concurrently
 # This depends on your OpenAI/Anthropic/etc. rate limits
 blendsql.config.set_async_limit(10)
 
 # Load model
-model = OpenaiLLM("gpt-4o-mini") # If you have a .env present with OpenAI API keys
-# model = TransformersLLM('Qwen/Qwen1.5-0.5B')
+model = OpenaiLLM("gpt-4o-mini") # requires .env file with `OPENAI_API_KEY`
+# model = AnthropicLLM("claude-3-haiku-20240307") # requires .env file with `ANTHROPIC_API_KEY`
+# model = TransformersLLM('Qwen/Qwen1.5-0.5B') # run with any local Transformers model
 
 # Prepare our local database
 db = Pandas(
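
For context, here is a minimal end-to-end sketch of how the newly documented `AnthropicLLM` option slots into the quickstart flow. Only the imports, `set_async_limit`, and model constructors appear in this diff; the `fruits` table, the query text, and the exact keyword arguments to `blendsql.blend()` are illustrative assumptions, not part of the change.

```python
import pandas as pd
import blendsql
from blendsql.ingredients import LLMMap, LLMQA, LLMJoin
from blendsql.db import Pandas
from blendsql.models import AnthropicLLM

# Allow up to 10 concurrent async LLM calls (tune to your rate limits)
blendsql.config.set_async_limit(10)

# Use the Anthropic backend added in this diff; needs ANTHROPIC_API_KEY in a .env file
model = AnthropicLLM("claude-3-haiku-20240307")

# Hypothetical in-memory table for illustration; the real quickstart builds its own DataFrames
db = Pandas(
    {
        "fruits": pd.DataFrame(
            {
                "name": ["apple", "banana", "cherry"],
                "color": ["red", "yellow", "red"],
            }
        )
    }
)

# Illustrative query: LLMMap poses a yes/no question for each value in fruits.name
smoothie = blendsql.blend(
    query="""
    SELECT name FROM fruits
    WHERE {{LLMMap('Is this fruit typically sweet?', 'fruits::name')}} = TRUE
    """,
    db=db,
    default_model=model,
    ingredients={LLMMap, LLMQA, LLMJoin},
)
print(smoothie.df)
```

Swapping in `OpenaiLLM("gpt-4o-mini")` or `TransformersLLM('Qwen/Qwen1.5-0.5B')` should require no other changes to the sketch, which is the point of the expanded model comments in both files.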