diff --git a/docs/api_reference/guide_imports.json b/docs/api_reference/guide_imports.json index e942394f00ad2..1ef56069ea552 100644 --- a/docs/api_reference/guide_imports.json +++ b/docs/api_reference/guide_imports.json @@ -1 +1 @@ -{"ChatPromptTemplate": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "groq.md": "https://python.langchain.com/docs/integrations/chat/groq/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "LangChain supports many other chat models. 
Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "ai21.md": "https://python.langchain.com/docs/integrations/chat/ai21/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "perplexity.md": "https://python.langchain.com/docs/integrations/chat/perplexity/", "using chat invoke": "https://python.langchain.com/docs/integrations/chat/upstage/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": 
"https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/routing/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/query_checking/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": 
"https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ChatAnthropic": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/quick_start/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/multiple_tools/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "streaming.md": "https://python.langchain.com/docs/modules/model_io/chat/streaming/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": 
"https://python.langchain.com/docs/expression_language/how_to/routing/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ChatOpenAI": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/", "outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Remembrall": "https://python.langchain.com/docs/integrations/memory/remembrall/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/", "connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/", "graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/", "amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/", "falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/", "Initialize the language model": 
"https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "Correct": "https://python.langchain.com/docs/guides/productionization/evaluation/string/scoring_eval_chain/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/routing/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "logprobs.md": "https://python.langchain.com/docs/modules/model_io/chat/logprobs/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/", "structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "The input schema of the chain is the input 
schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "SystemMessage": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/", "service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "gigachat.md": "https://python.langchain.com/docs/integrations/chat/gigachat/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Generate your api key from: https://platform.moonshot.cn/console/api-keys": 
"https://python.langchain.com/docs/integrations/chat/moonshot/", "First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": "https://python.langchain.com/docs/integrations/chat/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/model_io/chat/quick_start/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "HumanMessage": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/", "alibaba_cloud_pai_eas.md": "https://python.langchain.com/docs/integrations/chat/alibaba_cloud_pai_eas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/", "get a new token: 
https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "bedrock.md": "https://python.langchain.com/docs/integrations/chat/bedrock/", "gigachat.md": "https://python.langchain.com/docs/integrations/chat/gigachat/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "LangChain supports many other chat models. Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/", "If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "baichuan.md": "https://python.langchain.com/docs/integrations/chat/baichuan/", "baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/", "tencent_hunyuan.md": "https://python.langchain.com/docs/integrations/chat/tencent_hunyuan/", "minimax.md": "https://python.langchain.com/docs/integrations/chat/minimax/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "promptlayer_chatopenai.md": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai/", "sparkllm.md": "https://python.langchain.com/docs/integrations/chat/sparkllm/", "Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/chat/moonshot/", "dappier.md": "https://python.langchain.com/docs/integrations/chat/dappier/", "First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": 
"https://python.langchain.com/docs/integrations/chat/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/model_io/chat/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "RunnableMap": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/parallel/"}, "RunnableLambda": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/parallel/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "MessagesPlaceholder": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the 
new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "ToolMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Tool calling {#tool-calling}": 
"https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "tool": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/"}, "convert_to_openai_tool": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/"}, "TavilySearchResults": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/tools/tavily_search/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/"}, "format_tool_to_openai_function": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/"}, "BaseMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", 
"%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "FunctionMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AgentAction": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/"}, "AgentFinish": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "create_openai_functions_agent": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_functions_agent/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "tracing_v2_enabled": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/"}, "AgentExecutor": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "format_to_openai_tool_messages": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/"}, "OpenAIToolsAgentOutputParser": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/"}, "DuckDuckGoSearchResults": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/"}, "AgentType": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. 
Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/"}, "initialize_agent": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "For Windows/Linux": 
"https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/"}, "load_tools": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "Each tool wrapps a requests wrapper": "https://python.langchain.com/docs/integrations/tools/requests/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "sceneXplain.md": 
"https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "Golden": "https://python.langchain.com/docs/integrations/providers/golden/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo/", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap/", "Stack Exchange": "https://python.langchain.com/docs/integrations/providers/stackexchange/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "EvaluatorType": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/"}, "RunEvalConfig": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, 
"arun_on_dataset": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, "run_on_dataset": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, "BaseChatModel": {"Contribute Integrations": "https://python.langchain.com/docs/contributing/integrations/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "deprecated": {"Contribute Integrations": "https://python.langchain.com/docs/contributing/integrations/"}, "ChatSession": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "map_ai_messages": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "The file token.json stores the user's access and refresh tokens, and is": "https://python.langchain.com/docs/integrations/chat_loaders/gmail/"}, "merge_chat_runs": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "FolderFacebookMessengerChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/facebook/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "SingleFileFacebookMessengerChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/facebook/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "convert_messages_for_finetuning": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/"}, "StrOutputParser": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/retrievers/tavily/", "LangChain supports many other chat models. Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Install the package": "https://python.langchain.com/docs/integrations/llms/volcengine_maas/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "AI21 Contextual Answer {#ai21-contextual-answer}": "https://python.langchain.com/docs/integrations/llms/ai21/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/query_checking/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "convert_message_to_dict": {"Filter out tweets that reference other tweets, because it's a bit weird": "https://python.langchain.com/docs/integrations/chat_loaders/twitter/"}, "AIMessage": {"Filter out tweets that reference other tweets, because it's a bit weird": "https://python.langchain.com/docs/integrations/chat_loaders/twitter/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", 
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/human_in_the_loop/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "convert_pydantic_to_openai_function": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/"}, "PydanticOutputFunctionsParser": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/"}, "LangSmithRunChatLoader": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/"}, "GMailLoader": {"The file token.json stores the user's access and refresh tokens, and is": "https://python.langchain.com/docs/integrations/chat_loaders/gmail/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "SlackChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/slack/", "Slack": "https://python.langchain.com/docs/integrations/providers/slack/"}, "WhatsAppChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/whatsapp/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "WhatsApp": 
"https://python.langchain.com/docs/integrations/providers/whatsapp/", "whatsapp_chat.md": "https://python.langchain.com/docs/integrations/document_loaders/whatsapp_chat/"}, "LangSmithDatasetChatLoader": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/"}, "IMessageChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "TelegramChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/telegram/", "Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/"}, "base": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/"}, "BookendEmbeddings": {"bookend.md": "https://python.langchain.com/docs/integrations/text_embedding/bookend/"}, "HuggingFaceBgeEmbeddings": {"bge_huggingface.md": "https://python.langchain.com/docs/integrations/text_embedding/bge_huggingface/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"}, "QuantizedBiEncoderEmbeddings": {"optimum_intel.md": "https://python.langchain.com/docs/integrations/text_embedding/optimum_intel/", "Intel": "https://python.langchain.com/docs/integrations/providers/intel/"}, "FireworksEmbeddings": {"Using the Embedding Model {#using-the-embedding-model}": "https://python.langchain.com/docs/integrations/text_embedding/fireworks/"}, "XinferenceEmbeddings": {"xinference.md": "https://python.langchain.com/docs/integrations/text_embedding/xinference/"}, "LLMRailsEmbeddings": {"llm_rails.md": "https://python.langchain.com/docs/integrations/text_embedding/llm_rails/"}, "DeepInfraEmbeddings": {"sign up for an account: https://deepinfra.com/login?utm_source=langchain": "https://python.langchain.com/docs/integrations/text_embedding/deepinfra/", "DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/"}, "HuggingFaceEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/", "Equivalent to SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")": "https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", 
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/", "Create collection if running for the first time. If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "HuggingFaceInferenceAPIEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/"}, "HuggingFaceHubEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/", "text_embeddings_inference.md": "https://python.langchain.com/docs/integrations/text_embedding/text_embeddings_inference/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/"}, "GoogleGenerativeAIEmbeddings": {"google_generative_ai.md": "https://python.langchain.com/docs/integrations/text_embedding/google_generative_ai/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "GPT4AllEmbeddings": {"gpt4all.md": "https://python.langchain.com/docs/integrations/text_embedding/gpt4all/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "MosaicMLInstructorEmbeddings": {"sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/text_embedding/mosaicml/"}, "QuantizedBgeEmbeddings": {"itrex.md": "https://python.langchain.com/docs/integrations/text_embedding/itrex/", "Intel": "https://python.langchain.com/docs/integrations/providers/intel/"}, "OpenAIEmbeddings": {"openai.md": "https://python.langchain.com/docs/integrations/text_embedding/openai/", "set the environment variables needed for openai package to know to reach out to azure": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai/", "azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "knn.md": "https://python.langchain.com/docs/integrations/retrievers/knn/", "initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/", "svm.md": 
"https://python.langchain.com/docs/integrations/retrievers/svm/", "create the index": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "Wait until the cluster is ready for use.": 
"https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "initialize MongoDB python client": 
"https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Text embedding models": "https://python.langchain.com/docs/modules/data_connection/text_embedding/index/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/semantic-chunker/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": 
"https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "VertexAIEmbeddings": {"google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_cloud_sql_pg/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "@markdown Please specify a source for demo purpose.": "https://python.langchain.com/docs/integrations/vectorstores/google_firestore/"}, "BedrockEmbeddings": {"async embed query": "https://python.langchain.com/docs/integrations/text_embedding/bedrock/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "GigaChatEmbeddings": {"gigachat.md": "https://python.langchain.com/docs/integrations/text_embedding/gigachat/", "Salute Devices": "https://python.langchain.com/docs/integrations/providers/salute_devices/"}, "OllamaEmbeddings": {"ollama.md": "https://python.langchain.com/docs/integrations/text_embedding/ollama/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/"}, "OCIGenAIEmbeddings": {"use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/"}, "FastEmbedEmbeddings": {"fastembed.md": "https://python.langchain.com/docs/integrations/text_embedding/fastembed/"}, "LlamaCppEmbeddings": {"llamacpp.md": "https://python.langchain.com/docs/integrations/text_embedding/llamacpp/", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp/"}, "NLPCloudEmbeddings": {"nlp_cloud.md": "https://python.langchain.com/docs/integrations/text_embedding/nlp_cloud/", "NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud/"}, "LaserEmbeddings": {"Ex Instantiationz": "https://python.langchain.com/docs/integrations/text_embedding/laser/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "OpenCLIPEmbeddings": {"Image URIs": "https://python.langchain.com/docs/integrations/text_embedding/open_clip/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/"}, "TitanTakeoffEmbed": {"Model config for the embedding model, where you can specify the following parameters:": "https://python.langchain.com/docs/integrations/text_embedding/titan_takeoff/"}, "MistralAIEmbeddings": {"pip install -U langchain-mistralai": "https://python.langchain.com/docs/integrations/text_embedding/mistralai/", "mistralai.md": 
"https://python.langchain.com/docs/integrations/providers/mistralai/"}, "SpacyEmbeddings": {"spacy_embedding.md": "https://python.langchain.com/docs/integrations/text_embedding/spacy_embedding/", "spaCy": "https://python.langchain.com/docs/integrations/providers/spacy/"}, "BaichuanTextEmbeddings": {"baichuan.md": "https://python.langchain.com/docs/integrations/text_embedding/baichuan/", "Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/"}, "TogetherEmbeddings": {"install package": "https://python.langchain.com/docs/integrations/text_embedding/together/", "together.md": "https://python.langchain.com/docs/integrations/providers/together/"}, "HuggingFaceInstructEmbeddings": {"instruct_embeddings.md": "https://python.langchain.com/docs/integrations/text_embedding/instruct_embeddings/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/"}, "QianfanEmbeddingsEndpoint": {"baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/text_embedding/baidu_qianfan_endpoint/", "ernie.md": "https://python.langchain.com/docs/integrations/text_embedding/ernie/", "Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/"}, "CohereEmbeddings": {"cohere.md": "https://python.langchain.com/docs/integrations/text_embedding/cohere/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Text embedding models": "https://python.langchain.com/docs/modules/data_connection/text_embedding/index/"}, "EdenAiEmbeddings": {"edenai.md": "https://python.langchain.com/docs/integrations/text_embedding/edenai/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "JohnSnowLabsEmbeddings": {"If you have a enterprise license, you can run this to install enterprise features": "https://python.langchain.com/docs/integrations/text_embedding/johnsnowlabs_embedding/"}, "ErnieEmbeddings": {"ernie.md": "https://python.langchain.com/docs/integrations/text_embedding/ernie/"}, "LLMChain": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "LLM Hyperparameters": 
"https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/", "solar.md": "https://python.langchain.com/docs/integrations/llms/solar/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/", "openllm.md": "https://python.langchain.com/docs/integrations/llms/openllm/", "octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/gpt4all/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/", "sign up for an account: 
https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "Calling a single prompt": "https://python.langchain.com/docs/integrations/llms/ibm_watsonx/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "default infer_api for a local deployed Yuan2.0 inference server": "https://python.langchain.com/docs/integrations/llms/yuan2/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/", "Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/", "Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "Set env var OPENAI_API_KEY or load from a .env file": 
"https://python.langchain.com/docs/use_cases/summarization/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/"}, "ClarifaiEmbeddings": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/text_embedding/clarifai/", "Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai/"}, "PromptTemplate": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", 
"DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "airbyte.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/", "solar.md": "https://python.langchain.com/docs/integrations/llms/solar/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/", "openllm.md": "https://python.langchain.com/docs/integrations/llms/openllm/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/", "anthropic.md": "https://python.langchain.com/docs/integrations/llms/anthropic/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/", "sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "huggingface_pipelines.md": "https://python.langchain.com/docs/integrations/llms/huggingface_pipelines/", "install the opaqueprompts and langchain packages": 
"https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/", "AI21 Contextual Answer {#ai21-contextual-answer}": "https://python.langchain.com/docs/integrations/llms/ai21/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/llms/cohere/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "Calling a single prompt": "https://python.langchain.com/docs/integrations/llms/ibm_watsonx/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/", "Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/", "Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/", "openvino.md": "https://python.langchain.com/docs/integrations/llms/openvino/", "weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", 
"Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/", "retry.md": 
"https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/", "Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "partial.md": "https://python.langchain.com/docs/modules/model_io/prompts/partial/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/", "Examples of a fictional translation task.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}, "AzureOpenAIEmbeddings": {"set the environment variables needed for openai package to know to reach out to azure": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai/", "Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/"}, "InfinityEmbeddings": {"Option 1: Use infinity from Python {#option-1-use-infinity-from-python}": "https://python.langchain.com/docs/integrations/text_embedding/infinity/", "Infinity": "https://python.langchain.com/docs/integrations/providers/infinity/"}, "InfinityEmbeddingsLocal": {"Option 1: Use infinity from Python {#option-1-use-infinity-from-python}": "https://python.langchain.com/docs/integrations/text_embedding/infinity/"}, "AwaEmbeddings": {"pip install awadb": "https://python.langchain.com/docs/integrations/text_embedding/awadb/", "AwaDB": "https://python.langchain.com/docs/integrations/providers/awadb/"}, "VolcanoEmbeddings": {"volcengine.md": "https://python.langchain.com/docs/integrations/text_embedding/volcengine/"}, "MiniMaxEmbeddings": {"minimax.md": "https://python.langchain.com/docs/integrations/text_embedding/minimax/", "Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/"}, "FakeEmbeddings": {"fake.md": "https://python.langchain.com/docs/integrations/text_embedding/fake/", "initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/vectara/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "from langchain_community.embeddings.openai 
import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "NeMoEmbeddings": {"nemo.md": "https://python.langchain.com/docs/integrations/text_embedding/nemo/"}, "NomicEmbeddings": {"install package": "https://python.langchain.com/docs/integrations/text_embedding/nomic/", "nomic.md": "https://python.langchain.com/docs/integrations/providers/nomic/"}, "SparkLLMTextEmbeddings": {"sparkllm.md": "https://python.langchain.com/docs/integrations/text_embedding/sparkllm/"}, "PremAIEmbeddings": {"Let's start by doing some imports and define our embedding object": "https://python.langchain.com/docs/integrations/text_embedding/premai/"}, "ElasticsearchEmbeddings": {"Define the model ID": "https://python.langchain.com/docs/integrations/text_embedding/elasticsearch/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/"}, "VoyageAIEmbeddings": {"retrieve the most relevant documents": "https://python.langchain.com/docs/integrations/text_embedding/voyageai/", "VoyageAI": "https://python.langchain.com/docs/integrations/providers/voyageai/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/"}, "KNNRetriever": {"retrieve the most relevant documents": "https://python.langchain.com/docs/integrations/text_embedding/voyageai/", "knn.md": "https://python.langchain.com/docs/integrations/retrievers/knn/"}, "SelfHostedEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "SelfHostedHuggingFaceEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "SelfHostedHuggingFaceInstructEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "AnyscaleEmbeddings": {"anyscale.md": "https://python.langchain.com/docs/integrations/text_embedding/anyscale/", "Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/"}, "EmbaasEmbeddings": {"Set API key": "https://python.langchain.com/docs/integrations/text_embedding/embaas/"}, "YandexGPTEmbeddings": {"yandex.md": "https://python.langchain.com/docs/integrations/text_embedding/yandex/"}, "JinaEmbeddings": {"jina.md": "https://python.langchain.com/docs/integrations/text_embedding/jina/", "Jina": "https://python.langchain.com/docs/integrations/providers/jina/"}, "AlephAlphaAsymmetricSemanticEmbedding": {"aleph_alpha.md": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha/", "Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/"}, "AlephAlphaSymmetricSemanticEmbedding": {"aleph_alpha.md": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha/", "Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/"}, "CloudflareWorkersAIEmbeddings": {"single string embeddings": "https://python.langchain.com/docs/integrations/text_embedding/cloudflare_workersai/", 
"Cloudflare": "https://python.langchain.com/docs/integrations/providers/cloudflare/"}, "DashScopeEmbeddings": {"dashscope.md": "https://python.langchain.com/docs/integrations/text_embedding/dashscope/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/"}, "TensorflowHubEmbeddings": {"tensorflowhub.md": "https://python.langchain.com/docs/integrations/text_embedding/tensorflowhub/"}, "LlamafileEmbeddings": {"llamafile setup": "https://python.langchain.com/docs/integrations/text_embedding/llamafile/"}, "GradientEmbeddings": {"(demo) compute similarity": "https://python.langchain.com/docs/integrations/text_embedding/gradient/", "Gradient": "https://python.langchain.com/docs/integrations/providers/gradient/"}, "ModelScopeEmbeddings": {"modelscope_hub.md": "https://python.langchain.com/docs/integrations/text_embedding/modelscope_hub/", "ModelScope": "https://python.langchain.com/docs/integrations/providers/modelscope/"}, "SagemakerEndpointEmbeddings": {"client = boto3.client(": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "EmbeddingsContentHandler": {"client = boto3.client(": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint/"}, "DocArrayInMemorySearch": {"async embed query": "https://python.langchain.com/docs/integrations/text_embedding/upstage/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/"}, "OpenVINOEmbeddings": {"openvino.md": "https://python.langchain.com/docs/integrations/text_embedding/openvino/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/"}, "OpenVINOBgeEmbeddings": {"openvino.md": "https://python.langchain.com/docs/integrations/text_embedding/openvino/"}, "NVIDIAEmbeddings": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "NVIDIA": "https://python.langchain.com/docs/integrations/providers/nvidia/"}, "FAISS": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "RunnablePassthrough": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": 
"https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "ChatNVIDIA": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "NVIDIA": "https://python.langchain.com/docs/integrations/providers/nvidia/"}, "LocalAIEmbeddings": {"if you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through": "https://python.langchain.com/docs/integrations/text_embedding/localai/"}, "AzureAISearchRetriever": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/"}, "DirectoryLoader": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/"}, "TextLoader": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Helper function for printing docs": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "atlas.md": "https://python.langchain.com/docs/integrations/vectorstores/atlas/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": 
"https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/vectorstores/clarifai/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "Document loaders": "https://python.langchain.com/docs/modules/data_connection/document_loaders/index/", "File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "TokenTextSplitter": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "AzureSearch": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/"}, "RePhraseQueryRetriever": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/"}, "WebBaseLoader": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/", "merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/", "Use this piece of code for testing new custom BeautifulSoup parsers": "https://python.langchain.com/docs/integrations/document_loaders/web_base/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the 
model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/"}, "RecursiveCharacterTextSplitter": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community 
langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "Full list of supported languages": "https://python.langchain.com/docs/modules/data_connection/document_transformers/code_splitter/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/", "for local file use html_splitter.split_text_from_file()": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_header_metadata/", "MD splits": "https://python.langchain.com/docs/modules/data_connection/document_transformers/markdown_header_metadata/", "Split": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "YouSearchAPIWrapper": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/"}, "YouRetriever": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/"}, "Jaguar": {"cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Jaguar": "https://python.langchain.com/docs/integrations/providers/jaguar/"}, "CharacterTextSplitter": {"cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", 
"openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/vectorstores/clarifai/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "If using the default Docker installation, use this instantiation instead:": 
"https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "Create collection if running for the first time. If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/", 
"adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/"}, "MultiVectorRetriever": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "Document": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "client.schema.delete_all()": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid/", "bm25.md": "https://python.langchain.com/docs/integrations/retrievers/bm25/", "Create a retriever with a demo encoder": "https://python.langchain.com/docs/integrations/retrievers/qdrant-sparse/", "elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/", "tf_idf.md": "https://python.langchain.com/docs/integrations/retrievers/tf_idf/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "@markdown Please specify a source for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_firestore/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "OR": 
"https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/", "ai21_semantic_text_splitter.md": "https://python.langchain.com/docs/integrations/document_transformers/ai21_semantic_text_splitter/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "doctran_extract_properties.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties/", "google_translate.md": "https://python.langchain.com/docs/integrations/document_transformers/google_translate/", "doctran_interrogate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document/", "doctran_translate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document/", "Feature structure of `mlqa/en` dataset:": "https://python.langchain.com/docs/integrations/document_loaders/tensorflow_datasets/", "@markdown Please fill in the both the Google Cloud region and name of your Cloud SQL instance.": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_sql_mssql/", "airbyte_salesforce.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_salesforce/", "airbyte_cdk.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_cdk/", "airbyte_stripe.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_stripe/", "copypaste.md": "https://python.langchain.com/docs/integrations/document_loaders/copypaste/", "airbyte_typeform.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_typeform/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/document_loaders/google_datastore/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "airbyte_hubspot.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_hubspot/", "airbyte_gong.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_gong/", "@markdown Please specify an endpoint associated with the instance and a key prefix for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_memorystore_redis/", "@markdown Please specify an instance and a table for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_bigtable/", "@title Set Your Values 
Here { display-mode: \"form\" }": "https://python.langchain.com/docs/integrations/document_loaders/google_el_carro/", "airbyte_shopify.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_shopify/", "airbyte_zendesk_support.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_zendesk_support/", "@markdown Please specify an instance id, a database, and a table for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_spanner/", "The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/constructing/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "BaseStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/"}, "VectorStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/"}, "InMemoryStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "This text splitter 
is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/"}, "ContextualCompressionRetriever": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/"}, "LLMLinguaCompressor": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/"}, "RetrievalQA": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/", "bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "ElasticSearchBM25Retriever": {"Alternatively, you can load an existing index": "https://python.langchain.com/docs/integrations/retrievers/elastic_search_bm25/"}, "OutlineRetriever": {"outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "Outline": 
"https://python.langchain.com/docs/integrations/providers/outline/"}, "ConversationalRetrievalChain": {"outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/"}, "ZepMemory": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "Zep": "https://python.langchain.com/docs/integrations/providers/zep/"}, "SearchScope": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore/"}, "SearchType": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "ZepRetriever": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "Zep": "https://python.langchain.com/docs/integrations/providers/zep/"}, "VespaRetriever": {"vespa.md": "https://python.langchain.com/docs/integrations/retrievers/vespa/", "Vespa": "https://python.langchain.com/docs/integrations/providers/vespa/"}, "AmazonKendraRetriever": {"amazon_kendra_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/amazon_kendra_retriever/"}, "AmazonKnowledgeBasesRetriever": {"bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/"}, "Bedrock": {"bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/"}, "CohereRerank": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/"}, "Cohere": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/"}, "NeuralDBRetriever": {"From scratch": "https://python.langchain.com/docs/integrations/retrievers/thirdai_neuraldb/"}, "SingleStoreDB": {"Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "SingleStoreDB": "https://python.langchain.com/docs/integrations/providers/singlestoredb/"}, "WikipediaRetriever": {"get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/wikipedia/", "Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia/", "Uncomment if you want to log to LangSmith": 
"https://python.langchain.com/docs/use_cases/question_answering/citations/"}, "MetalRetriever": {"metal.md": "https://python.langchain.com/docs/integrations/retrievers/metal/", "Metal": "https://python.langchain.com/docs/integrations/providers/metal/"}, "BreebsRetriever": {"breebs.md": "https://python.langchain.com/docs/integrations/retrievers/breebs/", "Breebs (Open Knowledge)": "https://python.langchain.com/docs/integrations/providers/breebs/"}, "CSVLoader": {"STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "csv.md": "https://python.langchain.com/docs/integrations/document_loaders/csv/", "pebblo.md": "https://python.langchain.com/docs/integrations/document_loaders/pebblo/", "CSV": "https://python.langchain.com/docs/modules/data_connection/document_loaders/csv/"}, "ChatGPTPluginRetriever": {"STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/"}, "KayAiRetriever": {"Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/"}, "ChatCohere": {"cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/chat/cohere/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/quick_start/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "CohereRagRetriever": {"cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/"}, "DriaRetriever": {"Installation {#installation}": "https://python.langchain.com/docs/integrations/retrievers/dria_index/"}, "DocArrayRetriever": {"initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/"}, "SVMRetriever": {"svm.md": "https://python.langchain.com/docs/integrations/retrievers/svm/"}, "TavilySearchAPIRetriever": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/retrievers/tavily/"}, "PineconeHybridSearchRetriever": {"create the index": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search/", "Pinecone": "https://python.langchain.com/docs/integrations/providers/pinecone/"}, "DeepLake": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "Activeloop Deep Lake": "https://python.langchain.com/docs/integrations/providers/activeloop_deeplake/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/"}, "AsyncHtmlLoader": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "html2text.md": "https://python.langchain.com/docs/integrations/document_transformers/html2text/", "async_html.md": "https://python.langchain.com/docs/integrations/document_loaders/async_html/", "Set env var OPENAI_API_KEY or 
load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "Html2TextTransformer": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "html2text.md": "https://python.langchain.com/docs/integrations/document_transformers/html2text/", "async_chromium.md": "https://python.langchain.com/docs/integrations/document_loaders/async_chromium/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "create_structured_output_chain": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "HumanMessagePromptTemplate": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/"}, "PubMedRetriever": {"pubmed.md": "https://python.langchain.com/docs/integrations/retrievers/pubmed/", "PubMed": "https://python.langchain.com/docs/integrations/providers/pubmed/"}, "WeaviateHybridSearchRetriever": {"client.schema.delete_all()": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid/"}, "EmbedchainRetriever": {"Installation {#installation}": "https://python.langchain.com/docs/integrations/retrievers/embedchain/"}, "create_retrieval_chain": {"ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/"}, "create_stuff_documents_chain": {"ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import 
dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/"}, "ArxivRetriever": {"get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv/"}, "BM25Retriever": {"bm25.md": "https://python.langchain.com/docs/integrations/retrievers/bm25/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/"}, "QdrantSparseVectorRetriever": {"Create a retriever with a demo encoder": "https://python.langchain.com/docs/integrations/retrievers/qdrant-sparse/"}, "DeterministicFakeEmbedding": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/"}, "Embeddings": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/"}, "ElasticsearchRetriever": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/"}, "ArceeRetriever": {"Define filters": "https://python.langchain.com/docs/integrations/retrievers/arcee/", "Arcee": "https://python.langchain.com/docs/integrations/providers/arcee/"}, "FlashrankRerank": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/"}, "ChaindeskRetriever": {"chaindesk.md": "https://python.langchain.com/docs/integrations/retrievers/chaindesk/", "Chaindesk": "https://python.langchain.com/docs/integrations/providers/chaindesk/"}, "MergerRetriever": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/"}, "EmbeddingsClusteringFilter": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/"}, "EmbeddingsRedundantFilter": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "LongContextReorder": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "TFIDFRetriever": {"tf_idf.md": "https://python.langchain.com/docs/integrations/retrievers/tf_idf/"}, "GoogleVertexAIMultiTurnSearchRetriever": {"google_vertex_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/google_vertex_ai_search/"}, "GoogleVertexAISearchRetriever": {"google_vertex_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/google_vertex_ai_search/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "Milvus": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/milvus_self_query/", "Milvus": "https://python.langchain.com/docs/integrations/providers/milvus/", "Zilliz": "https://python.langchain.com/docs/integrations/providers/zilliz/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/"}, "AttributeInfo": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "This example only specifies a filter": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "SelfQueryRetriever": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "Chroma": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/chroma-checkpoint/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "OpenAI": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": 
"https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/", "lemonai.md": "https://python.langchain.com/docs/integrations/tools/lemonai/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "LangChain Decorators \u2728": "https://python.langchain.com/docs/integrations/providers/langchain_decorators/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": 
"https://python.langchain.com/docs/integrations/providers/google_serper/", "Helicone": "https://python.langchain.com/docs/integrations/providers/helicone/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "you don't need to call close to write profiles to WhyLabs, upload will occur periodically, but to demo let's not wait.": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "xorbits.md": "https://python.langchain.com/docs/integrations/toolkits/xorbits/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "in apache-spark root directory. (tested here with \"spark-3.4.0-bin-hadoop3 and later\")": "https://python.langchain.com/docs/integrations/toolkits/spark/", "For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "json.md": "https://python.langchain.com/docs/integrations/toolkits/json/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "Layerup Security": "https://python.langchain.com/docs/guides/productionization/safety/layerup_security/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/", "Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "You can kick off concurrent runs from within the context manager": "https://python.langchain.com/docs/modules/callbacks/token_counting/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/quick_start/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "streaming_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/streaming_llm/", "Quick Start {#quick-start}": "https://python.langchain.com/docs/modules/model_io/llms/quick_start/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/"}, "PGVector": {"This example only specifies a relevant query": 
"https://python.langchain.com/docs/integrations/retrievers/self_query/pgvector_self_query/", "PGVector": "https://python.langchain.com/docs/integrations/providers/pgvector/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/"}, "Weaviate": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/weaviate_self_query/"}, "Vectara": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/vectara/", "Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/index/"}, "DashVector": {"create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "DashVector": "https://python.langchain.com/docs/integrations/providers/dashvector/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/"}, "Tongyi": {"create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "Install the package": "https://python.langchain.com/docs/integrations/llms/tongyi/"}, "DatabricksVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/databricks_vector_search/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/"}, "Dingo": {"or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "DingoDB": "https://python.langchain.com/docs/integrations/providers/dingo/"}, "OpenSearchVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/opensearch_self_query/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "OpenSearch": "https://python.langchain.com/docs/integrations/providers/opensearch/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/"}, "ElasticsearchStore": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/elasticsearch_self_query/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "ConnectionParams": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "MetaField": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/"}, "TencentVectorDB": {"This example only specifies a relevant query": 
"https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/"}, "TimescaleVector": {"Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/"}, "AstraDB": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/astradb/"}, "SupabaseVectorStore": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/supabase_self_query/", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/providers/supabase/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/"}, "Redis": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/redis_self_query/", "Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "MyScale": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/myscale_self_query/", "MyScale": "https://python.langchain.com/docs/integrations/providers/myscale/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/"}, "MongoDBAtlasVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/"}, "Qdrant": {"import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Qdrant": "https://python.langchain.com/docs/integrations/providers/qdrant/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/"}, "AzureMLOnlineEndpoint": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "AzureOpenAI": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "The API version you want to use: set this to `2023-12-01-preview` for the released version.": "https://python.langchain.com/docs/integrations/llms/azure_openai/"}, "AzureChatOpenAI": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "AzureAIDataLoader": {"Microsoft": 
"https://python.langchain.com/docs/integrations/platforms/microsoft/", "Create a connection to your project": "https://python.langchain.com/docs/integrations/document_loaders/azure_ai_data/"}, "AzureAIDocumentIntelligenceLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/", "microsoft_excel.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_excel/", "microsoft_powerpoint.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint/", "azure_document_intelligence.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_document_intelligence/", "Microsoft Office": "https://python.langchain.com/docs/modules/data_connection/document_loaders/office_file/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "AzureBlobStorageContainerLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "azure_blob_storage_container.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_container/"}, "AzureBlobStorageFileLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "azure_blob_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_file/"}, "OneDriveLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "microsoft_onedrive.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onedrive/"}, "UnstructuredWordDocumentLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/"}, "UnstructuredExcelLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_excel.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_excel/"}, "SharePointLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "loads documents from root directory": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_sharepoint/"}, "UnstructuredPowerPointLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_powerpoint.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint/"}, "OneNoteLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "microsoft_onenote.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onenote/"}, "AzureCosmosDBVectorSearch": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/"}, 
"O365Toolkit": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/"}, "PowerBIToolkit": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "PowerBIDataset": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "BingSearchAPIWrapper": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "bing_search.md": "https://python.langchain.com/docs/integrations/tools/bing_search/"}, "PresidioAnonymizer": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/"}, "PresidioReversibleAnonymizer": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Multi-language data anonymization with Microsoft Presidio {#multi-language-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/"}, "AmazonAPIGateway": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/"}, "ContentHandlerBase": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "S3DirectoryLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "aws_s3_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_directory/"}, "S3FileLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "aws_s3_file.md": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_file/"}, "AmazonTextractPDFLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "AthenaLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "athena.md": "https://python.langchain.com/docs/integrations/document_loaders/athena/"}, "DocumentDBVectorSearch": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "DynamoDBChatMessageHistory": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": 
"https://python.langchain.com/docs/integrations/memory/aws_dynamodb/"}, "SageMakerCallbackHandler": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/"}, "AmazonComprehendModerationChain": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ChatHuggingFace": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/"}, "HuggingFacePipeline": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "huggingface_pipelines.md": "https://python.langchain.com/docs/integrations/llms/huggingface_pipelines/", "lmformatenforcer_experimental.md": "https://python.langchain.com/docs/integrations/llms/lmformatenforcer_experimental/", "We'll choose a regex that matches to a structured json string that looks like:": "https://python.langchain.com/docs/integrations/llms/rellm_experimental/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/", "jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/", "openvino.md": "https://python.langchain.com/docs/integrations/llms/openvino/", "weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/"}, "HuggingFaceDatasetLoader": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "hugging_face_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset/"}, "load_huggingface_tool": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "Requires transformers>=4.29.0 and huggingface_hub>=0.14.1": "https://python.langchain.com/docs/integrations/tools/huggingface_tools/"}, "ChatGPTLoader": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "chatgpt_loader.md": "https://python.langchain.com/docs/integrations/document_loaders/chatgpt_loader/"}, "DallEAPIWrapper": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/"}, "OpenAIModerationChain": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/"}, "GoogleGenerativeAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "VertexAIModelGarden": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/"}, "ChatGoogleGenerativeAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/"}, "ChatVertexAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that 
your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "BigQueryLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "Note that the `id` column is being returned twice, with one instance aliased as `source`": "https://python.langchain.com/docs/integrations/document_loaders/google_bigquery/"}, "GCSDirectoryLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_cloud_storage_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_directory/"}, "GCSFileLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_cloud_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file/"}, "GoogleDriveLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/"}, "GoogleSpeechToTextLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "or a local file path: file_path = \"./audio.wav\"": "https://python.langchain.com/docs/integrations/document_loaders/google_speech_to_text/"}, "Blob": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_docai.md": "https://python.langchain.com/docs/integrations/document_transformers/google_docai/", "Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "DocAIParser": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_docai.md": "https://python.langchain.com/docs/integrations/document_transformers/google_docai/"}, "GoogleTranslateTransformer": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_translate.md": "https://python.langchain.com/docs/integrations/document_transformers/google_translate/"}, "BigQueryVectorSearch": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_bigquery_vector_search/"}, "VectorSearchVectorStore": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/"}, "ScaNN": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/"}, "GoogleDocumentAIWarehouseRetriever": 
{"Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "GoogleCloudTextToSpeechTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_cloud_texttospeech.md": "https://python.langchain.com/docs/integrations/tools/google_cloud_texttospeech/"}, "GoogleFinanceQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/"}, "GoogleFinanceAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/"}, "GoogleJobsQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/"}, "GoogleLensQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Runs google lens on an image of Danny Devito": "https://python.langchain.com/docs/integrations/tools/google_lens/"}, "GoogleLensAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Runs google lens on an image of Danny Devito": "https://python.langchain.com/docs/integrations/tools/google_lens/"}, "GooglePlacesTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_places.md": "https://python.langchain.com/docs/integrations/tools/google_places/"}, "GoogleScholarQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_scholar.md": "https://python.langchain.com/docs/integrations/tools/google_scholar/"}, "GoogleScholarAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_scholar.md": "https://python.langchain.com/docs/integrations/tools/google_scholar/"}, "GoogleSearchAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_search.md": "https://python.langchain.com/docs/integrations/tools/google_search/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/"}, "GoogleTrendsQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_trends.md": "https://python.langchain.com/docs/integrations/tools/google_trends/"}, "GoogleTrendsAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_trends.md": "https://python.langchain.com/docs/integrations/tools/google_trends/"}, "GmailToolkit": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "SearchApiAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/"}, "SerpAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "You can create 
the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/serpapi/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/"}, "GoogleSerperAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/"}, "YouTubeSearchTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "youtube.md": "https://python.langchain.com/docs/integrations/tools/youtube/"}, "YoutubeAudioLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/"}, "OpenAIWhisperParser": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/"}, "YoutubeLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "YouTube": "https://python.langchain.com/docs/integrations/providers/youtube/", "Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/"}, "AnthropicLLM": {"Anthropic": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/anthropic-checkpoint/", "anthropic.md": "https://python.langchain.com/docs/integrations/llms/anthropic/"}, "MatchingEngine": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/"}, "AzureCognitiveSearchRetriever": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/"}, "AIPluginTool": {"chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/"}, "DataForSeoAPIWrapper": {"dataforseo.md": "https://python.langchain.com/docs/integrations/tools/dataforseo/", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo/"}, "Tool": {"dataforseo.md": "https://python.langchain.com/docs/integrations/tools/dataforseo/", "You can create the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/serpapi/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "google_search.md": "https://python.langchain.com/docs/integrations/tools/google_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Based on ReAct Agent": 
"https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Pydantic compatibility": "https://python.langchain.com/docs/guides/development/pydantic_compatibility/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "ConneryService": {"Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/"}, "DataheraldAPIWrapper": {"dataherald.md": "https://python.langchain.com/docs/integrations/tools/dataherald/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/"}, "SearxSearchWrapper": {"searx_search.md": "https://python.langchain.com/docs/integrations/tools/searx_search/", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/"}, "PythonREPL": {"You can create the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/python/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/"}, "GoogleJobsAPIWrapper": {"use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/"}, "InfobipAPIWrapper": {"How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/"}, "StructuredTool": {"How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "E2BDataAnalysisTool": {"Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "SQLDatabase": {"In order to build a selectable on SA's Core API, you need a table definition.": "https://python.langchain.com/docs/integrations/tools/sql_database/", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "HumanInputRun": {"Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/"}, "NucliaUnderstandingAPI": {"nuclia.md": "https://python.langchain.com/docs/integrations/document_loaders/nuclia/", "Nuclia": "https://python.langchain.com/docs/integrations/providers/nuclia/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/"}, "YahooFinanceNewsTool": {"How YahooFinanceNewsTool works? {#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/"}, "WikidataAPIWrapper": {"wikidata.md": "https://python.langchain.com/docs/integrations/tools/wikidata/"}, "WikidataQueryRun": {"wikidata.md": "https://python.langchain.com/docs/integrations/tools/wikidata/"}, "TwilioAPIWrapper": {"twilio.md": "https://python.langchain.com/docs/integrations/tools/twilio/"}, "IFTTTWebhook": {"ifttt.md": "https://python.langchain.com/docs/integrations/tools/ifttt/"}, "SemanticScholarQueryRun": {"start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/"}, "WikipediaQueryRun": {"wikipedia.md": "https://python.langchain.com/docs/integrations/tools/wikipedia/", "index.md": "https://python.langchain.com/docs/modules/tools/index/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "WikipediaAPIWrapper": {"wikipedia.md": "https://python.langchain.com/docs/integrations/tools/wikipedia/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "index.md": "https://python.langchain.com/docs/modules/tools/index/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "AlphaVantageAPIWrapper": {"alpha_vantage.md": "https://python.langchain.com/docs/integrations/tools/alpha_vantage/"}, "StackExchangeAPIWrapper": {"stackexchange.md": "https://python.langchain.com/docs/integrations/tools/stackexchange/", "Stack Exchange": "https://python.langchain.com/docs/integrations/providers/stackexchange/"}, "TextRequestsWrapper": {"Each tool wrapps a requests wrapper": "https://python.langchain.com/docs/integrations/tools/requests/"}, "OpenWeatherMapAPIWrapper": {"openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap/"}, "get_from_env": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "NutritionAI": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "NutritionAIAPI": {"Get the 
prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "PubmedQueryRun": {"pubmed.md": "https://python.langchain.com/docs/integrations/tools/pubmed/"}, "ConversationBufferMemory": {"memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/"}, "GradientLLM": {"memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "Gradient": "https://python.langchain.com/docs/integrations/providers/gradient/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/"}, "ElevenLabsText2SpeechTool": {"eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "ElevenLabs": "https://python.langchain.com/docs/integrations/providers/elevenlabs/"}, "BearlyInterpreterTool": {"Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/"}, "VectorstoreIndexCreator": {"apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "hugging_face_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury/", "image_captions.md": "https://python.langchain.com/docs/integrations/document_loaders/image_captions/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/", "Set env var OPENAI_API_KEY or load from a .env file:": 
"https://python.langchain.com/docs/use_cases/web_scraping/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "ApifyWrapper": {"apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "Apify": "https://python.langchain.com/docs/integrations/providers/apify/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "ZapierToolkit": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "ZapierNLAWrapper": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "SimpleSequentialChain": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/predibase/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/"}, "TransformChain": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/"}, "ZapierNLARunAction": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "RivaASR": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "RivaTTS": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "RivaAudioEncoding": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "AudioStream": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "GoldenQueryAPIWrapper": {"golden_query.md": "https://python.langchain.com/docs/integrations/tools/golden_query/", "Golden": "https://python.langchain.com/docs/integrations/providers/golden/"}, "create_react_agent": {"arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/max_iterations/"}, "ArxivAPIWrapper": {"arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/"}, "DuckDuckGoSearchRun": {"ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "DuckDuckGoSearchAPIWrapper": {"ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/"}, "SceneXplainTool": {"sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/"}, "WolframAlphaAPIWrapper": {"wolfram_alpha.md": "https://python.langchain.com/docs/integrations/tools/wolfram_alpha/", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha/"}, "RunnableParallel": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ExaSearchRetriever": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "exa_search.md": "https://python.langchain.com/docs/integrations/providers/exa_search/"}, "TextContentsOptions": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/"}, "OpenAIFunctionsAgent": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/"}, "EdenAiExplicitImageTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiObjectDetectionTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden 
AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiParsingIDTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiParsingInvoiceTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiSpeechToTextTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiTextModerationTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiTextToSpeechTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAI": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/"}, "RedditSearchRun": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "RedditSearchAPIWrapper": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "RedditSearchSchema": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "StructuredChatAgent": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "ReadOnlySharedMemory": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "YouSearchTool": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/"}, "ShellTool": {"bash.md": "https://python.langchain.com/docs/integrations/tools/bash/"}, "PolygonAggregates": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonFinancials": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonLastQuote": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonTickerNews": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonAPIWrapper": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/"}, "PolygonAggregatesSchema": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "FileManagementToolkit": {"We'll make a temporary directory to avoid clutter": "https://python.langchain.com/docs/integrations/tools/filesystem/"}, "BraveSearch": {"brave_search.md": "https://python.langchain.com/docs/integrations/tools/brave_search/", "Brave Search": 
"https://python.langchain.com/docs/integrations/providers/brave_search/"}, "RedisChatMessageHistory": {"redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "RunnableWithMessageHistory": {"redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/memory/sqlite/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "ElasticsearchChatMessageHistory": {"If using Elastic Cloud:": "https://python.langchain.com/docs/integrations/memory/elasticsearch_chat_message_history/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/"}, "UpstashRedisChatMessageHistory": {"upstash_redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/upstash_redis_chat_message_history/", "Upstash Redis": "https://python.langchain.com/docs/integrations/providers/upstash/"}, "SingleStoreDBChatMessageHistory": {"singlestoredb_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/singlestoredb_chat_message_history/", "SingleStoreDB": "https://python.langchain.com/docs/integrations/providers/singlestoredb/"}, "PostgresChatMessageHistory": {"postgres_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/postgres_chat_message_history/"}, "MomentoChatMessageHistory": {"momento_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/momento_chat_message_history/", "Momento": "https://python.langchain.com/docs/integrations/providers/momento/"}, "XataChatMessageHistory": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Xata": "https://python.langchain.com/docs/integrations/providers/xata/"}, "XataVectorStore": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/"}, "create_retriever_tool": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Quickstart": 
"https://python.langchain.com/docs/get_started/quickstart/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "CassandraChatMessageHistory": {"cassandra_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/cassandra_chat_message_history/", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/"}, "SQLChatMessageHistory": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/memory/sqlite/", "SQLite": "https://python.langchain.com/docs/integrations/providers/sqlite/"}, "MotorheadMemory": {"loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "Mot\u00f6rhead": "https://python.langchain.com/docs/integrations/providers/motorhead/"}, "AstraDBChatMessageHistory": {"astradb_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/astradb_chat_message_history/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/"}, "StreamlitChatMessageHistory": {"Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "Streamlit": "https://python.langchain.com/docs/integrations/providers/streamlit/"}, "Neo4jChatMessageHistory": {"neo4j_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/neo4j_chat_message_history/"}, "TiDBChatMessageHistory": {"copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/"}, "RocksetChatMessageHistory": {"rockset_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/rockset_chat_message_history/", "Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/"}, "HuggingFaceTextGenInference": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/"}, "HuggingFaceEndpoint": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/"}, "HuggingFaceHub": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "format_log_to_str": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/"}, "ReActJsonSingleInputOutputParser": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "render_text_description": {"setup 
tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/"}, "AzureMLChatOnlineEndpoint": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/"}, "AzureMLEndpointApiType": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "CustomOpenAIChatContentFormatter": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/"}, "ChatKinetica": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/", "Kinetica": "https://python.langchain.com/docs/integrations/providers/kinetica/"}, "KineticaSqlOutputParser": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/"}, "KineticaSqlResponse": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/"}, "PaiEasChatEndpoint": {"alibaba_cloud_pai_eas.md": "https://python.langchain.com/docs/integrations/chat/alibaba_cloud_pai_eas/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/"}, "ChatFireworks": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatOctoAI": {"octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/"}, "ChatDeepInfra": {"get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/"}, "StreamingStdOutCallbackHandler": {"get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": 
"https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "ToolsOutputParser": {"open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/"}, "ChatGroq": {"groq.md": "https://python.langchain.com/docs/integrations/chat/groq/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatLiteLLM": {"litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/"}, "CallbackManager": {"litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "LlamaEdgeChatService": {"service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/"}, "HarmBlockThreshold": {"Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "HarmCategory": {"Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "OllamaFunctions": {"Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/"}, "create_extraction_chain": {"Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "VolcEngineMaasChat": {"Install the package": "https://python.langchain.com/docs/integrations/chat/volcengine_maas/"}, "ChatLlamaAPI": {"Replace 'Your_API_Token' with your actual API token": "https://python.langchain.com/docs/integrations/chat/llama_api/"}, 
"create_tagging_chain": {"Replace 'Your_API_Token' with your actual API token": "https://python.langchain.com/docs/integrations/chat/llama_api/"}, "ChatKonko": {"Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/"}, "create_structured_runnable": {"for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/"}, "MLXPipeline": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/"}, "ChatMLX": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/"}, "GigaChat": {"gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "Salute Devices": "https://python.langchain.com/docs/integrations/providers/salute_devices/"}, "JinaChat": {"get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/jinachat/"}, "SystemMessagePromptTemplate": {"get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/"}, "ChatOllama": {"LangChain supports many other chat models. 
Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "get_openai_callback": {"azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "You can kick off concurrent runs from within the context manager": "https://python.langchain.com/docs/modules/callbacks/token_counting/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "ChatEverlyAI": {"Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/"}, "GPTRouter": {"gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/"}, "GPTRouterModel": {"gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/"}, "ChatLiteLLMRouter": {"litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/"}, "ChatFriendli": {"friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/"}, "ChatMistralAI": {"If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "mistralai.md": "https://python.langchain.com/docs/integrations/providers/mistralai/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatZhipuAI": {"zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/"}, "create_json_chat_agent": {"zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/json_agent/"}, "ChatBaichuan": {"baichuan.md": "https://python.langchain.com/docs/integrations/chat/baichuan/", "Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/"}, "Llama2Chat": {"!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/"}, "LlamaCpp": {"!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "QianfanChatEndpoint": {"baidu_qianfan_endpoint.md": 
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint/", "ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/", "Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/"}, "ChatEdenAI": {"edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "ErnieBotChat": {"ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/"}, "ChatHunyuan": {"tencent_hunyuan.md": "https://python.langchain.com/docs/integrations/chat/tencent_hunyuan/", "Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/"}, "MiniMaxChat": {"minimax.md": "https://python.langchain.com/docs/integrations/chat/minimax/", "Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/"}, "ChatYuan2": {"yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/"}, "ChatTongyi": {"Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/"}, "PromptLayerChatOpenAI": {"promptlayer_chatopenai.md": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai/", "PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/"}, "ChatSparkLLM": {"sparkllm.md": "https://python.langchain.com/docs/integrations/chat/sparkllm/"}, "MoonshotChat": {"Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/chat/moonshot/"}, "ChatDappierAI": {"dappier.md": "https://python.langchain.com/docs/integrations/chat/dappier/"}, "ChatMaritalk": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/"}, "OnlinePDFLoader": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "load_qa_chain": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/"}, "ChatPremAI": {"First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/"}, "ChatAnyscale": {"Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": "https://python.langchain.com/docs/integrations/chat/anyscale/", "Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/"}, "ChatYandexGPT": {"yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "Yandex": "https://python.langchain.com/docs/integrations/providers/yandex/"}, "ChatPerplexity": {"perplexity.md": "https://python.langchain.com/docs/integrations/chat/perplexity/"}, "ChatAnthropicTools": 
{"anthropic_functions.md": "https://python.langchain.com/docs/integrations/chat/anthropic_functions/"}, "ChatMessage": {"Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/"}, "ConversationChain": {"Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/", "Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/"}, "DeepEvalCallbackHandler": {"Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "Confident AI": "https://python.langchain.com/docs/integrations/providers/confident/"}, "LLMonitorCallbackHandler": {"LLMonitor": "https://python.langchain.com/docs/integrations/providers/llmonitor/"}, "identify": {"LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/"}, "ContextCallbackHandler": {"context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "Context": "https://python.langchain.com/docs/integrations/providers/context/"}, "FiddlerCallbackHandler": {"Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "Fiddler": "https://python.langchain.com/docs/integrations/providers/fiddler/"}, "FewShotChatMessagePromptTemplate": {"Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/"}, "LabelStudioCallbackHandler": {"labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "Label Studio": "https://python.langchain.com/docs/integrations/providers/labelstudio/"}, "CometTracer": {"Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/"}, "ArgillaCallbackHandler": {"argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Argilla": "https://python.langchain.com/docs/integrations/providers/argilla/"}, "StdOutCallbackHandler": {"argilla.md": 
"https://python.langchain.com/docs/integrations/callbacks/argilla/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/"}, "PromptLayerCallbackHandler": {"promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/"}, "GPT4All": {"promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/gpt4all/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "StreamlitCallbackHandler": {"Streamlit": "https://python.langchain.com/docs/integrations/providers/streamlit/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/"}, "MultiQueryRetriever": {"1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/"}, "UpTrainCallbackHandler": {"1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "UpTrain": "https://python.langchain.com/docs/integrations/providers/uptrain/"}, "TrubricsCallbackHandler": {"trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Trubrics": "https://python.langchain.com/docs/integrations/providers/trubrics/"}, "InfinoCallbackHandler": {"Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "Infino": "https://python.langchain.com/docs/integrations/providers/infino/"}, "load_summarize_chain": {"Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "FigmaFileLoader": {"Figma": "https://python.langchain.com/docs/integrations/providers/figma/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/"}, "Baseten": {"Baseten": "https://python.langchain.com/docs/integrations/providers/baseten/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/baseten/"}, "WeatherDataLoader": {"Weather": "https://python.langchain.com/docs/integrations/providers/weather/", "Set API key either by passing it in to constructor directly": "https://python.langchain.com/docs/integrations/document_loaders/weather/"}, "Tair": {"Tair": "https://python.langchain.com/docs/integrations/providers/tair/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/"}, "CollegeConfidentialLoader": {"College Confidential": "https://python.langchain.com/docs/integrations/providers/college_confidential/", "college_confidential.md": "https://python.langchain.com/docs/integrations/document_loaders/college_confidential/"}, "RWKV": {"RWKV-4": "https://python.langchain.com/docs/integrations/providers/rwkv/"}, "LakeFSLoader": {"lakeFS": "https://python.langchain.com/docs/integrations/providers/lakefs/", "lakefs.md": "https://python.langchain.com/docs/integrations/document_loaders/lakefs/"}, "FaunaLoader": {"Fauna": "https://python.langchain.com/docs/integrations/providers/fauna/", "fauna.md": "https://python.langchain.com/docs/integrations/document_loaders/fauna/"}, "OCIGenAI": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/"}, "OCIModelDeploymentVLLM": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "Set authentication through ads": "https://python.langchain.com/docs/integrations/llms/oci_model_deployment_endpoint/"}, "OCIModelDeploymentTGI": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "Set authentication through ads": "https://python.langchain.com/docs/integrations/llms/oci_model_deployment_endpoint/"}, "Lantern": {"Lantern": 
"https://python.langchain.com/docs/integrations/providers/lantern/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/"}, "SQLiteCache": {"From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "set_llm_cache": {"From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/providers/mongodb_atlas/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "Fireworks": {"Fireworks": "https://python.langchain.com/docs/integrations/providers/fireworks/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/fireworks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "DropboxLoader": {"Dropbox": "https://python.langchain.com/docs/integrations/providers/dropbox/", "Generate access token: https://www.dropbox.com/developers/apps/create.": "https://python.langchain.com/docs/integrations/document_loaders/dropbox/"}, "ForefrontAI": {"ForefrontAI": "https://python.langchain.com/docs/integrations/providers/forefrontai/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/"}, "CometCallbackHandler": {"os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/"}, "CTransformers": {"C Transformers": "https://python.langchain.com/docs/integrations/providers/ctransformers/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/"}, "BiliBiliLoader": {"BiliBili": "https://python.langchain.com/docs/integrations/providers/bilibili/", "bilibili.md": "https://python.langchain.com/docs/integrations/document_loaders/bilibili/"}, "TencentCOSDirectoryLoader": {"Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "tencent_cos_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_directory/"}, "TencentCOSFileLoader": {"Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "tencent_cos_file.md": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_file/"}, "OBSDirectoryLoader": {"Huawei": "https://python.langchain.com/docs/integrations/providers/huawei/", "Install the required package": "https://python.langchain.com/docs/integrations/document_loaders/huawei_obs_directory/"}, "OBSFileLoader": {"Huawei": "https://python.langchain.com/docs/integrations/providers/huawei/", "Install the required package": 
"https://python.langchain.com/docs/integrations/document_loaders/huawei_obs_file/"}, "DiffbotLoader": {"Diffbot": "https://python.langchain.com/docs/integrations/providers/diffbot/", "diffbot.md": "https://python.langchain.com/docs/integrations/document_loaders/diffbot/"}, "DeepSparse": {"DeepSparse": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/deepsparse-checkpoint/", "deepsparse.md": "https://python.langchain.com/docs/integrations/llms/deepsparse/"}, "AimCallbackHandler": {"scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/"}, "ModernTreasuryLoader": {"Modern Treasury": "https://python.langchain.com/docs/integrations/providers/modern_treasury/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury/"}, "GitHubIssuesLoader": {"GitHub": "https://python.langchain.com/docs/integrations/providers/github/", "If you haven't set your access token as an environment variable, pass it in here.": "https://python.langchain.com/docs/integrations/document_loaders/github/"}, "GithubFileLoader": {"GitHub": "https://python.langchain.com/docs/integrations/providers/github/", "If you haven't set your access token as an environment variable, pass it in here.": "https://python.langchain.com/docs/integrations/document_loaders/github/"}, "Banana": {"Banana": "https://python.langchain.com/docs/integrations/providers/bananadev/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/"}, "InfinispanVS": {"Infinispan VS": "https://python.langchain.com/docs/integrations/providers/infinispanvs/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/"}, "CerebriumAI": {"CerebriumAI": "https://python.langchain.com/docs/integrations/providers/cerebriumai/", "Install the package": "https://python.langchain.com/docs/integrations/llms/cerebriumai/"}, "GutenbergLoader": {"Gutenberg": "https://python.langchain.com/docs/integrations/providers/gutenberg/", "gutenberg.md": "https://python.langchain.com/docs/integrations/document_loaders/gutenberg/"}, "WikipediaLoader": {"Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia/", "wikipedia.md": "https://python.langchain.com/docs/integrations/document_loaders/wikipedia/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/"}, "ConfluenceLoader": {"Confluence": "https://python.langchain.com/docs/integrations/providers/confluence/", "confluence.md": "https://python.langchain.com/docs/integrations/document_loaders/confluence/"}, "Predibase": {"Predibase": "https://python.langchain.com/docs/integrations/providers/predibase/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/predibase/"}, "Beam": {"Beam": "https://python.langchain.com/docs/integrations/providers/beam/", "Set the environment variables": "https://python.langchain.com/docs/integrations/llms/beam/"}, "GrobidParser": {"Grobid": "https://python.langchain.com/docs/integrations/providers/grobid/", "grobid.md": "https://python.langchain.com/docs/integrations/document_loaders/grobid/"}, "GenericLoader": {"Grobid": "https://python.langchain.com/docs/integrations/providers/grobid/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "grobid.md": 
"https://python.langchain.com/docs/integrations/document_loaders/grobid/", "Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "Typesense": {"Typesense": "https://python.langchain.com/docs/integrations/providers/typesense/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/"}, "Hologres": {"Hologres": "https://python.langchain.com/docs/integrations/providers/hologres/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/"}, "AI21": {"AI21 Labs": "https://python.langchain.com/docs/integrations/providers/ai21/"}, "ArangoGraph": {"ArangoDB": "https://python.langchain.com/docs/integrations/providers/arangodb/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/"}, "ArangoGraphQAChain": {"ArangoDB": "https://python.langchain.com/docs/integrations/providers/arangodb/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/"}, "ArcGISLoader": {"ArcGIS": "https://python.langchain.com/docs/integrations/providers/arcgis/", "arcgis.md": "https://python.langchain.com/docs/integrations/document_loaders/arcgis/"}, "WandbCallbackHandler": {"os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/"}, "ObsidianLoader": {"Obsidian": "https://python.langchain.com/docs/integrations/providers/obsidian/", "obsidian.md": "https://python.langchain.com/docs/integrations/document_loaders/obsidian/"}, "create_sql_agent": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/"}, "SQLDatabaseToolkit": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/"}, "Nebula": {"Nebula": "https://python.langchain.com/docs/integrations/providers/symblai_nebula/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/"}, "Writer": {"Writer": "https://python.langchain.com/docs/integrations/providers/writer/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/"}, "BaichuanLLM": {"Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/", "Load the model": "https://python.langchain.com/docs/integrations/llms/baichuan/"}, "ApacheDoris": {"Apache Doris": "https://python.langchain.com/docs/integrations/providers/apache_doris/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/apache_doris/"}, "ZepVectorStore": {"Zep": "https://python.langchain.com/docs/integrations/providers/zep/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/"}, "BrowserlessLoader": {"Browserless": "https://python.langchain.com/docs/integrations/providers/browserless/", "browserless.md": "https://python.langchain.com/docs/integrations/document_loaders/browserless/"}, "AZLyricsLoader": {"AZLyrics": "https://python.langchain.com/docs/integrations/providers/azlyrics/", "azlyrics.md": "https://python.langchain.com/docs/integrations/document_loaders/azlyrics/"}, "ToMarkdownLoader": {"2Markdown": "https://python.langchain.com/docs/integrations/providers/tomarkdown/", "You will need to get your own API key. See https://2markdown.com/login": "https://python.langchain.com/docs/integrations/document_loaders/tomarkdown/"}, "Mlflow": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "MlflowEmbeddings": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "ChatMlflow": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "GitLoader": {"Git": "https://python.langchain.com/docs/integrations/providers/git/", "e.g. 
loading only python files": "https://python.langchain.com/docs/integrations/document_loaders/git/"}, "MlflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "MlflowAIGatewayEmbeddings": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "ChatMLflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "Tigris": {"Tigris": "https://python.langchain.com/docs/integrations/providers/tigris/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/"}, "Meilisearch": {"Meilisearch": "https://python.langchain.com/docs/integrations/providers/meilisearch/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/"}, "SQLDatabaseChain": {"!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/"}, "SnowflakeLoader": {"Snowflake": "https://python.langchain.com/docs/integrations/providers/snowflake/", "snowflake.md": "https://python.langchain.com/docs/integrations/document_loaders/snowflake/"}, "CubeSemanticLoader": {"Cube": "https://python.langchain.com/docs/integrations/providers/cube/", "Read more about security context here: https://cube.dev/docs/security": "https://python.langchain.com/docs/integrations/document_loaders/cube_semantic/"}, "Clickhouse": {"ClickHouse": "https://python.langchain.com/docs/integrations/providers/clickhouse/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/"}, "ClickhouseSettings": {"ClickHouse": "https://python.langchain.com/docs/integrations/providers/clickhouse/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/"}, "ChatDatabricks": {"-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "DatabricksEmbeddings": {"-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "TelegramChatFileLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/", "telegram.md": "https://python.langchain.com/docs/integrations/document_loaders/telegram/"}, "TelegramChatApiLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/", "telegram.md": "https://python.langchain.com/docs/integrations/document_loaders/telegram/"}, "PredictionGuard": {"Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "Optional, add your OpenAI API Key. 
This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/"}, "Together": {"together.md": "https://python.langchain.com/docs/integrations/llms/together/"}, "NotionDirectoryLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion/", "notion.md": "https://python.langchain.com/docs/integrations/document_loaders/notion/"}, "NotionDBLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion/", "notiondb.md": "https://python.langchain.com/docs/integrations/document_loaders/notiondb/"}, "MWDumpLoader": {"MediaWikiDump": "https://python.langchain.com/docs/integrations/providers/mediawikidump/", "mediawiki-utilities supports XML schema 0.11 in unmerged branches": "https://python.langchain.com/docs/integrations/document_loaders/mediawikidump/"}, "BraveSearchLoader": {"Brave Search": "https://python.langchain.com/docs/integrations/providers/brave_search/", "brave_search.md": "https://python.langchain.com/docs/integrations/document_loaders/brave_search/"}, "StarRocks": {"StarRocks": "https://python.langchain.com/docs/integrations/providers/starrocks/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/"}, "GooseAI": {"GooseAI": "https://python.langchain.com/docs/integrations/providers/gooseai/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/"}, "DatadogLogsLoader": {"Datadog Logs": "https://python.langchain.com/docs/integrations/providers/datadog_logs/", "datadog_logs.md": "https://python.langchain.com/docs/integrations/document_loaders/datadog_logs/"}, "ApifyDatasetLoader": {"Apify": "https://python.langchain.com/docs/integrations/providers/apify/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/"}, "NLPCloud": {"NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/"}, "SemaDB": {"SemaDB": "https://python.langchain.com/docs/integrations/providers/semadb/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/"}, "GitbookLoader": {"GitBook": "https://python.langchain.com/docs/integrations/providers/gitbook/", "show second document": "https://python.langchain.com/docs/integrations/document_loaders/gitbook/"}, "VoyageAIRerank": {"VoyageAI": "https://python.langchain.com/docs/integrations/providers/voyageai/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/"}, "Rockset": {"Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/"}, "RocksetLoader": {"Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/", "Loading Documents {#loading-documents}": "https://python.langchain.com/docs/integrations/document_loaders/rockset/"}, "Minimax": {"Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/"}, "UnstructuredAPIFileIOLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredAPIFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "# Install package": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file/"}, "UnstructuredCHMLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredCSVLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "csv.md": "https://python.langchain.com/docs/integrations/document_loaders/csv/"}, "UnstructuredEmailLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "email.md": "https://python.langchain.com/docs/integrations/document_loaders/email/"}, "UnstructuredEPubLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "epub.md": "https://python.langchain.com/docs/integrations/document_loaders/epub/"}, "UnstructuredFileIOLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/"}, "UnstructuredFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "# Install package": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file/"}, "UnstructuredHTMLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "UnstructuredImageLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "image.md": "https://python.langchain.com/docs/integrations/document_loaders/image/"}, "UnstructuredMarkdownLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Markdown": "https://python.langchain.com/docs/modules/data_connection/document_loaders/markdown/"}, "UnstructuredODTLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "odt.md": 
"https://python.langchain.com/docs/integrations/document_loaders/odt/"}, "UnstructuredOrgModeLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "org_mode.md": "https://python.langchain.com/docs/integrations/document_loaders/org_mode/"}, "UnstructuredPDFLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "UnstructuredRSTLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "rst.md": "https://python.langchain.com/docs/integrations/document_loaders/rst/"}, "UnstructuredRTFLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredTSVLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "tsv.md": "https://python.langchain.com/docs/integrations/document_loaders/tsv/"}, "UnstructuredURLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "UnstructuredXMLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "xml.md": "https://python.langchain.com/docs/integrations/document_loaders/xml/"}, "SelfHostedPipeline": {"Runhouse": "https://python.langchain.com/docs/integrations/providers/runhouse/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/"}, "SelfHostedHuggingFaceLLM": {"Runhouse": "https://python.langchain.com/docs/integrations/providers/runhouse/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/"}, "MlflowCallbackHandler": {"SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/"}, "AstraDBVectorStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/"}, "AstraDBCache": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AstraDBSemanticCache": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AstraDBLoader": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/document_loaders/astradb/"}, "AstraDBStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/"}, "AstraDBByteStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/"}, "SpreedlyLoader": {"Spreedly": "https://python.langchain.com/docs/integrations/providers/spreedly/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/spreedly/"}, "OpenLLM": {"OpenLLM": "https://python.langchain.com/docs/integrations/providers/openllm/", "openllm.md": 
"https://python.langchain.com/docs/integrations/llms/openllm/"}, "PubMedLoader": {"PubMed": "https://python.langchain.com/docs/integrations/providers/pubmed/", "pubmed.md": "https://python.langchain.com/docs/integrations/document_loaders/pubmed/"}, "SearxSearchResults": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/"}, "ActionServerToolkit": {"Robocorp": "https://python.langchain.com/docs/integrations/providers/robocorp/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/"}, "SpacyTextSplitter": {"spaCy": "https://python.langchain.com/docs/integrations/providers/spacy/", "atlas.md": "https://python.langchain.com/docs/integrations/vectorstores/atlas/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "Modal": {"Modal": "https://python.langchain.com/docs/integrations/providers/modal/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/"}, "OpenCityDataLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/providers/geopandas/", "Load Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/geopandas/", "open_city_data.md": "https://python.langchain.com/docs/integrations/document_loaders/open_city_data/"}, "PGEmbedding": {"Postgres Embedding": "https://python.langchain.com/docs/integrations/providers/pg_embedding/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/"}, "SQLiteVSS": {"SQLite": "https://python.langchain.com/docs/integrations/providers/sqlite/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/"}, "Xinference": {"Xorbits Inference (Xinference)": "https://python.langchain.com/docs/integrations/providers/xinference/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/"}, "IFixitLoader": {"iFixit": "https://python.langchain.com/docs/integrations/providers/ifixit/", "ifixit.md": "https://python.langchain.com/docs/integrations/document_loaders/ifixit/"}, "AlephAlpha": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/", "Install the package": "https://python.langchain.com/docs/integrations/llms/aleph_alpha/"}, "PipelineAI": {"PipelineAI": "https://python.langchain.com/docs/integrations/providers/pipelineai/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/"}, "FacebookChatLoader": {"Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "pip install pandas": "https://python.langchain.com/docs/integrations/document_loaders/facebook_chat/"}, "Epsilla": {"Epsilla": "https://python.langchain.com/docs/integrations/providers/epsilla/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/"}, "AwaDB": {"AwaDB": "https://python.langchain.com/docs/integrations/providers/awadb/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/"}, "ArxivLoader": {"Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv/", "arxiv.md": "https://python.langchain.com/docs/integrations/document_loaders/arxiv/"}, "BlockchainDocumentLoader": {"Alchemy": "https://python.langchain.com/docs/integrations/providers/alchemy/", "get ALCHEMY_API_KEY from https://www.alchemy.com/": 
"https://python.langchain.com/docs/integrations/document_loaders/blockchain/"}, "BlockchainType": {"Alchemy": "https://python.langchain.com/docs/integrations/providers/alchemy/", "get ALCHEMY_API_KEY from https://www.alchemy.com/": "https://python.langchain.com/docs/integrations/document_loaders/blockchain/"}, "Anyscale": {"Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/"}, "AINetworkToolkit": {"AINetwork": "https://python.langchain.com/docs/integrations/providers/ainetwork/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/"}, "StripeLoader": {"Stripe": "https://python.langchain.com/docs/integrations/providers/stripe/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/stripe/"}, "StochasticAI": {"StochasticAI": "https://python.langchain.com/docs/integrations/providers/stochasticai/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/"}, "Bagel": {"BagelDB": "https://python.langchain.com/docs/integrations/providers/bageldb/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/"}, "TigerGraph": {"TigerGraph": "https://python.langchain.com/docs/integrations/providers/tigergraph/"}, "BlackboardLoader": {"Blackboard": "https://python.langchain.com/docs/integrations/providers/blackboard/", "blackboard.md": "https://python.langchain.com/docs/integrations/document_loaders/blackboard/"}, "YandexGPT": {"Yandex": "https://python.langchain.com/docs/integrations/providers/yandex/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/"}, "LanceDB": {"LanceDB": "https://python.langchain.com/docs/integrations/providers/lancedb/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/"}, "UpstashRedisCache": {"Upstash Redis": "https://python.langchain.com/docs/integrations/providers/upstash/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "NucliaTextTransformer": {"Nuclia": "https://python.langchain.com/docs/integrations/providers/nuclia/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/"}, "AnalyticDB": {"AnalyticDB": "https://python.langchain.com/docs/integrations/providers/analyticdb/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/"}, "GoogleApiYoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube/", "Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/"}, "PromptLayerOpenAI": {"PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/", "promptlayer_openai.md": "https://python.langchain.com/docs/integrations/llms/promptlayer_openai/"}, "USearch": {"USearch": "https://python.langchain.com/docs/integrations/providers/usearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/"}, "EtherscanLoader": {"Etherscan": 
"https://python.langchain.com/docs/integrations/providers/etherscan/", "etherscan.md": "https://python.langchain.com/docs/integrations/document_loaders/etherscan/"}, "Arcee": {"Arcee": "https://python.langchain.com/docs/integrations/providers/arcee/", "Create an instance of the Arcee class": "https://python.langchain.com/docs/integrations/llms/arcee/"}, "WhyLabsCallbackHandler": {"you don't need to call close to write profiles to WhyLabs, upload will occur periodically, but to demo let's not wait.": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling/"}, "IuguLoader": {"Iugu": "https://python.langchain.com/docs/integrations/providers/iugu/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/iugu/"}, "CouchbaseLoader": {"Couchbase": "https://python.langchain.com/docs/integrations/providers/couchbase/", "query is a valid SQL++ query": "https://python.langchain.com/docs/integrations/document_loaders/couchbase/"}, "FlyteCallbackHandler": {"Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/"}, "wandb_tracing_enabled": {"wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/"}, "ManifestWrapper": {"Hazy Research": "https://python.langchain.com/docs/integrations/providers/hazy_research/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "OntotextGraphDBGraph": {"Ontotext GraphDB": "https://python.langchain.com/docs/integrations/providers/ontotext_graphdb/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/"}, "OntotextGraphDBQAChain": {"Ontotext GraphDB": "https://python.langchain.com/docs/integrations/providers/ontotext_graphdb/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/"}, "Marqo": {"Marqo": "https://python.langchain.com/docs/integrations/providers/marqo/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/"}, "IMSDbLoader": {"IMSDb": "https://python.langchain.com/docs/integrations/providers/imsdb/", "imsdb.md": "https://python.langchain.com/docs/integrations/document_loaders/imsdb/"}, "TiDBLoader": {"TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/", "copy from tidb cloud console\uff0creplace it with your own": "https://python.langchain.com/docs/integrations/document_loaders/tidb/"}, "TiDBVectorStore": {"TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/"}, "DeepInfra": {"DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/"}, "RedditPostsLoader": {"Reddit": "https://python.langchain.com/docs/integrations/providers/reddit/", "load using 'subreddit' mode": "https://python.langchain.com/docs/integrations/document_loaders/reddit/"}, "TrelloLoader": {"Trello": "https://python.langchain.com/docs/integrations/providers/trello/", "If you have already set the API key and token using environment variables,": "https://python.langchain.com/docs/integrations/document_loaders/trello/"}, "AtlasDB": {"Atlas": "https://python.langchain.com/docs/integrations/providers/atlas/", "atlas.md": 
"https://python.langchain.com/docs/integrations/vectorstores/atlas/"}, "SKLearnVectorStore": {"scikit-learn": "https://python.langchain.com/docs/integrations/providers/sklearn/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/"}, "EverNoteLoader": {"EverNote": "https://python.langchain.com/docs/integrations/providers/evernote/", "lxml and html2text are required to parse EverNote notes": "https://python.langchain.com/docs/integrations/document_loaders/evernote/"}, "VDMS": {"VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/"}, "VDMS_Client": {"VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/"}, "TwitterTweetLoader": {"Twitter": "https://python.langchain.com/docs/integrations/providers/twitter/", "Or load from access token and consumer keys": "https://python.langchain.com/docs/integrations/document_loaders/twitter/"}, "DiscordChatLoader": {"Discord": "https://python.langchain.com/docs/integrations/providers/discord/", "discord.md": "https://python.langchain.com/docs/integrations/document_loaders/discord/"}, "AssemblyAIAudioTranscriptLoader": {"AssemblyAI": "https://python.langchain.com/docs/integrations/providers/assemblyai/", "or a local file path: audio_file = \"./nbc.mp3\"": "https://python.langchain.com/docs/integrations/document_loaders/assemblyai/"}, "RedisCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "RedisSemanticCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "Kinetica": {"Kinetica": "https://python.langchain.com/docs/integrations/providers/kinetica/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/kinetica/"}, "ClearMLCallbackHandler": {"Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/"}, "create_cohere_react_agent": {"Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/"}, "SlackDirectoryLoader": {"Slack": "https://python.langchain.com/docs/integrations/providers/slack/", "Optionally set your Slack URL. 
This will give you proper URLs in the docs sources.": "https://python.langchain.com/docs/integrations/document_loaders/slack/"}, "Ollama": {"Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "ollama.md": "https://python.langchain.com/docs/integrations/llms/ollama/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "HNLoader": {"Hacker News": "https://python.langchain.com/docs/integrations/providers/hacker_news/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_spanner/", "hacker_news.md": "https://python.langchain.com/docs/integrations/document_loaders/hacker_news/"}, "CTranslate2": {"CTranslate2": "https://python.langchain.com/docs/integrations/providers/ctranslate2/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/"}, "QianfanLLMEndpoint": {"Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/llms/baidu_qianfan_endpoint/"}, "BESVectorStore": {"Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/"}, "Aphrodite": {"PygmalionAI": "https://python.langchain.com/docs/integrations/providers/pygmalionai/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/"}, "PaiEasEndpoint": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/"}, "MaxComputeLoader": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "alibaba_cloud_maxcompute.md": "https://python.langchain.com/docs/integrations/document_loaders/alibaba_cloud_maxcompute/"}, "AlibabaCloudOpenSearch": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/"}, "AlibabaCloudOpenSearchSettings": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/"}, "DocusaurusLoader": {"Docusaurus": "https://python.langchain.com/docs/integrations/providers/docusaurus/", "fixes a bug with asyncio and jupyter": "https://python.langchain.com/docs/integrations/document_loaders/docusaurus/"}, "Annoy": {"Annoy": "https://python.langchain.com/docs/integrations/providers/annoy/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/"}, "BibtexLoader": {"BibTeX": "https://python.langchain.com/docs/integrations/providers/bibtex/", "Create a dummy bibtex file and download a pdf.": "https://python.langchain.com/docs/integrations/document_loaders/bibtex/"}, "Cassandra": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "cassandra.md": 
"https://python.langchain.com/docs/integrations/vectorstores/cassandra/"}, "CassandraCache": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CassandraSemanticCache": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CassandraLoader": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "cassandra.md": "https://python.langchain.com/docs/integrations/document_loaders/cassandra/"}, "Vearch": {"Vearch": "https://python.langchain.com/docs/integrations/providers/vearch/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/"}, "JoplinLoader": {"Joplin": "https://python.langchain.com/docs/integrations/providers/joplin/", "joplin.md": "https://python.langchain.com/docs/integrations/document_loaders/joplin/"}, "ArthurCallbackHandler": {"arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/"}, "AcreomLoader": {"Acreom": "https://python.langchain.com/docs/integrations/providers/acreom/", "acreom.md": "https://python.langchain.com/docs/integrations/document_loaders/acreom/"}, "KDBAI": {"KDB.AI": "https://python.langchain.com/docs/integrations/providers/kdbai/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/"}, "DuckDBLoader": {"DuckDB": "https://python.langchain.com/docs/integrations/providers/duckdb/", "duckdb.md": "https://python.langchain.com/docs/integrations/document_loaders/duckdb/"}, "Petals": {"Petals": "https://python.langchain.com/docs/integrations/providers/petals/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/"}, "MomentoCache": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "MomentoVectorIndex": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/"}, "NIBittensorLLM": {"Bittensor": "https://python.langchain.com/docs/integrations/providers/bittensor/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/"}, "Neo4jVector": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "Neo4jGraph": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "GraphCypherQAChain": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "DiffbotGraphTransformer": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/"}, "AirtableLoader": {"Airtable": "https://python.langchain.com/docs/integrations/providers/airtable/", "airtable.md": "https://python.langchain.com/docs/integrations/document_loaders/airtable/"}, "LarkSuiteDocLoader": {"ByteDance": "https://python.langchain.com/docs/integrations/providers/byte_dance/", "see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/"}, "JavelinAIGateway": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "JavelinAIGatewayEmbeddings": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "ChatJavelinAIGateway": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "TensorflowDatasetLoader": {"TensorFlow Datasets": "https://python.langchain.com/docs/integrations/providers/tensorflow_datasets/", "Feature structure of `mlqa/en` dataset:": "https://python.langchain.com/docs/integrations/document_loaders/tensorflow_datasets/"}, "Clarifai": {"Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/"}, "DataheraldTextToSQL": {"Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/"}, "RoamLoader": {"Roam": "https://python.langchain.com/docs/integrations/providers/roam/", "roam.md": "https://python.langchain.com/docs/integrations/document_loaders/roam/"}, "create_openai_tools_agent": {"Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/slack/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "CONDENSE_QUESTION_PROMPT": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "load_qa_with_sources_chain": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "QA_PROMPT": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "Chroma": {"Chroma": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/chroma-checkpoint/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "RedisStore": {"redis.md": "https://python.langchain.com/docs/integrations/stores/redis/"}, "InMemoryByteStore": {"in_memory.md": "https://python.langchain.com/docs/integrations/stores/in_memory/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "LocalFileStore": {"file_system.md": "https://python.langchain.com/docs/integrations/stores/file_system/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/"}, "CacheBackedEmbeddings": {"astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/"}, "UpstashRedisByteStore": {"upstash_redis.md": "https://python.langchain.com/docs/integrations/stores/upstash_redis/"}, "ConneryToolkit": {"Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/"}, "create_csv_agent": {"Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/"}, "create_xorbits_agent": {"xorbits.md": "https://python.langchain.com/docs/integrations/toolkits/xorbits/"}, "JiraToolkit": {"jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/"}, "JiraAPIWrapper": {"jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/"}, "create_spark_dataframe_agent": {"in apache-spark root directory. 
(tested here with \"spark-3.4.0-bin-hadoop3 and later\")": "https://python.langchain.com/docs/integrations/toolkits/spark/"}, "PyPDFLoader": {"document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/", "google_cloud_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "set_debug": {"document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/"}, "PythonREPLTool": {"Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/"}, "create_pbi_agent": {"fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "AzureCognitiveServicesToolkit": {"For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/"}, "Requests": {"Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/"}, "NLAToolkit": {"Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/"}, "build_resource_service": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "get_gmail_credentials": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "SlackToolkit": {"Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/slack/"}, "SteamToolkit": {"steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/"}, "SteamWebAPIWrapper": {"steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/"}, "create_json_agent": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/"}, "JsonToolkit": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/"}, "JsonSpec": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "AirbyteStripeLoader": {"airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "airbyte_stripe.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_stripe/"}, "create_pandas_dataframe_agent": {"airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/"}, "GitHubToolkit": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/"}, "GitHubAPIWrapper": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/"}, "ConversationSummaryBufferMemory": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/"}, "render_text_description_and_args": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "ClickupToolkit": {"Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/"}, "ClickupAPIWrapper": {"Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/"}, "create_spark_sql_agent": {"Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "SparkSQLToolkit": {"Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "SparkSQL": {"Note, you can also connect to Spark via Spark connect. 
For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "PlayWrightBrowserToolkit": {"If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/"}, "create_async_playwright_browser": {"If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/"}, "create_conversational_retrieval_agent": {"cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/"}, "CogniswitchToolkit": {"cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/"}, "NasaToolkit": {"nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/"}, "NasaAPIWrapper": {"nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/"}, "MultionToolkit": {"Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/"}, "AmadeusToolkit": {"Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "AzureAiServicesToolkit": {"azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/"}, "create_structured_chat_agent": {"azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/structured_chat/"}, "reduce_openapi_spec": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "RequestsWrapper": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "create_openapi_agent": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "OpenAPIToolkit": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "GitLabToolkit": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/"}, "GitLabAPIWrapper": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/"}, "PolygonToolkit": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/"}, "ApacheDorisSettings": {"load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/apache_doris/"}, "DistanceStrategy": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_bigquery_vector_search/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/"}, "KineticaSettings": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/kinetica/"}, "SentenceTransformerEmbeddings": {"You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/"}, "Vald": {"Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/"}, "RetrievalQAWithSourcesChain": {"install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "Yellowbrick": {"Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/"}, "LLMRails": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/llm_rails/"}, "HanaDB": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/"}, "VectorSearchVectorStoreDatastore": {"TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/"}, "VertexAI": {"TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/"}, "NucliaDB": {"nucliadb.md": "https://python.langchain.com/docs/integrations/vectorstores/nucliadb/"}, "Hippo": {"openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/"}, "RedisText": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisNum": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisTag": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisFilter": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "VespaStore": {"docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/"}, "CosmosDBSimilarityType": {"Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CosmosDBVectorSearchType": 
{"Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "NeuralDBVectorStore": {"From scratch": "https://python.langchain.com/docs/integrations/vectorstores/thirdai_neuraldb/"}, "VikingDB": {"vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/"}, "VikingDBConfig": {"vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/"}, "InMemoryDocstore": {"default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/"}, "CouchbaseVectorStore": {"Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/"}, "VLite": {"Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/"}, "DuckDB": {"duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/"}, "StarRocksSettings": {"load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/"}, "PathwayVectorClient": {"take into account only sources modified later than unix timestamp": "https://python.langchain.com/docs/integrations/vectorstores/pathway/"}, "DocArrayHnswSearch": {"Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw/"}, "TileDB": {"tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/"}, "EcloudESVectorStore": {"ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/"}, "SurrealDBStore": {"%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/"}, "ElasticVectorSearch": {"Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/"}, "PGVecto_rs": {"Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/"}, "JSONLoader": {"Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "JSON": "https://python.langchain.com/docs/modules/data_connection/document_loaders/json/"}, "CollectionConfig": {"Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/"}, "BaiduVectorDB": {"baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "openai": {"openai-old.md": "https://python.langchain.com/docs/integrations/adapters/openai-old/", "openai.md": "https://python.langchain.com/docs/integrations/adapters/openai/"}, "AsyncChromiumLoader": {"Load HTML": "https://python.langchain.com/docs/integrations/document_transformers/beautiful_soup/", "async_chromium.md": "https://python.langchain.com/docs/integrations/document_loaders/async_chromium/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "BeautifulSoupTransformer": {"Load HTML": 
"https://python.langchain.com/docs/integrations/document_transformers/beautiful_soup/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "OpenVINOReranker": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/"}, "create_metadata_tagger": {"Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/"}, "DoctranPropertyExtractor": {"doctran_extract_properties.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties/"}, "DoctranQATransformer": {"doctran_interrogate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document/"}, "CrossEncoderReranker": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/"}, "HuggingFaceCrossEncoder": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/"}, "DoctranTextTranslator": {"doctran_translate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document/"}, "XorbitsLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/xorbits/"}, "OutlookMessageLoader": {"email.md": "https://python.langchain.com/docs/integrations/document_loaders/email/"}, "TranscriptFormat": {"or a local file path: audio_file = \"./nbc.mp3\"": "https://python.langchain.com/docs/integrations/document_loaders/assemblyai/"}, "AirbyteSalesforceLoader": {"airbyte_salesforce.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_salesforce/"}, "AirbyteCDKLoader": {"airbyte_cdk.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_cdk/"}, "Docx2txtLoader": {"microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/"}, "RSpaceLoader": {"rspace.md": "https://python.langchain.com/docs/integrations/document_loaders/rspace/"}, "SeleniumURLLoader": {"url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "PlaywrightURLLoader": {"url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "AirbyteJSONLoader": {"airbyte_json.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_json/"}, "GeoDataFrameLoader": {"Load Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/geopandas/"}, "AirbyteTypeformLoader": {"airbyte_typeform.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_typeform/"}, "MHTMLLoader": {"Create a new loader object for the MHTML file": "https://python.langchain.com/docs/integrations/document_loaders/mhtml/"}, "NewsURLLoader": {"news.md": "https://python.langchain.com/docs/integrations/document_loaders/news/"}, "ImageCaptionLoader": {"image_captions.md": "https://python.langchain.com/docs/integrations/document_loaders/image_captions/"}, "LLMSherpaFileLoader": {"Install package": "https://python.langchain.com/docs/integrations/document_loaders/llmsherpa/"}, "NucliaLoader": {"nuclia.md": "https://python.langchain.com/docs/integrations/document_loaders/nuclia/"}, "TomlLoader": {"toml.md": 
"https://python.langchain.com/docs/integrations/document_loaders/toml/"}, "PsychicLoader": {"Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/"}, "FireCrawlLoader": {"firecrawl.md": "https://python.langchain.com/docs/integrations/document_loaders/firecrawl/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "FakeListLLM": {"see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "MergedDataLoader": {"merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/"}, "RecursiveUrlLoader": {"Parameters {#parameters}": "https://python.langchain.com/docs/integrations/document_loaders/recursive_url/"}, "AirbyteHubspotLoader": {"airbyte_hubspot.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_hubspot/"}, "AirbyteGongLoader": {"airbyte_gong.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_gong/"}, "ReadTheDocsLoader": {"readthedocs_documentation.md": "https://python.langchain.com/docs/integrations/document_loaders/readthedocs_documentation/"}, "PolarsDataFrameLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/polars_dataframe/"}, "DataFrameLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/pandas_dataframe/"}, "SurrealDBLoader": {"%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/document_loaders/surrealdb/"}, "GoogleApiClient": {"Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/"}, "ConcurrentLoader": {"concurrent.md": "https://python.langchain.com/docs/integrations/document_loaders/concurrent/"}, "RSSFeedLoader": {"rss.md": "https://python.langchain.com/docs/integrations/document_loaders/rss/"}, "PebbloSafeLoader": {"pebblo.md": "https://python.langchain.com/docs/integrations/document_loaders/pebblo/"}, "VsdxLoader": {"vsdx.md": "https://python.langchain.com/docs/integrations/document_loaders/vsdx/"}, "NotebookLoader": {"jupyter_notebook.md": "https://python.langchain.com/docs/integrations/document_loaders/jupyter_notebook/"}, "OracleAutonomousDatabaseLoader": {"oracleadb_loader.md": "https://python.langchain.com/docs/integrations/document_loaders/oracleadb_loader/"}, "LanguageParser": {"Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/"}, "Language": {"Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Full list of supported languages": "https://python.langchain.com/docs/modules/data_connection/document_transformers/code_splitter/"}, "SRTLoader": {"subtitle.md": 
"https://python.langchain.com/docs/integrations/document_loaders/subtitle/"}, "MastodonTootsLoader": {"Or set up access information to use a Mastodon app.": "https://python.langchain.com/docs/integrations/document_loaders/mastodon/"}, "AirbyteShopifyLoader": {"airbyte_shopify.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_shopify/"}, "GlueCatalogLoader": {"glue_catalog.md": "https://python.langchain.com/docs/integrations/document_loaders/glue_catalog/"}, "PySparkDataFrameLoader": {"pyspark_dataframe.md": "https://python.langchain.com/docs/integrations/document_loaders/pyspark_dataframe/"}, "AirbyteZendeskSupportLoader": {"airbyte_zendesk_support.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_zendesk_support/"}, "CoNLLULoader": {"conll-u.md": "https://python.langchain.com/docs/integrations/document_loaders/conll-u/"}, "MongodbLoader": {"add this import for running in jupyter notebook": "https://python.langchain.com/docs/integrations/document_loaders/mongodb/"}, "SitemapLoader": {"fixes a bug with asyncio and jupyter": "https://python.langchain.com/docs/integrations/document_loaders/sitemap/"}, "YuqueLoader": {"yuque.md": "https://python.langchain.com/docs/integrations/document_loaders/yuque/"}, "QuipLoader": {"quip.md": "https://python.langchain.com/docs/integrations/document_loaders/quip/"}, "MemgraphGraph": {"Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/"}, "GraphSparqlQAChain": {"rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/"}, "RdfGraph": {"rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/"}, "NebulaGraphQAChain": {"connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/"}, "NebulaGraph": {"connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/"}, "GremlinQAChain": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GremlinGraph": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GraphDocument": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "Node": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "Relationship": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GraphIndexCreator": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "GraphQAChain": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "NetworkxEntityGraph": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "HugeGraphQAChain": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/hugegraph/"}, "HugeGraph": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/hugegraph/"}, "AGEGraph": {"How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/apache_age/"}, "NeptuneSparqlQAChain": {"Optionally 
change the schema": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_sparql/"}, "NeptuneRdfGraph": {"Optionally change the schema": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_sparql/"}, "NeptuneGraph": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "NeptuneAnalyticsGraph": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "NeptuneOpenCypherQAChain": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "KuzuQAChain": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/"}, "KuzuGraph": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/"}, "FalkorDBQAChain": {"falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/"}, "FalkorDBGraph": {"falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/"}, "ConversationBufferWindowMemory": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/baseten/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/"}, "Solar": {"solar.md": "https://python.langchain.com/docs/integrations/llms/solar/"}, "IpexLLM": {"Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/"}, "SagemakerEndpoint": {"sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "LLMContentHandler": {"sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "OctoAIEndpoint": {"octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/"}, "TextGen": {"textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/"}, "MosaicML": {"sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/"}, "VolcEngineMaasLLM": {"Install the package": "https://python.langchain.com/docs/integrations/llms/volcengine_maas/"}, "KoboldApiLLM": {"koboldai.md": "https://python.langchain.com/docs/integrations/llms/koboldai/"}, "Konko": {"konko.md": "https://python.langchain.com/docs/integrations/llms/konko/"}, "AsyncCallbackHandler": {"Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "set_verbose": {"install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/"}, "OpaquePrompts": {"install the opaqueprompts and langchain packages": 
"https://python.langchain.com/docs/integrations/llms/opaqueprompts/"}, "TitanTakeoff": {"Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/"}, "Friendli": {"friendli.md": "https://python.langchain.com/docs/integrations/llms/friendli/"}, "Databricks": {"If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "LMFormatEnforcer": {"lmformatenforcer_experimental.md": "https://python.langchain.com/docs/integrations/llms/lmformatenforcer_experimental/"}, "VLLM": {"vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/"}, "VLLMOpenAI": {"vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/"}, "CustomOpenAIContentFormatter": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "ContentFormatterBase": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "DollyContentFormatter": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "load_llm": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "MapReduceChain": {"Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "ModelLaboratory": {"Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "RELLM": {"We'll choose a regex that matches to a structured json string that looks like:": "https://python.langchain.com/docs/integrations/llms/rellm_experimental/"}, "Yuan2": {"default infer_api for a local deployed Yuan2.0 inference server": "https://python.langchain.com/docs/integrations/llms/yuan2/"}, "InMemoryCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "GPTCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "SQLAlchemyCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AzureCosmosDBSemanticCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "SparkLLM": {"Load the model": "https://python.langchain.com/docs/integrations/llms/sparkllm/"}, "Moonshot": {"Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/llms/moonshot/"}, "OpenLM": {"Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/"}, "CloudflareWorkersAI": {"Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/"}, "ChatGLM3": {"Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/"}, "ChatGLM": {"Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/"}, "Llamafile": {"llamafile.md": "https://python.langchain.com/docs/integrations/llms/llamafile/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": 
"https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "LayerupSecurity": {"Layerup Security": "https://python.langchain.com/docs/guides/productionization/safety/layerup_security/"}, "JsonFormer": {"jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/"}, "WeightOnlyQuantPipeline": {"weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/"}, "Replicate": {"magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/"}, "create_history_aware_retriever": {"Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/"}, "BaseOutputParser": {"Quickstart": "https://python.langchain.com/docs/get_started/.ipynb_checkpoints/quickstart-checkpoint/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "ConditionalPromptSelector": {"Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "DatetimeOutputParser": {"Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/"}, "HuggingFaceInjectionIdentifier": {"Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/"}, "load_chain": {"Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/"}, "FallacyChain": {"Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/"}, "ModerationPiiError": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "BaseModerationConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationPiiConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationPromptSafetyConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationToxicityConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "BaseModerationCallbackHandler": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ConstitutionalChain": {"Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/"}, 
"ConstitutionalPrinciple": {"Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/"}, "format_document": {"QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"}, "runnable": {"Multi-language data anonymization with Microsoft Presidio {#multi-language-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/"}, "case_insensitive_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "fuzzy_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "combined_exact_fuzzy_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "load_evaluator": {"Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "Check for the presence of a YYYY-MM-DD string.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/regex_match/", "Correct": "https://python.langchain.com/docs/guides/productionization/evaluation/string/scoring_eval_chain/", "Alternatively": "https://python.langchain.com/docs/guides/productionization/evaluation/string/exact_match/", "The results purely character-based, so it's less useful when negation is concerned": "https://python.langchain.com/docs/guides/productionization/evaluation/string/string_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/"}, "load_dataset": {"Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/"}, "AgentTrajectoryEvaluator": {"custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/"}, "EmbeddingDistance": {"pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/"}, "PairwiseStringEvaluator": {"%env ANTHROPIC_API_KEY=YOUR_API_KEY": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/custom/"}, 
"Criteria": {"This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/"}, "JsonValidityEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonEqualityEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonEditDistanceEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonSchemaEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "RegexMatchStringEvaluator": {"Check for the presence of a YYYY-MM-DD string.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/regex_match/"}, "StringEvaluator": {"The perplexity is much higher since LangChain was introduced after 'gpt-2' was released and because it is never used in the following context.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/custom/"}, "ExactMatchStringEvaluator": {"Alternatively": "https://python.langchain.com/docs/guides/productionization/evaluation/string/exact_match/"}, "StringDistance": {"The results purely character-based, so it's less useful when negation is concerned": "https://python.langchain.com/docs/guides/productionization/evaluation/string/string_distance/"}, "WebResearchRetriever": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "StuffDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "MapReduceDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "ReduceDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "AnalyzeDocumentChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "get_openapi_chain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "APIChain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "open_meteo_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "tmdb_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "podcast_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "LLMRequestsChain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "FewShotPromptTemplate": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/", "Examples of a fictional translation task.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"}, "OPENAI_TEMPLATE": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_openai_data_generator": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "DatasetGenerator": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_data_generation_chain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_extraction_chain_pydantic": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "PydanticOutputParser": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/"}, "create_tool_calling_agent": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/"}, "Runnable": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/human_in_the_loop/"}, "RunnableConfig": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "ToolCall": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/"}, "JsonOutputParser": {"If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "Define your desired data structure.": 
"https://python.langchain.com/docs/modules/model_io/output_parsers/types/json/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/"}, "ConfigurableField": {"This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}, "RunnableBinding": {"This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/"}, "RunnablePick": {"Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "ChatMessageHistory": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Chat Messages": "https://python.langchain.com/docs/modules/memory/chat_messages/index/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "BaseChatMessageHistory": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "LogStreamCallbackHandler": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/"}, "JsonOutputKeyToolsParser": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "ChatAnthropicMessages": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/"}, "XMLOutputParser": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/"}, "EmbeddingsFilter": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "PydanticToolsParser": {"%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "chain": {"%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/"}, "Comparator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Comparison": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Operation": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Operator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "StructuredQuery": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "ChromaTranslator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "ElasticsearchTranslator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "LLMGraphTransformer": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/constructing/"}, "CypherQueryCorrector": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "Schema": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "AsyncCallbackManagerForToolRun": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "CallbackManagerForToolRun": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "BaseTool": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/"}, "format_to_openai_function_messages": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "OpenAIFunctionsAgentOutputParser": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "convert_to_openai_function": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/"}, "SemanticSimilarityExampleSelector": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/"}, "RunnableBranch": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/"}, "BSHTMLLoader": {"Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "create_structured_output_runnable": {"Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/"}, "BS4HTMLParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "PDFMinerParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "MimeTypeBasedParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "TextParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "PythonAstREPLTool": {"Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/"}, "create_sql_query_chain": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "QuerySQLDataBaseTool": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/quickstart/"}, "SQLRecordManager": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "index": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "BaseLoader": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "EnsembleRetriever": {"initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/"}, "JsonKeyOutputFunctionsParser": {"The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/"}, "LLMChainExtractor": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "LLMChainFilter": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "DocumentCompressorPipeline": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "CallbackManagerForRetrieverRun": {"Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/"}, "BaseRetriever": {"Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/"}, "TimeWeightedVectorStoreRetriever": {"Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/"}, "mock_now": {"Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/"}, "ParentDocumentRetriever": {"This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/"}, "StructuredQueryOutputParser": {"This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "get_query_constructor_prompt": {"This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "Pinecone": {"Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "RecursiveJsonSplitter": {"This is a large nested json object and will be loaded as a python dict": "https://python.langchain.com/docs/modules/data_connection/document_transformers/recursive_json_splitter/"}, "HTMLHeaderTextSplitter": {"for local file use html_splitter.split_text_from_file()": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_header_metadata/"}, "SemanticChunker": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/semantic-chunker/"}, "SentenceTransformersTokenTextSplitter": {"This is a long document we can split up.": 
"https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "NLTKTextSplitter": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "KonlpyTextSplitter": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "MarkdownHeaderTextSplitter": {"MD splits": "https://python.langchain.com/docs/modules/data_connection/document_transformers/markdown_header_metadata/"}, "HTMLSectionSplitter": {"Split": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter/"}, "BaseBlobParser": {"Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "FileSystemBlobLoader": {"Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "MathpixPDFLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyPDFium2Loader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFMinerLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFMinerPDFasHTMLLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyMuPDFLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyPDFDirectoryLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFPlumberLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PythonLoader": {"File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/"}, "ToolException": {"Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "MoveFileTool": {"tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/"}, "BaseMemory": {"!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/"}, "CombinedMemory": {"Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/"}, "ConversationSummaryMemory": {"Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/"}, "ConversationKGMemory": {"kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/"}, "ConversationTokenBufferMemory": {"We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/"}, "ConversationEntityMemory": {"Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/"}, "ENTITY_MEMORY_CONVERSATION_TEMPLATE": {"Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/"}, "VectorStoreRetrieverMemory": {"Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/"}, "BaseCallbackHandler": {"To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "First, define custom callback handler implementations": 
"https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/"}, "FileCallbackHandler": {"this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/"}, "LLMResult": {"To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "create_xml_agent": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/"}, "XMLAgentOutputParser": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/"}, "create_self_ask_with_search_agent": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "TavilyAnswer": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "OpenAIAssistantRunnable": {"openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "AgentActionMessageLog": {"Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "LLMMathChain": {"need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "ChatGenerationChunk": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "GenerationChunk": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "CommaSeparatedListOutputParser": {"Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/"}, "get_bedrock_anthropic_callback": {"!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/"}, "AIMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "FunctionMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "HumanMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "SystemMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "ToolMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AsyncCallbackManagerForLLMRun": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "CallbackManagerForLLMRun": {"custom_chat_model.md": 
"https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "SimpleChatModel": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "ChatGeneration": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "ChatResult": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "run_in_executor": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AIMessagePromptTemplate": {"Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/"}, "JsonOutputToolsParser": {"Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "RunnableGenerator": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "OutputParserException": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "BaseGenerationOutputParser": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "Generation": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "SimpleJsonOutputParser": {"Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/quick_start/"}, "ResponseSchema": {"structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/"}, "StructuredOutputParser": {"structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/"}, "YamlOutputParser": {"Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/yaml/"}, "OutputFixingParser": {"retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/"}, "RetryOutputParser": {"retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/"}, "EnumOutputParser": {"enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/"}, "JsonOutputFunctionsParser": {"openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/"}, "PandasDataFrameOutputParser": {"Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/"}, "PipelinePromptTemplate": {"composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/"}, "ChatMessagePromptTemplate": {"Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "MaxMarginalRelevanceExampleSelector": 
{"Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr/"}, "LengthBasedExampleSelector": {"Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/length_based/"}, "BaseExampleSelector": {"index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/"}, "LLM": {"custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "ChatPromptValue": {"prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "cosine_similarity": {"Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/"}, "ConfigurableFieldSpec": {"Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "HubRunnable": {"Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}} \ No newline at end of file +{"ChatPromptTemplate": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "groq.md": "https://python.langchain.com/docs/integrations/chat/groq/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "get a chat completion from the formatted messages": 
"https://python.langchain.com/docs/integrations/chat/vllm/", "LangChain supports many other chat models. Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "ai21.md": "https://python.langchain.com/docs/integrations/chat/ai21/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "perplexity.md": "https://python.langchain.com/docs/integrations/chat/perplexity/", "using chat invoke": "https://python.langchain.com/docs/integrations/chat/upstage/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": 
"https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/routing/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/query_checking/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": 
"https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ChatAnthropic": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/quick_start/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/multiple_tools/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "streaming.md": "https://python.langchain.com/docs/modules/model_io/chat/streaming/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": 
"https://python.langchain.com/docs/expression_language/how_to/routing/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ChatOpenAI": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/", "outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Remembrall": "https://python.langchain.com/docs/integrations/memory/remembrall/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/", "connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/", "graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/", "amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/", "falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/", "Initialize the language model": 
"https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "Correct": "https://python.langchain.com/docs/guides/productionization/evaluation/string/scoring_eval_chain/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Optional, uncomment to trace runs with LangSmith. Sign up here: https://smith.langchain.com.": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/routing/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "logprobs.md": "https://python.langchain.com/docs/modules/model_io/chat/logprobs/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/", "structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "The input schema of the chain is the input 
schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "SystemMessage": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/", "service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "gigachat.md": "https://python.langchain.com/docs/integrations/chat/gigachat/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Generate your api key from: https://platform.moonshot.cn/console/api-keys": 
"https://python.langchain.com/docs/integrations/chat/moonshot/", "First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": "https://python.langchain.com/docs/integrations/chat/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/model_io/chat/quick_start/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "HumanMessage": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/", "alibaba_cloud_pai_eas.md": "https://python.langchain.com/docs/integrations/chat/alibaba_cloud_pai_eas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/", "get a new token: 
https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/", "openai.md": "https://python.langchain.com/docs/integrations/chat/openai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "bedrock.md": "https://python.langchain.com/docs/integrations/chat/bedrock/", "gigachat.md": "https://python.langchain.com/docs/integrations/chat/gigachat/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "LangChain supports many other chat models. Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/", "If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "baichuan.md": "https://python.langchain.com/docs/integrations/chat/baichuan/", "baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/", "tencent_hunyuan.md": "https://python.langchain.com/docs/integrations/chat/tencent_hunyuan/", "minimax.md": "https://python.langchain.com/docs/integrations/chat/minimax/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "promptlayer_chatopenai.md": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai/", "sparkllm.md": "https://python.langchain.com/docs/integrations/chat/sparkllm/", "Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/chat/moonshot/", "dappier.md": "https://python.langchain.com/docs/integrations/chat/dappier/", "First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": 
"https://python.langchain.com/docs/integrations/chat/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/model_io/chat/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "RunnableMap": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/parallel/"}, "RunnableLambda": {"\ud83e\udd9c\ufe0f\ud83c\udfd3 LangServe": "https://python.langchain.com/docs/langserve/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/parallel/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "MessagesPlaceholder": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the 
new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "ToolMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Tool calling {#tool-calling}": 
"https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "tool": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/"}, "convert_to_openai_tool": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/"}, "TavilySearchResults": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/tools/tavily_search/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/"}, "format_tool_to_openai_function": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/"}, "BaseMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", 
"%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "FunctionMessage": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AgentAction": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/"}, "AgentFinish": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "create_openai_functions_agent": {"\ud83e\udd9c\ud83d\udd78\ufe0fLangGraph": "https://python.langchain.com/docs/langgraph/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_functions_agent/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "tracing_v2_enabled": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/"}, "AgentExecutor": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/", "Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/", "azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "format_to_openai_tool_messages": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/"}, "OpenAIToolsAgentOutputParser": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/"}, "DuckDuckGoSearchResults": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/"}, "AgentType": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. 
Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Using https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/"}, "initialize_agent": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "How YahooFinanceNewsTool works? 
{#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "bash.md": "https://python.langchain.com/docs/integrations/tools/bash/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "For Windows/Linux": 
"https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/", "If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/"}, "load_tools": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/tools/google_drive/", "Each tool wrapps a requests wrapper": "https://python.langchain.com/docs/integrations/tools/requests/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "sceneXplain.md": 
"https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "Golden": "https://python.langchain.com/docs/integrations/providers/golden/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo/", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap/", "Stack Exchange": "https://python.langchain.com/docs/integrations/providers/stackexchange/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "EvaluatorType": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/"}, "RunEvalConfig": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, 
"arun_on_dataset": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, "run_on_dataset": {"Used by the agent in this tutorial": "https://python.langchain.com/docs/langsmith/walkthrough/"}, "BaseChatModel": {"Contribute Integrations": "https://python.langchain.com/docs/contributing/integrations/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "deprecated": {"Contribute Integrations": "https://python.langchain.com/docs/contributing/integrations/"}, "ChatSession": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "map_ai_messages": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "The file token.json stores the user's access and refresh tokens, and is": "https://python.langchain.com/docs/integrations/chat_loaders/gmail/"}, "merge_chat_runs": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/", "This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "FolderFacebookMessengerChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/facebook/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "SingleFileFacebookMessengerChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/facebook/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "convert_messages_for_finetuning": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/"}, "StrOutputParser": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/", "del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/retrievers/tavily/", "LangChain supports many other chat models. Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Install the package": "https://python.langchain.com/docs/integrations/llms/volcengine_maas/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "AI21 Contextual Answer {#ai21-contextual-answer}": "https://python.langchain.com/docs/integrations/llms/ai21/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/query_checking/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "convert_message_to_dict": {"Filter out tweets that reference other tweets, because it's a bit weird": "https://python.langchain.com/docs/integrations/chat_loaders/twitter/"}, "AIMessage": {"Filter out tweets that reference other tweets, because it's a bit weird": "https://python.langchain.com/docs/integrations/chat_loaders/twitter/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Chat Bot Feedback Template": "https://python.langchain.com/docs/templates/chat-bot-feedback/", 
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/human_in_the_loop/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/few_shot/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/decomposition/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "Define a custom prompt to provide instructions and any additional context.": "https://python.langchain.com/docs/use_cases/extraction/how_to/examples/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/openai_tools/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "custom_agent.md": "https://python.langchain.com/docs/modules/agents/how_to/custom_agent/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "convert_pydantic_to_openai_function": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/"}, "PydanticOutputFunctionsParser": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/"}, "LangSmithRunChatLoader": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_llm_runs/"}, "GMailLoader": {"The file token.json stores the user's access and refresh tokens, and is": "https://python.langchain.com/docs/integrations/chat_loaders/gmail/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "SlackChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/slack/", "Slack": "https://python.langchain.com/docs/integrations/providers/slack/"}, "WhatsAppChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/whatsapp/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "WhatsApp": 
"https://python.langchain.com/docs/integrations/providers/whatsapp/", "whatsapp_chat.md": "https://python.langchain.com/docs/integrations/document_loaders/whatsapp_chat/"}, "LangSmithDatasetChatLoader": {"Wait for the fine-tuning to complete (this may take some time)": "https://python.langchain.com/docs/integrations/chat_loaders/langsmith_dataset/"}, "IMessageChatLoader": {"This uses some example data": "https://python.langchain.com/docs/integrations/chat_loaders/imessage/"}, "TelegramChatLoader": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/telegram/", "Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/"}, "base": {"Merge consecutive messages from the same sender into a single message": "https://python.langchain.com/docs/integrations/chat_loaders/discord/"}, "BookendEmbeddings": {"bookend.md": "https://python.langchain.com/docs/integrations/text_embedding/bookend/"}, "HuggingFaceBgeEmbeddings": {"bge_huggingface.md": "https://python.langchain.com/docs/integrations/text_embedding/bge_huggingface/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"}, "QuantizedBiEncoderEmbeddings": {"optimum_intel.md": "https://python.langchain.com/docs/integrations/text_embedding/optimum_intel/", "Intel": "https://python.langchain.com/docs/integrations/providers/intel/"}, "FireworksEmbeddings": {"Using the Embedding Model {#using-the-embedding-model}": "https://python.langchain.com/docs/integrations/text_embedding/fireworks/"}, "XinferenceEmbeddings": {"xinference.md": "https://python.langchain.com/docs/integrations/text_embedding/xinference/"}, "LLMRailsEmbeddings": {"llm_rails.md": "https://python.langchain.com/docs/integrations/text_embedding/llm_rails/"}, "DeepInfraEmbeddings": {"sign up for an account: https://deepinfra.com/login?utm_source=langchain": "https://python.langchain.com/docs/integrations/text_embedding/deepinfra/", "DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/"}, "HuggingFaceEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/", "Equivalent to SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")": "https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", 
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/", "Create collection if running for the first time. If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "HuggingFaceInferenceAPIEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/"}, "HuggingFaceHubEmbeddings": {"huggingfacehub.md": "https://python.langchain.com/docs/integrations/text_embedding/huggingfacehub/", "text_embeddings_inference.md": "https://python.langchain.com/docs/integrations/text_embedding/text_embeddings_inference/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/"}, "GoogleGenerativeAIEmbeddings": {"google_generative_ai.md": "https://python.langchain.com/docs/integrations/text_embedding/google_generative_ai/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "GPT4AllEmbeddings": {"gpt4all.md": "https://python.langchain.com/docs/integrations/text_embedding/gpt4all/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "MosaicMLInstructorEmbeddings": {"sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/text_embedding/mosaicml/"}, "QuantizedBgeEmbeddings": {"itrex.md": "https://python.langchain.com/docs/integrations/text_embedding/itrex/", "Intel": "https://python.langchain.com/docs/integrations/providers/intel/"}, "OpenAIEmbeddings": {"openai.md": "https://python.langchain.com/docs/integrations/text_embedding/openai/", "set the environment variables needed for openai package to know to reach out to azure": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai/", "azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "knn.md": "https://python.langchain.com/docs/integrations/retrievers/knn/", "initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/", "svm.md": 
"https://python.langchain.com/docs/integrations/retrievers/svm/", "create the index": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "Wait until the cluster is ready for use.": 
"https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "initialize MongoDB python client": 
"https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Text embedding models": "https://python.langchain.com/docs/modules/data_connection/text_embedding/index/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/semantic-chunker/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": 
"https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "VertexAIEmbeddings": {"google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/text_embedding/google_vertex_ai_palm/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_cloud_sql_pg/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "@markdown Please specify a source for demo purpose.": "https://python.langchain.com/docs/integrations/vectorstores/google_firestore/"}, "BedrockEmbeddings": {"async embed query": "https://python.langchain.com/docs/integrations/text_embedding/bedrock/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "GigaChatEmbeddings": {"gigachat.md": "https://python.langchain.com/docs/integrations/text_embedding/gigachat/", "Salute Devices": "https://python.langchain.com/docs/integrations/providers/salute_devices/"}, "OllamaEmbeddings": {"ollama.md": "https://python.langchain.com/docs/integrations/text_embedding/ollama/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/"}, "OCIGenAIEmbeddings": {"use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/"}, "FastEmbedEmbeddings": {"fastembed.md": "https://python.langchain.com/docs/integrations/text_embedding/fastembed/"}, "LlamaCppEmbeddings": {"llamacpp.md": "https://python.langchain.com/docs/integrations/text_embedding/llamacpp/", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp/"}, "NLPCloudEmbeddings": {"nlp_cloud.md": "https://python.langchain.com/docs/integrations/text_embedding/nlp_cloud/", "NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud/"}, "LaserEmbeddings": {"Ex Instantiationz": "https://python.langchain.com/docs/integrations/text_embedding/laser/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/"}, "OpenCLIPEmbeddings": {"Image URIs": "https://python.langchain.com/docs/integrations/text_embedding/open_clip/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/"}, "TitanTakeoffEmbed": {"Model config for the embedding model, where you can specify the following parameters:": "https://python.langchain.com/docs/integrations/text_embedding/titan_takeoff/"}, "MistralAIEmbeddings": {"pip install -U langchain-mistralai": "https://python.langchain.com/docs/integrations/text_embedding/mistralai/", "mistralai.md": 
"https://python.langchain.com/docs/integrations/providers/mistralai/"}, "SpacyEmbeddings": {"spacy_embedding.md": "https://python.langchain.com/docs/integrations/text_embedding/spacy_embedding/", "spaCy": "https://python.langchain.com/docs/integrations/providers/spacy/"}, "BaichuanTextEmbeddings": {"baichuan.md": "https://python.langchain.com/docs/integrations/text_embedding/baichuan/", "Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/"}, "TogetherEmbeddings": {"install package": "https://python.langchain.com/docs/integrations/text_embedding/together/", "together.md": "https://python.langchain.com/docs/integrations/providers/together/"}, "HuggingFaceInstructEmbeddings": {"instruct_embeddings.md": "https://python.langchain.com/docs/integrations/text_embedding/instruct_embeddings/", "Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/"}, "QianfanEmbeddingsEndpoint": {"baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/text_embedding/baidu_qianfan_endpoint/", "ernie.md": "https://python.langchain.com/docs/integrations/text_embedding/ernie/", "Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/"}, "CohereEmbeddings": {"cohere.md": "https://python.langchain.com/docs/integrations/text_embedding/cohere/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Text embedding models": "https://python.langchain.com/docs/modules/data_connection/text_embedding/index/"}, "EdenAiEmbeddings": {"edenai.md": "https://python.langchain.com/docs/integrations/text_embedding/edenai/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "JohnSnowLabsEmbeddings": {"If you have a enterprise license, you can run this to install enterprise features": "https://python.langchain.com/docs/integrations/text_embedding/johnsnowlabs_embedding/"}, "ErnieEmbeddings": {"ernie.md": "https://python.langchain.com/docs/integrations/text_embedding/ernie/"}, "LLMChain": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "LLM Hyperparameters": 
"https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/", "solar.md": "https://python.langchain.com/docs/integrations/llms/solar/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/", "openllm.md": "https://python.langchain.com/docs/integrations/llms/openllm/", "octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/gpt4all/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/", "sign up for an account: 
https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "Calling a single prompt": "https://python.langchain.com/docs/integrations/llms/ibm_watsonx/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "default infer_api for a local deployed Yuan2.0 inference server": "https://python.langchain.com/docs/integrations/llms/yuan2/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/", "Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/", "Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/", "Set env var OPENAI_API_KEY or load from a .env file": 
"https://python.langchain.com/docs/use_cases/summarization/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/"}, "ClarifaiEmbeddings": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/text_embedding/clarifai/", "Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai/"}, "PromptTemplate": {"Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/", "re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", 
"DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "airbyte.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/", "solar.md": "https://python.langchain.com/docs/integrations/llms/solar/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/", "openllm.md": "https://python.langchain.com/docs/integrations/llms/openllm/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/", "anthropic.md": "https://python.langchain.com/docs/integrations/llms/anthropic/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/", "sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "huggingface_pipelines.md": "https://python.langchain.com/docs/integrations/llms/huggingface_pipelines/", "install the opaqueprompts and langchain packages": 
"https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/", "AI21 Contextual Answer {#ai21-contextual-answer}": "https://python.langchain.com/docs/integrations/llms/ai21/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/llms/cohere/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/", "Calling a single prompt": "https://python.langchain.com/docs/integrations/llms/ibm_watsonx/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/", "Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/", "Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/", "Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/", "openvino.md": "https://python.langchain.com/docs/integrations/llms/openvino/", "weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", 
"Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/", "retry.md": 
"https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/", "Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "partial.md": "https://python.langchain.com/docs/modules/model_io/prompts/partial/", "composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/", "Examples of a fictional translation task.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}, "AzureOpenAIEmbeddings": {"set the environment variables needed for openai package to know to reach out to azure": "https://python.langchain.com/docs/integrations/text_embedding/azureopenai/", "Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/"}, "InfinityEmbeddings": {"Option 1: Use infinity from Python {#option-1-use-infinity-from-python}": "https://python.langchain.com/docs/integrations/text_embedding/infinity/", "Infinity": "https://python.langchain.com/docs/integrations/providers/infinity/"}, "InfinityEmbeddingsLocal": {"Option 1: Use infinity from Python {#option-1-use-infinity-from-python}": "https://python.langchain.com/docs/integrations/text_embedding/infinity/"}, "AwaEmbeddings": {"pip install awadb": "https://python.langchain.com/docs/integrations/text_embedding/awadb/", "AwaDB": "https://python.langchain.com/docs/integrations/providers/awadb/"}, "VolcanoEmbeddings": {"volcengine.md": "https://python.langchain.com/docs/integrations/text_embedding/volcengine/"}, "MiniMaxEmbeddings": {"minimax.md": "https://python.langchain.com/docs/integrations/text_embedding/minimax/", "Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/"}, "FakeEmbeddings": {"fake.md": "https://python.langchain.com/docs/integrations/text_embedding/fake/", "initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/vectara/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "from langchain_community.embeddings.openai 
import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "NeMoEmbeddings": {"nemo.md": "https://python.langchain.com/docs/integrations/text_embedding/nemo/"}, "NomicEmbeddings": {"install package": "https://python.langchain.com/docs/integrations/text_embedding/nomic/", "nomic.md": "https://python.langchain.com/docs/integrations/providers/nomic/"}, "SparkLLMTextEmbeddings": {"sparkllm.md": "https://python.langchain.com/docs/integrations/text_embedding/sparkllm/"}, "PremAIEmbeddings": {"Let's start by doing some imports and define our embedding object": "https://python.langchain.com/docs/integrations/text_embedding/premai/"}, "ElasticsearchEmbeddings": {"Define the model ID": "https://python.langchain.com/docs/integrations/text_embedding/elasticsearch/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/"}, "VoyageAIEmbeddings": {"retrieve the most relevant documents": "https://python.langchain.com/docs/integrations/text_embedding/voyageai/", "VoyageAI": "https://python.langchain.com/docs/integrations/providers/voyageai/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/"}, "KNNRetriever": {"retrieve the most relevant documents": "https://python.langchain.com/docs/integrations/text_embedding/voyageai/", "knn.md": "https://python.langchain.com/docs/integrations/retrievers/knn/"}, "SelfHostedEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "SelfHostedHuggingFaceEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "SelfHostedHuggingFaceInstructEmbeddings": {"For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/text_embedding/self-hosted/"}, "AnyscaleEmbeddings": {"anyscale.md": "https://python.langchain.com/docs/integrations/text_embedding/anyscale/", "Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/"}, "EmbaasEmbeddings": {"Set API key": "https://python.langchain.com/docs/integrations/text_embedding/embaas/"}, "YandexGPTEmbeddings": {"yandex.md": "https://python.langchain.com/docs/integrations/text_embedding/yandex/"}, "JinaEmbeddings": {"jina.md": "https://python.langchain.com/docs/integrations/text_embedding/jina/", "Jina": "https://python.langchain.com/docs/integrations/providers/jina/"}, "AlephAlphaAsymmetricSemanticEmbedding": {"aleph_alpha.md": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha/", "Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/"}, "AlephAlphaSymmetricSemanticEmbedding": {"aleph_alpha.md": "https://python.langchain.com/docs/integrations/text_embedding/aleph_alpha/", "Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/"}, "CloudflareWorkersAIEmbeddings": {"single string embeddings": "https://python.langchain.com/docs/integrations/text_embedding/cloudflare_workersai/", 
"Cloudflare": "https://python.langchain.com/docs/integrations/providers/cloudflare/"}, "DashScopeEmbeddings": {"dashscope.md": "https://python.langchain.com/docs/integrations/text_embedding/dashscope/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/"}, "TensorflowHubEmbeddings": {"tensorflowhub.md": "https://python.langchain.com/docs/integrations/text_embedding/tensorflowhub/"}, "LlamafileEmbeddings": {"llamafile setup": "https://python.langchain.com/docs/integrations/text_embedding/llamafile/"}, "GradientEmbeddings": {"(demo) compute similarity": "https://python.langchain.com/docs/integrations/text_embedding/gradient/", "Gradient": "https://python.langchain.com/docs/integrations/providers/gradient/"}, "ModelScopeEmbeddings": {"modelscope_hub.md": "https://python.langchain.com/docs/integrations/text_embedding/modelscope_hub/", "ModelScope": "https://python.langchain.com/docs/integrations/providers/modelscope/"}, "SagemakerEndpointEmbeddings": {"client = boto3.client(": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "EmbeddingsContentHandler": {"client = boto3.client(": "https://python.langchain.com/docs/integrations/text_embedding/sagemaker-endpoint/"}, "DocArrayInMemorySearch": {"async embed query": "https://python.langchain.com/docs/integrations/text_embedding/upstage/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/"}, "OpenVINOEmbeddings": {"openvino.md": "https://python.langchain.com/docs/integrations/text_embedding/openvino/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/"}, "OpenVINOBgeEmbeddings": {"openvino.md": "https://python.langchain.com/docs/integrations/text_embedding/openvino/"}, "NVIDIAEmbeddings": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "NVIDIA": "https://python.langchain.com/docs/integrations/providers/nvidia/"}, "FAISS": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "RunnablePassthrough": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": 
"https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/", "fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "%pip install -qU langchain langchain-community langchain-openai faker langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/high_cardinality/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/index/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "multiple_chains.md": "https://python.langchain.com/docs/expression_language/cookbook/multiple_chains/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/", "inspect.md": "https://python.langchain.com/docs/expression_language/how_to/inspect/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Binding: Attach runtime args {#binding-attach-runtime-args}": "https://python.langchain.com/docs/expression_language/primitives/binding/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/"}, "ChatNVIDIA": {"del os.environ['NVIDIA_API_KEY'] ## delete key and reset": "https://python.langchain.com/docs/integrations/text_embedding/nvidia_ai_endpoints/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "NVIDIA": "https://python.langchain.com/docs/integrations/providers/nvidia/"}, "LocalAIEmbeddings": {"if you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through": "https://python.langchain.com/docs/integrations/text_embedding/localai/"}, "AzureAISearchRetriever": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/"}, "DirectoryLoader": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/"}, "TextLoader": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Helper function for printing docs": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", "openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "atlas.md": "https://python.langchain.com/docs/integrations/vectorstores/atlas/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": 
"https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/vectorstores/clarifai/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "Document loaders": "https://python.langchain.com/docs/modules/data_connection/document_loaders/index/", "File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "TokenTextSplitter": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "AzureSearch": {"azure_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/"}, "RePhraseQueryRetriever": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/"}, "WebBaseLoader": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/", "merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/", "Use this piece of code for testing new custom BeautifulSoup parsers": "https://python.langchain.com/docs/integrations/document_loaders/web_base/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the 
model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/"}, "RecursiveCharacterTextSplitter": {"re_phrase.md": "https://python.langchain.com/docs/integrations/retrievers/re_phrase/", "Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Quickstart": "https://python.langchain.com/docs/use_cases/question_answering/.ipynb_checkpoints/quickstart-checkpoint/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-community 
langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/", "Full list of supported languages": "https://python.langchain.com/docs/modules/data_connection/document_transformers/code_splitter/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/", "for local file use html_splitter.split_text_from_file()": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_header_metadata/", "MD splits": "https://python.langchain.com/docs/modules/data_connection/document_transformers/markdown_header_metadata/", "Split": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "YouSearchAPIWrapper": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/"}, "YouRetriever": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/retrievers/you-retriever/"}, "Jaguar": {"cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Jaguar": "https://python.langchain.com/docs/integrations/providers/jaguar/"}, "CharacterTextSplitter": {"cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/", 
"openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/", "or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/", "duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/vectorstores/clarifai/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_in_memory/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/", "tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_memorystore_redis/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/", "ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/", "%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/", "If using the default Docker installation, use this instantiation instead:": 
"https://python.langchain.com/docs/integrations/vectorstores/opensearch/", "pinecone.md": "https://python.langchain.com/docs/integrations/vectorstores/pinecone/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/", "Option 1: use an OpenAI account": "https://python.langchain.com/docs/integrations/vectorstores/azuresearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "Create collection if running for the first time. If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "vectorstore.md": "https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/", 
"adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/"}, "MultiVectorRetriever": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "Document": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "client.schema.delete_all()": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid/", "bm25.md": "https://python.langchain.com/docs/integrations/retrievers/bm25/", "Create a retriever with a demo encoder": "https://python.langchain.com/docs/integrations/retrievers/qdrant-sparse/", "elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/", "tf_idf.md": "https://python.langchain.com/docs/integrations/retrievers/tf_idf/", "This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/", "@markdown Please specify a source for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_firestore/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "OR": 
"https://python.langchain.com/docs/integrations/vectorstores/faiss_async/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/vectorstores/faiss/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/", "ai21_semantic_text_splitter.md": "https://python.langchain.com/docs/integrations/document_transformers/ai21_semantic_text_splitter/", "Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/", "doctran_extract_properties.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties/", "google_translate.md": "https://python.langchain.com/docs/integrations/document_transformers/google_translate/", "doctran_interrogate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document/", "doctran_translate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document/", "Feature structure of `mlqa/en` dataset:": "https://python.langchain.com/docs/integrations/document_loaders/tensorflow_datasets/", "@markdown Please fill in the both the Google Cloud region and name of your Cloud SQL instance.": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_sql_mssql/", "airbyte_salesforce.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_salesforce/", "airbyte_cdk.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_cdk/", "airbyte_stripe.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_stripe/", "copypaste.md": "https://python.langchain.com/docs/integrations/document_loaders/copypaste/", "airbyte_typeform.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_typeform/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/document_loaders/google_datastore/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "airbyte_hubspot.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_hubspot/", "airbyte_gong.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_gong/", "@markdown Please specify an endpoint associated with the instance and a key prefix for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_memorystore_redis/", "@markdown Please specify an instance and a table for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_bigtable/", "@title Set Your Values 
Here { display-mode: \"form\" }": "https://python.langchain.com/docs/integrations/document_loaders/google_el_carro/", "airbyte_shopify.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_shopify/", "airbyte_zendesk_support.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_zendesk_support/", "@markdown Please specify an instance id, a database, and a table for demo purpose.": "https://python.langchain.com/docs/integrations/document_loaders/google_spanner/", "The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/constructing/", "Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "BaseStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/"}, "VectorStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/"}, "InMemoryStore": {"fleet_context.md": "https://python.langchain.com/docs/integrations/retrievers/fleet_context/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "This text splitter 
is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/"}, "ContextualCompressionRetriever": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/", "Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "ragatouille.md": "https://python.langchain.com/docs/integrations/providers/ragatouille/"}, "LLMLinguaCompressor": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/"}, "RetrievalQA": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/retrievers/llmlingua/", "bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/", "# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "ElasticSearchBM25Retriever": {"Alternatively, you can load an existing index": "https://python.langchain.com/docs/integrations/retrievers/elastic_search_bm25/"}, "OutlineRetriever": {"outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "Outline": 
"https://python.langchain.com/docs/integrations/providers/outline/"}, "ConversationalRetrievalChain": {"outline.md": "https://python.langchain.com/docs/integrations/retrievers/outline/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/"}, "ZepMemory": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "Zep": "https://python.langchain.com/docs/integrations/providers/zep/"}, "SearchScope": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore/"}, "SearchType": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/retrievers/zep_memorystore/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "ZepRetriever": {"Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "Zep": "https://python.langchain.com/docs/integrations/providers/zep/"}, "VespaRetriever": {"vespa.md": "https://python.langchain.com/docs/integrations/retrievers/vespa/", "Vespa": "https://python.langchain.com/docs/integrations/providers/vespa/"}, "AmazonKendraRetriever": {"amazon_kendra_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/amazon_kendra_retriever/"}, "AmazonKnowledgeBasesRetriever": {"bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/"}, "Bedrock": {"bedrock.md": "https://python.langchain.com/docs/integrations/retrievers/bedrock/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/"}, "CohereRerank": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/"}, "Cohere": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/cohere-reranker/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/"}, "NeuralDBRetriever": {"From scratch": "https://python.langchain.com/docs/integrations/retrievers/thirdai_neuraldb/"}, "SingleStoreDB": {"Establishing a connection to the database is facilitated through the singlestoredb Python connector.": "https://python.langchain.com/docs/integrations/vectorstores/singlestoredb/", "SingleStoreDB": "https://python.langchain.com/docs/integrations/providers/singlestoredb/"}, "WikipediaRetriever": {"get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/wikipedia/", "Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia/", "Uncomment if you want to log to LangSmith": 
"https://python.langchain.com/docs/use_cases/question_answering/citations/"}, "MetalRetriever": {"metal.md": "https://python.langchain.com/docs/integrations/retrievers/metal/", "Metal": "https://python.langchain.com/docs/integrations/providers/metal/"}, "BreebsRetriever": {"breebs.md": "https://python.langchain.com/docs/integrations/retrievers/breebs/", "Breebs (Open Knowledge)": "https://python.langchain.com/docs/integrations/providers/breebs/"}, "CSVLoader": {"STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "csv.md": "https://python.langchain.com/docs/integrations/document_loaders/csv/", "pebblo.md": "https://python.langchain.com/docs/integrations/document_loaders/pebblo/", "CSV": "https://python.langchain.com/docs/modules/data_connection/document_loaders/csv/"}, "ChatGPTPluginRetriever": {"STEP 1: Load": "https://python.langchain.com/docs/integrations/retrievers/chatgpt-plugin/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/"}, "KayAiRetriever": {"Setup API keys for Kay and OpenAI": "https://python.langchain.com/docs/integrations/retrievers/sec_filings/", "Setup API key": "https://python.langchain.com/docs/integrations/retrievers/kay/"}, "ChatCohere": {"cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/chat/cohere/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/quick_start/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "CohereRagRetriever": {"cohere.md": "https://python.langchain.com/docs/integrations/retrievers/cohere/", "Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/"}, "DriaRetriever": {"Installation {#installation}": "https://python.langchain.com/docs/integrations/retrievers/dria_index/"}, "DocArrayRetriever": {"initialize the index": "https://python.langchain.com/docs/integrations/retrievers/docarray_retriever/"}, "SVMRetriever": {"svm.md": "https://python.langchain.com/docs/integrations/retrievers/svm/"}, "TavilySearchAPIRetriever": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/retrievers/tavily/"}, "PineconeHybridSearchRetriever": {"create the index": "https://python.langchain.com/docs/integrations/retrievers/pinecone_hybrid_search/", "Pinecone": "https://python.langchain.com/docs/integrations/providers/pinecone/"}, "DeepLake": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "Activeloop Deep Lake": "https://python.langchain.com/docs/integrations/providers/activeloop_deeplake/", "or shorter": "https://python.langchain.com/docs/integrations/vectorstores/activeloop_deeplake/"}, "AsyncHtmlLoader": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "html2text.md": "https://python.langchain.com/docs/integrations/document_transformers/html2text/", "async_html.md": "https://python.langchain.com/docs/integrations/document_loaders/async_html/", "Set env var OPENAI_API_KEY or 
load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "Html2TextTransformer": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "html2text.md": "https://python.langchain.com/docs/integrations/document_transformers/html2text/", "async_chromium.md": "https://python.langchain.com/docs/integrations/document_loaders/async_chromium/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "create_structured_output_chain": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "HumanMessagePromptTemplate": {"# activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `": "https://python.langchain.com/docs/integrations/retrievers/activeloop/", "get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/", "Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/", "Prompt templates": "https://python.langchain.com/docs/modules/model_io/prompts/prompt_templates/.ipynb_checkpoints/index-checkpoint/"}, "PubMedRetriever": {"pubmed.md": "https://python.langchain.com/docs/integrations/retrievers/pubmed/", "PubMed": "https://python.langchain.com/docs/integrations/providers/pubmed/"}, "WeaviateHybridSearchRetriever": {"client.schema.delete_all()": "https://python.langchain.com/docs/integrations/retrievers/weaviate-hybrid/"}, "EmbedchainRetriever": {"Installation {#installation}": "https://python.langchain.com/docs/integrations/retrievers/embedchain/"}, "create_retrieval_chain": {"ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/"}, "create_stuff_documents_chain": {"ragatouille.md": "https://python.langchain.com/docs/integrations/retrievers/ragatouille/", "Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import 
dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/"}, "ArxivRetriever": {"get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/retrievers/arxiv/", "Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv/"}, "BM25Retriever": {"bm25.md": "https://python.langchain.com/docs/integrations/retrievers/bm25/", "Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/"}, "QdrantSparseVectorRetriever": {"Create a retriever with a demo encoder": "https://python.langchain.com/docs/integrations/retrievers/qdrant-sparse/"}, "DeterministicFakeEmbedding": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/"}, "Embeddings": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/"}, "ElasticsearchRetriever": {"elasticsearch_retriever.md": "https://python.langchain.com/docs/integrations/retrievers/elasticsearch_retriever/"}, "ArceeRetriever": {"Define filters": "https://python.langchain.com/docs/integrations/retrievers/arcee/", "Arcee": "https://python.langchain.com/docs/integrations/providers/arcee/"}, "FlashrankRerank": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/retrievers/flashrank-reranker/", "1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/"}, "ChaindeskRetriever": {"chaindesk.md": "https://python.langchain.com/docs/integrations/retrievers/chaindesk/", "Chaindesk": "https://python.langchain.com/docs/integrations/providers/chaindesk/"}, "MergerRetriever": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/"}, "EmbeddingsClusteringFilter": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/"}, "EmbeddingsRedundantFilter": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "LongContextReorder": {"Get 3 diff embeddings.": "https://python.langchain.com/docs/integrations/retrievers/merger_retriever/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "TFIDFRetriever": {"tf_idf.md": "https://python.langchain.com/docs/integrations/retrievers/tf_idf/"}, "GoogleVertexAIMultiTurnSearchRetriever": {"google_vertex_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/google_vertex_ai_search/"}, "GoogleVertexAISearchRetriever": {"google_vertex_ai_search.md": "https://python.langchain.com/docs/integrations/retrievers/google_vertex_ai_search/", "Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "Milvus": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/milvus_self_query/", "Milvus": "https://python.langchain.com/docs/integrations/providers/milvus/", "Zilliz": "https://python.langchain.com/docs/integrations/providers/zilliz/", "replace": "https://python.langchain.com/docs/integrations/vectorstores/zilliz/", "This will only get documents for Ankush": "https://python.langchain.com/docs/integrations/vectorstores/milvus/"}, "AttributeInfo": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "self-query-qdrant": "https://python.langchain.com/docs/templates/self-query-qdrant/", "This example only specifies a filter": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "SelfQueryRetriever": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/retrievers/self_query/vectara_self_query/", "create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "Chroma": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/chroma-checkpoint/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/", "Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "OpenAI": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "or install latest:": "https://python.langchain.com/docs/integrations/retrievers/self_query/dingo/", "Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "create new index": "https://python.langchain.com/docs/integrations/retrievers/self_query/pinecone/", "in case if some queries fail consider installing libdeeplake manually": "https://python.langchain.com/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query/", "import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/", "awslambda.md": "https://python.langchain.com/docs/integrations/tools/awslambda/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": 
"https://python.langchain.com/docs/integrations/tools/google_drive/", "openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "search_tools.md": "https://python.langchain.com/docs/integrations/tools/search_tools/", "eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/", "lemonai.md": "https://python.langchain.com/docs/integrations/tools/lemonai/", "graphql.md": "https://python.langchain.com/docs/integrations/tools/graphql/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/.ipynb_checkpoints/streamlit-checkpoint/", "trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Log10": "https://python.langchain.com/docs/integrations/providers/log10/", "LangChain Decorators \u2728": "https://python.langchain.com/docs/integrations/providers/langchain_decorators/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/", "Serper - Google Search API": 
"https://python.langchain.com/docs/integrations/providers/google_serper/", "Helicone": "https://python.langchain.com/docs/integrations/providers/helicone/", "Shale Protocol": "https://python.langchain.com/docs/integrations/providers/shaleprotocol/", "you don't need to call close to write profiles to WhyLabs, upload will occur periodically, but to demo let's not wait.": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling/", "wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "0: Import ray serve and request from starlette": "https://python.langchain.com/docs/integrations/providers/ray_serve/", "Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/", "xorbits.md": "https://python.langchain.com/docs/integrations/toolkits/xorbits/", "jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/", "in apache-spark root directory. (tested here with \"spark-3.4.0-bin-hadoop3 and later\")": "https://python.langchain.com/docs/integrations/toolkits/spark/", "For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/", "Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/", "steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/", "json.md": "https://python.langchain.com/docs/integrations/toolkits/json/", "Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/", "azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "DocumentDB connection string": "https://python.langchain.com/docs/integrations/vectorstores/documentdb/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/", "get a token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/llms/openai/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "Layerup Security": "https://python.langchain.com/docs/guides/productionization/safety/layerup_security/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/", "Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/", "Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/", "Self-querying": 
"https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/", "Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "First, define custom callback handler implementations": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/", "You can kick off concurrent runs from within the context manager": "https://python.langchain.com/docs/modules/callbacks/token_counting/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/handle_parsing_errors/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/quick_start/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "streaming_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/streaming_llm/", "Quick Start {#quick-start}": "https://python.langchain.com/docs/modules/model_io/llms/quick_start/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/"}, "PGVector": {"This example only specifies a relevant query": 
"https://python.langchain.com/docs/integrations/retrievers/self_query/pgvector_self_query/", "PGVector": "https://python.langchain.com/docs/integrations/providers/pgvector/", "See docker command above to launch a postgres instance with pgvector enabled.": "https://python.langchain.com/docs/integrations/vectorstores/pgvector/"}, "Weaviate": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/weaviate_self_query/"}, "Vectara": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/vectara/", "Vectara": "https://python.langchain.com/docs/integrations/providers/vectara/index/"}, "DashVector": {"create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "DashVector": "https://python.langchain.com/docs/integrations/providers/dashvector/", "add texts": "https://python.langchain.com/docs/integrations/vectorstores/dashvector/"}, "Tongyi": {"create DashVector collection": "https://python.langchain.com/docs/integrations/retrievers/self_query/dashvector/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "Install the package": "https://python.langchain.com/docs/integrations/llms/tongyi/"}, "DatabricksVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/databricks_vector_search/", "databricks_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/databricks_vector_search/"}, "Dingo": {"or install latest:": "https://python.langchain.com/docs/integrations/vectorstores/dingo/", "DingoDB": "https://python.langchain.com/docs/integrations/providers/dingo/"}, "OpenSearchVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/opensearch_self_query/", "AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "OpenSearch": "https://python.langchain.com/docs/integrations/providers/opensearch/", "If using the default Docker installation, use this instantiation instead:": "https://python.langchain.com/docs/integrations/vectorstores/opensearch/"}, "ElasticsearchStore": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/elasticsearch_self_query/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/", "Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/", "indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "ConnectionParams": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/", "baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "MetaField": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/"}, "TencentVectorDB": {"This example only specifies a relevant query": 
"https://python.langchain.com/docs/integrations/retrievers/self_query/tencentvectordb/", "Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "from langchain_community.embeddings.openai import OpenAIEmbeddings": "https://python.langchain.com/docs/integrations/vectorstores/tencentvectordb/"}, "TimescaleVector": {"Get openAI api key by reading local .env file": "https://python.langchain.com/docs/integrations/retrievers/self_query/timescalevector_self_query/", "Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/"}, "AstraDB": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/astradb/"}, "SupabaseVectorStore": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/supabase_self_query/", "Supabase (Postgres)": "https://python.langchain.com/docs/integrations/providers/supabase/", "with pip": "https://python.langchain.com/docs/integrations/vectorstores/supabase/"}, "Redis": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/redis_self_query/", "Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "MyScale": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/myscale_self_query/", "MyScale": "https://python.langchain.com/docs/integrations/providers/myscale/", "use directly a `where_str` to delete": "https://python.langchain.com/docs/integrations/vectorstores/myscale/"}, "MongoDBAtlasVectorSearch": {"This example only specifies a relevant query": "https://python.langchain.com/docs/integrations/retrievers/self_query/mongodb_atlas/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/"}, "Qdrant": {"import os": "https://python.langchain.com/docs/integrations/retrievers/self_query/qdrant_self_query/", "Qdrant": "https://python.langchain.com/docs/integrations/providers/qdrant/", "qdrant.md": "https://python.langchain.com/docs/integrations/vectorstores/qdrant/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/"}, "AzureMLOnlineEndpoint": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "AzureOpenAI": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "The API version you want to use: set this to `2023-12-01-preview` for the released version.": "https://python.langchain.com/docs/integrations/llms/azure_openai/"}, "AzureChatOpenAI": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "AzureAIDataLoader": {"Microsoft": 
"https://python.langchain.com/docs/integrations/platforms/microsoft/", "Create a connection to your project": "https://python.langchain.com/docs/integrations/document_loaders/azure_ai_data/"}, "AzureAIDocumentIntelligenceLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/", "microsoft_excel.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_excel/", "microsoft_powerpoint.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint/", "azure_document_intelligence.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_document_intelligence/", "Microsoft Office": "https://python.langchain.com/docs/modules/data_connection/document_loaders/office_file/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "AzureBlobStorageContainerLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "azure_blob_storage_container.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_container/"}, "AzureBlobStorageFileLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "azure_blob_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/azure_blob_storage_file/"}, "OneDriveLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "microsoft_onedrive.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onedrive/"}, "UnstructuredWordDocumentLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/"}, "UnstructuredExcelLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_excel.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_excel/"}, "SharePointLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "loads documents from root directory": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_sharepoint/"}, "UnstructuredPowerPointLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "microsoft_powerpoint.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_powerpoint/"}, "OneNoteLoader": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "microsoft_onenote.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_onenote/"}, "AzureCosmosDBVectorSearch": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/"}, 
"O365Toolkit": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/office365/"}, "PowerBIToolkit": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "PowerBIDataset": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "BingSearchAPIWrapper": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/", "bing_search.md": "https://python.langchain.com/docs/integrations/tools/bing_search/"}, "PresidioAnonymizer": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/"}, "PresidioReversibleAnonymizer": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/microsoft/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "Multi-language data anonymization with Microsoft Presidio {#multi-language-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/", "Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/", "Download model": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/index/"}, "AmazonAPIGateway": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "These are sample parameters for Falcon 40B Instruct Deployed from Amazon SageMaker JumpStart": "https://python.langchain.com/docs/integrations/llms/amazon_api_gateway/"}, "ContentHandlerBase": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "S3DirectoryLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "aws_s3_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_directory/"}, "S3FileLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "aws_s3_file.md": "https://python.langchain.com/docs/integrations/document_loaders/aws_s3_file/"}, "AmazonTextractPDFLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "AthenaLoader": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "athena.md": "https://python.langchain.com/docs/integrations/document_loaders/athena/"}, "DocumentDBVectorSearch": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/"}, "DynamoDBChatMessageHistory": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": 
"https://python.langchain.com/docs/integrations/memory/aws_dynamodb/"}, "SageMakerCallbackHandler": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/"}, "AmazonComprehendModerationChain": {"AWS": "https://python.langchain.com/docs/integrations/platforms/aws/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ChatHuggingFace": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/"}, "HuggingFacePipeline": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "huggingface_pipelines.md": "https://python.langchain.com/docs/integrations/llms/huggingface_pipelines/", "lmformatenforcer_experimental.md": "https://python.langchain.com/docs/integrations/llms/lmformatenforcer_experimental/", "We'll choose a regex that matches to a structured json string that looks like:": "https://python.langchain.com/docs/integrations/llms/rellm_experimental/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/", "jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/", "openvino.md": "https://python.langchain.com/docs/integrations/llms/openvino/", "weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/"}, "HuggingFaceDatasetLoader": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "hugging_face_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset/"}, "load_huggingface_tool": {"Hugging Face": "https://python.langchain.com/docs/integrations/platforms/huggingface/", "Requires transformers>=4.29.0 and huggingface_hub>=0.14.1": "https://python.langchain.com/docs/integrations/tools/huggingface_tools/"}, "ChatGPTLoader": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "chatgpt_loader.md": "https://python.langchain.com/docs/integrations/document_loaders/chatgpt_loader/"}, "DallEAPIWrapper": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "Needed if you would like to display images in the notebook": "https://python.langchain.com/docs/integrations/tools/dalle_image_generator/"}, "OpenAIModerationChain": {"OpenAI": "https://python.langchain.com/docs/integrations/platforms/openai/", "moderation.md": "https://python.langchain.com/docs/guides/productionization/safety/moderation/"}, "GoogleGenerativeAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "VertexAIModelGarden": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/"}, "ChatGoogleGenerativeAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/"}, "ChatVertexAI": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that 
your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "BigQueryLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "Note that the `id` column is being returned twice, with one instance aliased as `source`": "https://python.langchain.com/docs/integrations/document_loaders/google_bigquery/"}, "GCSDirectoryLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_cloud_storage_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_directory/"}, "GCSFileLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_cloud_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file/"}, "GoogleDriveLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/"}, "GoogleSpeechToTextLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "or a local file path: file_path = \"./audio.wav\"": "https://python.langchain.com/docs/integrations/document_loaders/google_speech_to_text/"}, "Blob": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_docai.md": "https://python.langchain.com/docs/integrations/document_transformers/google_docai/", "Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "DocAIParser": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_docai.md": "https://python.langchain.com/docs/integrations/document_transformers/google_docai/"}, "GoogleTranslateTransformer": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_translate.md": "https://python.langchain.com/docs/integrations/document_transformers/google_translate/"}, "BigQueryVectorSearch": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_bigquery_vector_search/"}, "VectorSearchVectorStore": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/"}, "ScaNN": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "scann.md": "https://python.langchain.com/docs/integrations/vectorstores/scann/"}, "GoogleDocumentAIWarehouseRetriever": 
{"Google": "https://python.langchain.com/docs/integrations/platforms/google/"}, "GoogleCloudTextToSpeechTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_cloud_texttospeech.md": "https://python.langchain.com/docs/integrations/tools/google_cloud_texttospeech/"}, "GoogleFinanceQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/"}, "GoogleFinanceAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_finance.md": "https://python.langchain.com/docs/integrations/tools/google_finance/"}, "GoogleJobsQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/"}, "GoogleLensQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Runs google lens on an image of Danny Devito": "https://python.langchain.com/docs/integrations/tools/google_lens/"}, "GoogleLensAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "Runs google lens on an image of Danny Devito": "https://python.langchain.com/docs/integrations/tools/google_lens/"}, "GooglePlacesTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_places.md": "https://python.langchain.com/docs/integrations/tools/google_places/"}, "GoogleScholarQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_scholar.md": "https://python.langchain.com/docs/integrations/tools/google_scholar/"}, "GoogleScholarAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_scholar.md": "https://python.langchain.com/docs/integrations/tools/google_scholar/"}, "GoogleSearchAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/", "google_search.md": "https://python.langchain.com/docs/integrations/tools/google_search/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/"}, "GoogleTrendsQueryRun": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_trends.md": "https://python.langchain.com/docs/integrations/tools/google_trends/"}, "GoogleTrendsAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_trends.md": "https://python.langchain.com/docs/integrations/tools/google_trends/"}, "GmailToolkit": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "SearchApiAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/"}, "SerpAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "You can create 
the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/serpapi/", "setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "SerpAPI": "https://python.langchain.com/docs/integrations/providers/serpapi/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/"}, "GoogleSerperAPIWrapper": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/"}, "YouTubeSearchTool": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "youtube.md": "https://python.langchain.com/docs/integrations/tools/youtube/"}, "YoutubeAudioLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/"}, "OpenAIWhisperParser": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/"}, "YoutubeLoader": {"Google": "https://python.langchain.com/docs/integrations/platforms/google/", "YouTube": "https://python.langchain.com/docs/integrations/providers/youtube/", "Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/", "%pip install -qU langchain langchain-community langchain-openai youtube-transcript-api pytube langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/quickstart/", "%pip install -qU langchain langchain-openai youtube-transcript-api pytube": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/structuring/"}, "AnthropicLLM": {"Anthropic": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/anthropic-checkpoint/", "anthropic.md": "https://python.langchain.com/docs/integrations/llms/anthropic/"}, "MatchingEngine": {"Google": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/google-checkpoint/"}, "AzureCognitiveSearchRetriever": {"Microsoft": "https://python.langchain.com/docs/integrations/platforms/.ipynb_checkpoints/microsoft-checkpoint/"}, "AIPluginTool": {"chatgpt_plugins.md": "https://python.langchain.com/docs/integrations/tools/chatgpt_plugins/"}, "DataForSeoAPIWrapper": {"dataforseo.md": "https://python.langchain.com/docs/integrations/tools/dataforseo/", "DataForSEO": "https://python.langchain.com/docs/integrations/providers/dataforseo/"}, "Tool": {"dataforseo.md": "https://python.langchain.com/docs/integrations/tools/dataforseo/", "You can create the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/serpapi/", "google_serper.md": "https://python.langchain.com/docs/integrations/tools/google_serper/", "searchapi.md": "https://python.langchain.com/docs/integrations/tools/searchapi/", "google_search.md": "https://python.langchain.com/docs/integrations/tools/google_search/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "Based on ReAct Agent": 
"https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "SearchApi": "https://python.langchain.com/docs/integrations/providers/searchapi/", "Serper - Google Search API": "https://python.langchain.com/docs/integrations/providers/google_serper/", "document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Pydantic compatibility": "https://python.langchain.com/docs/guides/development/pydantic_compatibility/", "Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "ConneryService": {"Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/"}, "DataheraldAPIWrapper": {"dataherald.md": "https://python.langchain.com/docs/integrations/tools/dataherald/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/"}, "SearxSearchWrapper": {"searx_search.md": "https://python.langchain.com/docs/integrations/tools/searx_search/", "SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/"}, "PythonREPL": {"You can create the tool to pass to an agent": "https://python.langchain.com/docs/integrations/tools/python/", "code_writing.md": "https://python.langchain.com/docs/expression_language/cookbook/code_writing/"}, "GoogleJobsAPIWrapper": {"use it with langchain {#use-it-with-langchain}": "https://python.langchain.com/docs/integrations/tools/google_jobs/"}, "InfobipAPIWrapper": {"How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/"}, "StructuredTool": {"How to use it inside an Agent {#how-to-use-it-inside-an-agent}": "https://python.langchain.com/docs/integrations/tools/infobip/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "E2BDataAnalysisTool": {"Artifacts are charts created by matplotlib when `plt.show()` is called": "https://python.langchain.com/docs/integrations/tools/e2b_data_analysis/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "SQLDatabase": {"In order to build a selectable on SA's Core API, you need a table definition.": "https://python.langchain.com/docs/integrations/tools/sql_database/", "CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "HumanInputRun": {"Answer with 'Zhu'": "https://python.langchain.com/docs/integrations/tools/human_tools/"}, "NucliaUnderstandingAPI": {"nuclia.md": "https://python.langchain.com/docs/integrations/document_loaders/nuclia/", "Nuclia": "https://python.langchain.com/docs/integrations/providers/nuclia/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/"}, "YahooFinanceNewsTool": {"How YahooFinanceNewsTool works? {#how-yahoofinancenewstool-works}": "https://python.langchain.com/docs/integrations/tools/yahoo_finance_news/"}, "WikidataAPIWrapper": {"wikidata.md": "https://python.langchain.com/docs/integrations/tools/wikidata/"}, "WikidataQueryRun": {"wikidata.md": "https://python.langchain.com/docs/integrations/tools/wikidata/"}, "TwilioAPIWrapper": {"twilio.md": "https://python.langchain.com/docs/integrations/tools/twilio/"}, "IFTTTWebhook": {"ifttt.md": "https://python.langchain.com/docs/integrations/tools/ifttt/"}, "SemanticScholarQueryRun": {"start by installing semanticscholar api": "https://python.langchain.com/docs/integrations/tools/semanticscholar/"}, "WikipediaQueryRun": {"wikipedia.md": "https://python.langchain.com/docs/integrations/tools/wikipedia/", "index.md": "https://python.langchain.com/docs/modules/tools/index/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "WikipediaAPIWrapper": {"wikipedia.md": "https://python.langchain.com/docs/integrations/tools/wikipedia/", "Set this to your Zep server URL": "https://python.langchain.com/docs/integrations/memory/zep_memory/", "index.md": "https://python.langchain.com/docs/modules/tools/index/", "pip install wikipedia": "https://python.langchain.com/docs/modules/agents/how_to/intermediate_steps/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/max_iterations/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "AlphaVantageAPIWrapper": {"alpha_vantage.md": "https://python.langchain.com/docs/integrations/tools/alpha_vantage/"}, "StackExchangeAPIWrapper": {"stackexchange.md": "https://python.langchain.com/docs/integrations/tools/stackexchange/", "Stack Exchange": "https://python.langchain.com/docs/integrations/providers/stackexchange/"}, "TextRequestsWrapper": {"Each tool wrapps a requests wrapper": "https://python.langchain.com/docs/integrations/tools/requests/"}, "OpenWeatherMapAPIWrapper": {"openweathermap.md": "https://python.langchain.com/docs/integrations/tools/openweathermap/", "OpenWeatherMap": "https://python.langchain.com/docs/integrations/providers/openweathermap/"}, "get_from_env": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "NutritionAI": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "NutritionAIAPI": {"Get the 
prompt to use - you can modify this!": "https://python.langchain.com/docs/integrations/tools/passio_nutrition_ai/"}, "PubmedQueryRun": {"pubmed.md": "https://python.langchain.com/docs/integrations/tools/pubmed/"}, "ConversationBufferMemory": {"memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "gradio_tools.md": "https://python.langchain.com/docs/integrations/tools/gradio_tools/", "sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/", "Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/", "xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/", "adding_memory.md": "https://python.langchain.com/docs/modules/memory/adding_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/"}, "GradientLLM": {"memorize.md": "https://python.langchain.com/docs/integrations/tools/memorize/", "Gradient": "https://python.langchain.com/docs/integrations/providers/gradient/", "Improve the results by fine-tuning (optional) {#improve-the-results-by-fine-tuning-optional}": "https://python.langchain.com/docs/integrations/llms/gradient/"}, "ElevenLabsText2SpeechTool": {"eleven_labs_tts.md": "https://python.langchain.com/docs/integrations/tools/eleven_labs_tts/", "ElevenLabs": "https://python.langchain.com/docs/integrations/providers/elevenlabs/"}, "BearlyInterpreterTool": {"Extract pdf content": "https://python.langchain.com/docs/integrations/tools/bearly/"}, "VectorstoreIndexCreator": {"apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "hugging_face_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/hugging_face_dataset/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury/", "image_captions.md": "https://python.langchain.com/docs/integrations/document_loaders/image_captions/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/", "Set env var OPENAI_API_KEY or load from a .env file:": 
"https://python.langchain.com/docs/use_cases/web_scraping/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "ApifyWrapper": {"apify.md": "https://python.langchain.com/docs/integrations/tools/apify/", "Apify": "https://python.langchain.com/docs/integrations/providers/apify/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "ZapierToolkit": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "ZapierNLAWrapper": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "SimpleSequentialChain": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "LLM Hyperparameters": "https://python.langchain.com/docs/integrations/callbacks/sagemaker_tracking/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/predibase/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/"}, "TransformChain": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/", "!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/"}, "ZapierNLARunAction": {"get from https://platform.openai.com/": "https://python.langchain.com/docs/integrations/tools/zapier/"}, "RivaASR": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "RivaTTS": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "RivaAudioEncoding": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "AudioStream": {"send data into the chain": "https://python.langchain.com/docs/integrations/tools/nvidia_riva/"}, "GoldenQueryAPIWrapper": {"golden_query.md": "https://python.langchain.com/docs/integrations/tools/golden_query/", "Golden": "https://python.langchain.com/docs/integrations/providers/golden/"}, "create_react_agent": {"arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/", "Based on ReAct Agent": "https://python.langchain.com/docs/integrations/tools/ionic_shopping/", "Streamlit": "https://python.langchain.com/docs/integrations/callbacks/streamlit/", "Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/", "Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/max_iterations/"}, "ArxivAPIWrapper": {"arxiv.md": "https://python.langchain.com/docs/integrations/tools/arxiv/"}, "DuckDuckGoSearchRun": {"ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/", "Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "DuckDuckGoSearchAPIWrapper": {"ddg.md": "https://python.langchain.com/docs/integrations/tools/ddg/"}, "SceneXplainTool": {"sceneXplain.md": "https://python.langchain.com/docs/integrations/tools/sceneXplain/"}, "WolframAlphaAPIWrapper": {"wolfram_alpha.md": "https://python.langchain.com/docs/integrations/tools/wolfram_alpha/", "Wolfram Alpha": "https://python.langchain.com/docs/integrations/providers/wolfram_alpha/"}, "RunnableParallel": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/", "Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "> ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])": "https://python.langchain.com/docs/expression_language/get_started/", "The input schema of the chain is the input schema of its first part, the prompt.": "https://python.langchain.com/docs/expression_language/interface/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/", "Adding values to chain state {#adding-values-to-chain-state}": "https://python.langchain.com/docs/expression_language/primitives/assign/", "Formatting inputs & output {#formatting-inputs-output}": "https://python.langchain.com/docs/expression_language/primitives/parallel/", "Passing data through {#passing-data-through}": "https://python.langchain.com/docs/expression_language/primitives/passthrough/", "Chaining runnables {#chaining-runnables}": "https://python.langchain.com/docs/expression_language/primitives/sequence/"}, "ExaSearchRetriever": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "exa_search.md": "https://python.langchain.com/docs/integrations/providers/exa_search/"}, "TextContentsOptions": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/"}, "OpenAIFunctionsAgent": {"and some deps for this notebook": "https://python.langchain.com/docs/integrations/tools/exa_search/", "LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/"}, "EdenAiExplicitImageTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiObjectDetectionTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden 
AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiParsingIDTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiParsingInvoiceTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiSpeechToTextTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiTextModerationTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAiTextToSpeechTool": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "EdenAI": {"edenai_tools.md": "https://python.langchain.com/docs/integrations/tools/edenai_tools/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/"}, "RedditSearchRun": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "RedditSearchAPIWrapper": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "RedditSearchSchema": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "StructuredChatAgent": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "ReadOnlySharedMemory": {"Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools": "https://python.langchain.com/docs/integrations/tools/reddit_search/"}, "YouSearchTool": {"For use in Chaining section": "https://python.langchain.com/docs/integrations/tools/you/"}, "ShellTool": {"bash.md": "https://python.langchain.com/docs/integrations/tools/bash/"}, "PolygonAggregates": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonFinancials": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonLastQuote": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonTickerNews": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "PolygonAPIWrapper": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/"}, "PolygonAggregatesSchema": {"Get the last quote for ticker": "https://python.langchain.com/docs/integrations/tools/polygon/"}, "FileManagementToolkit": {"We'll make a temporary directory to avoid clutter": "https://python.langchain.com/docs/integrations/tools/filesystem/"}, "BraveSearch": {"brave_search.md": "https://python.langchain.com/docs/integrations/tools/brave_search/", "Brave Search": 
"https://python.langchain.com/docs/integrations/providers/brave_search/"}, "RedisChatMessageHistory": {"redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "RunnableWithMessageHistory": {"redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/redis_chat_message_history/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/memory/google_sql_mssql/", "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/memory/sqlite/", "Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "agent_with_memory_in_db.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory_in_db/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "ElasticsearchChatMessageHistory": {"If using Elastic Cloud:": "https://python.langchain.com/docs/integrations/memory/elasticsearch_chat_message_history/", "Elasticsearch": "https://python.langchain.com/docs/integrations/providers/elasticsearch/"}, "UpstashRedisChatMessageHistory": {"upstash_redis_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/upstash_redis_chat_message_history/", "Upstash Redis": "https://python.langchain.com/docs/integrations/providers/upstash/"}, "SingleStoreDBChatMessageHistory": {"singlestoredb_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/singlestoredb_chat_message_history/", "SingleStoreDB": "https://python.langchain.com/docs/integrations/providers/singlestoredb/"}, "PostgresChatMessageHistory": {"postgres_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/postgres_chat_message_history/"}, "MomentoChatMessageHistory": {"momento_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/momento_chat_message_history/", "Momento": "https://python.langchain.com/docs/integrations/providers/momento/"}, "XataChatMessageHistory": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Xata": "https://python.langchain.com/docs/integrations/providers/xata/"}, "XataVectorStore": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "xata.md": "https://python.langchain.com/docs/integrations/vectorstores/xata/"}, "create_retriever_tool": {"xata_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/xata_chat_message_history/", "Quickstart": 
"https://python.langchain.com/docs/get_started/quickstart/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "CassandraChatMessageHistory": {"cassandra_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/cassandra_chat_message_history/", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/"}, "SQLChatMessageHistory": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/memory/sqlite/", "SQLite": "https://python.langchain.com/docs/integrations/providers/sqlite/"}, "MotorheadMemory": {"loads previous state from Mot\u00f6rhead \ud83e\udd18": "https://python.langchain.com/docs/integrations/memory/motorhead_memory/", "Mot\u00f6rhead": "https://python.langchain.com/docs/integrations/providers/motorhead/"}, "AstraDBChatMessageHistory": {"astradb_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/astradb_chat_message_history/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/"}, "StreamlitChatMessageHistory": {"Optionally, specify your own session_state key for storing messages": "https://python.langchain.com/docs/integrations/memory/streamlit_chat_message_history/", "Streamlit": "https://python.langchain.com/docs/integrations/providers/streamlit/"}, "Neo4jChatMessageHistory": {"neo4j_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/neo4j_chat_message_history/"}, "TiDBChatMessageHistory": {"copy from tidb cloud console": "https://python.langchain.com/docs/integrations/memory/tidb_chat_message_history/", "TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/"}, "RocksetChatMessageHistory": {"rockset_chat_message_history.md": "https://python.langchain.com/docs/integrations/memory/rockset_chat_message_history/", "Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/"}, "HuggingFaceTextGenInference": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/"}, "HuggingFaceEndpoint": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/"}, "HuggingFaceHub": {"setup tools": "https://python.langchain.com/docs/integrations/chat/huggingface/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "format_log_to_str": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/"}, "ReActJsonSingleInputOutputParser": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "render_text_description": {"setup 
tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/"}, "AzureMLChatOnlineEndpoint": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/"}, "AzureMLEndpointApiType": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/", "azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "CustomOpenAIChatContentFormatter": {"azureml_chat_endpoint.md": "https://python.langchain.com/docs/integrations/chat/azureml_chat_endpoint/"}, "ChatKinetica": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/", "Kinetica": "https://python.langchain.com/docs/integrations/providers/kinetica/"}, "KineticaSqlOutputParser": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/"}, "KineticaSqlResponse": {"Install Langchain community and core packages": "https://python.langchain.com/docs/integrations/chat/kinetica/"}, "PaiEasChatEndpoint": {"alibaba_cloud_pai_eas.md": "https://python.langchain.com/docs/integrations/chat/alibaba_cloud_pai_eas/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/"}, "ChatFireworks": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/chat/fireworks/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatOctoAI": {"octoai.md": "https://python.langchain.com/docs/integrations/chat/octoai/"}, "ChatDeepInfra": {"get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/"}, "StreamingStdOutCallbackHandler": {"get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/chat/deepinfra/", "litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/", "arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": 
"https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "Run the chain specifying only the input variable for the first chain.": "https://python.langchain.com/docs/integrations/llms/edenai/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/", "get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token": "https://python.langchain.com/docs/integrations/llms/huggingface_endpoint/", "magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "ToolsOutputParser": {"open ../../../static/img/brand/wordmark.png as base64 str": "https://python.langchain.com/docs/integrations/chat/anthropic/"}, "ChatGroq": {"groq.md": "https://python.langchain.com/docs/integrations/chat/groq/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatLiteLLM": {"litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/"}, "CallbackManager": {"litellm.md": "https://python.langchain.com/docs/integrations/chat/litellm/", "gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/", "litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/", "zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "LlamaEdgeChatService": {"service url": "https://python.langchain.com/docs/integrations/chat/llama_edge/"}, "HarmBlockThreshold": {"Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "HarmCategory": {"Note that each chunk may contain more than one \"token\"": "https://python.langchain.com/docs/integrations/chat/google_generative_ai/", "for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/", "google_ai.md": "https://python.langchain.com/docs/integrations/llms/google_ai/"}, "OllamaFunctions": {"Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/"}, "create_extraction_chain": {"Schema": "https://python.langchain.com/docs/integrations/chat/ollama_functions/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "VolcEngineMaasChat": {"Install the package": "https://python.langchain.com/docs/integrations/chat/volcengine_maas/"}, "ChatLlamaAPI": {"Replace 'Your_API_Token' with your actual API token": "https://python.langchain.com/docs/integrations/chat/llama_api/"}, 
"create_tagging_chain": {"Replace 'Your_API_Token' with your actual API token": "https://python.langchain.com/docs/integrations/chat/llama_api/"}, "ChatKonko": {"Konko {#konko}": "https://python.langchain.com/docs/integrations/chat/konko/"}, "create_structured_runnable": {"for running these examples in the notebook:": "https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm/"}, "MLXPipeline": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/", "mlx_pipelines.md": "https://python.langchain.com/docs/integrations/llms/mlx_pipelines/"}, "ChatMLX": {"setup tools": "https://python.langchain.com/docs/integrations/chat/mlx/"}, "GigaChat": {"gigachat.md": "https://python.langchain.com/docs/integrations/llms/gigachat/", "Salute Devices": "https://python.langchain.com/docs/integrations/providers/salute_devices/"}, "JinaChat": {"get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/jinachat/"}, "SystemMessagePromptTemplate": {"get a chat completion from the formatted messages": "https://python.langchain.com/docs/integrations/chat/vllm/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/", "[Beta] Memory": "https://python.langchain.com/docs/modules/memory/.ipynb_checkpoints/index-checkpoint/", "Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/"}, "ChatOllama": {"LangChain supports many other chat models. 
Here, we're using Ollama": "https://python.langchain.com/docs/integrations/chat/ollama/", "Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "get_openai_callback": {"azure_chat_openai.md": "https://python.langchain.com/docs/integrations/chat/azure_chat_openai/", "You can kick off concurrent runs from within the context manager": "https://python.langchain.com/docs/modules/callbacks/token_counting/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/", "token_usage_tracking.md": "https://python.langchain.com/docs/modules/model_io/llms/token_usage_tracking/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "ChatEverlyAI": {"Let\u2019s try out LLAMA model offered on EverlyAI Hosted Endpoints {#lets-try-out-llama-model-offered-on-everlyai-hosted-endpoints}": "https://python.langchain.com/docs/integrations/chat/everlyai/"}, "GPTRouter": {"gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/"}, "GPTRouterModel": {"gpt_router.md": "https://python.langchain.com/docs/integrations/chat/gpt_router/"}, "ChatLiteLLMRouter": {"litellm_router.md": "https://python.langchain.com/docs/integrations/chat/litellm_router/"}, "ChatFriendli": {"friendli.md": "https://python.langchain.com/docs/integrations/chat/friendli/"}, "ChatMistralAI": {"If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.": "https://python.langchain.com/docs/integrations/chat/mistralai/", "mistralai.md": "https://python.langchain.com/docs/integrations/providers/mistralai/", "Install a model capable of tool calling": "https://python.langchain.com/docs/use_cases/extraction/quickstart/", "structured_output.md": "https://python.langchain.com/docs/modules/model_io/chat/structured_output/", "response_metadata.md": "https://python.langchain.com/docs/modules/model_io/chat/response_metadata/"}, "ChatZhipuAI": {"zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/"}, "create_json_chat_agent": {"zhipuai.md": "https://python.langchain.com/docs/integrations/chat/zhipuai/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/json_agent/"}, "ChatBaichuan": {"baichuan.md": "https://python.langchain.com/docs/integrations/chat/baichuan/", "Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/"}, "Llama2Chat": {"!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/"}, "LlamaCpp": {"!pip3 install text-generation": "https://python.langchain.com/docs/integrations/chat/llama2_chat/", "Llama.cpp": "https://python.langchain.com/docs/integrations/providers/llamacpp/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/llamacpp/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "QianfanChatEndpoint": {"baidu_qianfan_endpoint.md": 
"https://python.langchain.com/docs/integrations/chat/baidu_qianfan_endpoint/", "ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/", "Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/"}, "ChatEdenAI": {"edenai.md": "https://python.langchain.com/docs/integrations/chat/edenai/", "Eden AI": "https://python.langchain.com/docs/integrations/providers/edenai/"}, "ErnieBotChat": {"ernie.md": "https://python.langchain.com/docs/integrations/chat/ernie/"}, "ChatHunyuan": {"tencent_hunyuan.md": "https://python.langchain.com/docs/integrations/chat/tencent_hunyuan/", "Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/"}, "MiniMaxChat": {"minimax.md": "https://python.langchain.com/docs/integrations/chat/minimax/", "Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/"}, "ChatYuan2": {"yuan2.md": "https://python.langchain.com/docs/integrations/chat/yuan2/"}, "ChatTongyi": {"Install the package": "https://python.langchain.com/docs/integrations/chat/tongyi/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/"}, "PromptLayerChatOpenAI": {"promptlayer_chatopenai.md": "https://python.langchain.com/docs/integrations/chat/promptlayer_chatopenai/", "PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/"}, "ChatSparkLLM": {"sparkllm.md": "https://python.langchain.com/docs/integrations/chat/sparkllm/"}, "MoonshotChat": {"Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/chat/moonshot/"}, "ChatDappierAI": {"dappier.md": "https://python.langchain.com/docs/integrations/chat/dappier/"}, "ChatMaritalk": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/"}, "OnlinePDFLoader": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "load_qa_chain": {"Loading the COMVEST 2024 notice": "https://python.langchain.com/docs/integrations/chat/maritalk/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/", "You can store your OPENAI_API_KEY in a .env file as well": "https://python.langchain.com/docs/integrations/document_loaders/amazon_textract/", "sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "adding_memory_chain_multiple_inputs.md": "https://python.langchain.com/docs/modules/memory/adding_memory_chain_multiple_inputs/"}, "ChatPremAI": {"First step is to set up the env variable.": "https://python.langchain.com/docs/integrations/chat/premai/", "PremAI": "https://python.langchain.com/docs/integrations/providers/premai/"}, "ChatAnyscale": {"Let\u2019s try out each model offered on Anyscale Endpoints {#lets-try-out-each-model-offered-on-anyscale-endpoints}": "https://python.langchain.com/docs/integrations/chat/anyscale/", "Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/"}, "ChatYandexGPT": {"yandex.md": "https://python.langchain.com/docs/integrations/chat/yandex/", "Yandex": "https://python.langchain.com/docs/integrations/providers/yandex/"}, "ChatPerplexity": {"perplexity.md": "https://python.langchain.com/docs/integrations/chat/perplexity/"}, "ChatAnthropicTools": 
{"anthropic_functions.md": "https://python.langchain.com/docs/integrations/chat/anthropic_functions/"}, "ChatMessage": {"Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/"}, "ConversationChain": {"Or via the async API": "https://python.langchain.com/docs/integrations/chat/nvidia_ai_endpoints/", "Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/", "Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Here it is by default set to \"AI\"": "https://python.langchain.com/docs/modules/memory/conversational_customization/", "kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/", "We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/", "Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/", "Conversation Buffer": "https://python.langchain.com/docs/modules/memory/types/buffer/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/"}, "DeepEvalCallbackHandler": {"Here we want to make sure the answer is minimally relevant": "https://python.langchain.com/docs/integrations/callbacks/confident/", "Confident AI": "https://python.langchain.com/docs/integrations/providers/confident/"}, "LLMonitorCallbackHandler": {"LLMonitor": "https://python.langchain.com/docs/integrations/providers/llmonitor/"}, "identify": {"LLMonitor": "https://python.langchain.com/docs/integrations/callbacks/llmonitor/"}, "ContextCallbackHandler": {"context.md": "https://python.langchain.com/docs/integrations/callbacks/context/", "Context": "https://python.langchain.com/docs/integrations/providers/context/"}, "FiddlerCallbackHandler": {"Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "Fiddler": "https://python.langchain.com/docs/integrations/providers/fiddler/"}, "FewShotChatMessagePromptTemplate": {"Fiddler project and model names, used for model registration": "https://python.langchain.com/docs/integrations/callbacks/fiddler/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/"}, "LabelStudioCallbackHandler": {"labelstudio.md": "https://python.langchain.com/docs/integrations/callbacks/labelstudio/", "Label Studio": "https://python.langchain.com/docs/integrations/providers/labelstudio/"}, "CometTracer": {"Connect to Comet if no API Key is set": "https://python.langchain.com/docs/integrations/callbacks/comet_tracing/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/"}, "ArgillaCallbackHandler": {"argilla.md": "https://python.langchain.com/docs/integrations/callbacks/argilla/", "Argilla": "https://python.langchain.com/docs/integrations/providers/argilla/"}, "StdOutCallbackHandler": {"argilla.md": 
"https://python.langchain.com/docs/integrations/callbacks/argilla/", "os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/", "scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/", "os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/", "Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Callbacks": "https://python.langchain.com/docs/modules/callbacks/index/", "this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/"}, "PromptLayerCallbackHandler": {"promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/"}, "GPT4All": {"promptlayer.md": "https://python.langchain.com/docs/integrations/callbacks/promptlayer/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/", "Callbacks support token-wise streaming": "https://python.langchain.com/docs/integrations/llms/gpt4all/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "StreamlitCallbackHandler": {"Streamlit": "https://python.langchain.com/docs/integrations/providers/streamlit/", "GPT4All": "https://python.langchain.com/docs/integrations/providers/gpt4all/"}, "MultiQueryRetriever": {"1. Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_summary/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/"}, "UpTrainCallbackHandler": {"1. 
Vanilla RAG {#vanilla-rag-1}": "https://python.langchain.com/docs/integrations/callbacks/uptrain/", "UpTrain": "https://python.langchain.com/docs/integrations/providers/uptrain/"}, "TrubricsCallbackHandler": {"trubrics.md": "https://python.langchain.com/docs/integrations/callbacks/trubrics/", "Trubrics": "https://python.langchain.com/docs/integrations/providers/trubrics/"}, "InfinoCallbackHandler": {"Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "Infino": "https://python.langchain.com/docs/integrations/providers/infino/"}, "load_summarize_chain": {"Install necessary dependencies.": "https://python.langchain.com/docs/integrations/callbacks/infino/", "see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "FigmaFileLoader": {"Figma": "https://python.langchain.com/docs/integrations/providers/figma/", "see https://python.langchain.com/en/latest/modules/data_connection/getting_started.html for more details": "https://python.langchain.com/docs/integrations/document_loaders/figma/"}, "Baseten": {"Baseten": "https://python.langchain.com/docs/integrations/providers/baseten/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/baseten/"}, "WeatherDataLoader": {"Weather": "https://python.langchain.com/docs/integrations/providers/weather/", "Set API key either by passing it in to constructor directly": "https://python.langchain.com/docs/integrations/document_loaders/weather/"}, "Tair": {"Tair": "https://python.langchain.com/docs/integrations/providers/tair/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "drop first if index already exists": "https://python.langchain.com/docs/integrations/vectorstores/tair/"}, "CollegeConfidentialLoader": {"College Confidential": "https://python.langchain.com/docs/integrations/providers/college_confidential/", "college_confidential.md": "https://python.langchain.com/docs/integrations/document_loaders/college_confidential/"}, "RWKV": {"RWKV-4": "https://python.langchain.com/docs/integrations/providers/rwkv/"}, "LakeFSLoader": {"lakeFS": "https://python.langchain.com/docs/integrations/providers/lakefs/", "lakefs.md": "https://python.langchain.com/docs/integrations/document_loaders/lakefs/"}, "FaunaLoader": {"Fauna": "https://python.langchain.com/docs/integrations/providers/fauna/", "fauna.md": "https://python.langchain.com/docs/integrations/document_loaders/fauna/"}, "OCIGenAI": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "use default authN method API-key": "https://python.langchain.com/docs/integrations/llms/oci_generative_ai/"}, "OCIModelDeploymentVLLM": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "Set authentication through ads": "https://python.langchain.com/docs/integrations/llms/oci_model_deployment_endpoint/"}, "OCIModelDeploymentTGI": {"Oracle Cloud Infrastructure (OCI)": "https://python.langchain.com/docs/integrations/providers/oci/", "Set authentication through ads": "https://python.langchain.com/docs/integrations/llms/oci_model_deployment_endpoint/"}, "Lantern": {"Lantern": 
"https://python.langchain.com/docs/integrations/providers/lantern/", "Pip install necessary package {#pip-install-necessary-package}": "https://python.langchain.com/docs/integrations/vectorstores/lantern/"}, "SQLiteCache": {"From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "set_llm_cache": {"From LangChain, import standard modules for prompting.": "https://python.langchain.com/docs/integrations/providers/dspy/", "MongoDB Atlas": "https://python.langchain.com/docs/integrations/providers/mongodb_atlas/", "Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "Fireworks": {"Fireworks": "https://python.langchain.com/docs/integrations/providers/fireworks/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/fireworks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "DropboxLoader": {"Dropbox": "https://python.langchain.com/docs/integrations/providers/dropbox/", "Generate access token: https://www.dropbox.com/developers/apps/create.": "https://python.langchain.com/docs/integrations/document_loaders/dropbox/"}, "ForefrontAI": {"ForefrontAI": "https://python.langchain.com/docs/integrations/providers/forefrontai/", "get a new token: https://docs.forefront.ai/forefront/api-reference/authentication": "https://python.langchain.com/docs/integrations/llms/forefrontai/"}, "CometCallbackHandler": {"os.environ[\"OPENAI_ORGANIZATION\"] = \"...\"": "https://python.langchain.com/docs/integrations/providers/comet_tracking/"}, "CTransformers": {"C Transformers": "https://python.langchain.com/docs/integrations/providers/ctransformers/", "ctransformers.md": "https://python.langchain.com/docs/integrations/llms/ctransformers/"}, "BiliBiliLoader": {"BiliBili": "https://python.langchain.com/docs/integrations/providers/bilibili/", "bilibili.md": "https://python.langchain.com/docs/integrations/document_loaders/bilibili/"}, "TencentCOSDirectoryLoader": {"Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "tencent_cos_directory.md": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_directory/"}, "TencentCOSFileLoader": {"Tencent": "https://python.langchain.com/docs/integrations/providers/tencent/", "tencent_cos_file.md": "https://python.langchain.com/docs/integrations/document_loaders/tencent_cos_file/"}, "OBSDirectoryLoader": {"Huawei": "https://python.langchain.com/docs/integrations/providers/huawei/", "Install the required package": "https://python.langchain.com/docs/integrations/document_loaders/huawei_obs_directory/"}, "OBSFileLoader": {"Huawei": "https://python.langchain.com/docs/integrations/providers/huawei/", "Install the required package": 
"https://python.langchain.com/docs/integrations/document_loaders/huawei_obs_file/"}, "DiffbotLoader": {"Diffbot": "https://python.langchain.com/docs/integrations/providers/diffbot/", "diffbot.md": "https://python.langchain.com/docs/integrations/document_loaders/diffbot/"}, "DeepSparse": {"DeepSparse": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/deepsparse-checkpoint/", "deepsparse.md": "https://python.langchain.com/docs/integrations/llms/deepsparse/"}, "AimCallbackHandler": {"scenario 1 - LLM": "https://python.langchain.com/docs/integrations/providers/aim_tracking/"}, "ModernTreasuryLoader": {"Modern Treasury": "https://python.langchain.com/docs/integrations/providers/modern_treasury/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/modern_treasury/"}, "GitHubIssuesLoader": {"GitHub": "https://python.langchain.com/docs/integrations/providers/github/", "If you haven't set your access token as an environment variable, pass it in here.": "https://python.langchain.com/docs/integrations/document_loaders/github/"}, "GithubFileLoader": {"GitHub": "https://python.langchain.com/docs/integrations/providers/github/", "If you haven't set your access token as an environment variable, pass it in here.": "https://python.langchain.com/docs/integrations/document_loaders/github/"}, "Banana": {"Banana": "https://python.langchain.com/docs/integrations/providers/bananadev/", "Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python": "https://python.langchain.com/docs/integrations/llms/banana/"}, "InfinispanVS": {"Infinispan VS": "https://python.langchain.com/docs/integrations/providers/infinispanvs/", "Ensure that all we need is installed": "https://python.langchain.com/docs/integrations/vectorstores/infinispanvs/"}, "CerebriumAI": {"CerebriumAI": "https://python.langchain.com/docs/integrations/providers/cerebriumai/", "Install the package": "https://python.langchain.com/docs/integrations/llms/cerebriumai/"}, "GutenbergLoader": {"Gutenberg": "https://python.langchain.com/docs/integrations/providers/gutenberg/", "gutenberg.md": "https://python.langchain.com/docs/integrations/document_loaders/gutenberg/"}, "WikipediaLoader": {"Wikipedia": "https://python.langchain.com/docs/integrations/providers/wikipedia/", "wikipedia.md": "https://python.langchain.com/docs/integrations/document_loaders/wikipedia/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/"}, "ConfluenceLoader": {"Confluence": "https://python.langchain.com/docs/integrations/providers/confluence/", "confluence.md": "https://python.langchain.com/docs/integrations/document_loaders/confluence/"}, "Predibase": {"Predibase": "https://python.langchain.com/docs/integrations/providers/predibase/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/predibase/"}, "Beam": {"Beam": "https://python.langchain.com/docs/integrations/providers/beam/", "Set the environment variables": "https://python.langchain.com/docs/integrations/llms/beam/"}, "GrobidParser": {"Grobid": "https://python.langchain.com/docs/integrations/providers/grobid/", "grobid.md": "https://python.langchain.com/docs/integrations/document_loaders/grobid/"}, "GenericLoader": {"Grobid": "https://python.langchain.com/docs/integrations/providers/grobid/", "set a flag to switch between local and remote parsing": "https://python.langchain.com/docs/integrations/document_loaders/youtube_audio/", "grobid.md": 
"https://python.langchain.com/docs/integrations/document_loaders/grobid/", "Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "Typesense": {"Typesense": "https://python.langchain.com/docs/integrations/providers/typesense/", "typesense.md": "https://python.langchain.com/docs/integrations/vectorstores/typesense/"}, "Hologres": {"Hologres": "https://python.langchain.com/docs/integrations/providers/hologres/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "hologres.md": "https://python.langchain.com/docs/integrations/vectorstores/hologres/"}, "AI21": {"AI21 Labs": "https://python.langchain.com/docs/integrations/providers/ai21/"}, "ArangoGraph": {"ArangoDB": "https://python.langchain.com/docs/integrations/providers/arangodb/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/"}, "ArangoGraphQAChain": {"ArangoDB": "https://python.langchain.com/docs/integrations/providers/arangodb/", "Instantiate ArangoDB Database": "https://python.langchain.com/docs/integrations/graphs/arangodb/"}, "ArcGISLoader": {"ArcGIS": "https://python.langchain.com/docs/integrations/providers/arcgis/", "arcgis.md": "https://python.langchain.com/docs/integrations/document_loaders/arcgis/"}, "WandbCallbackHandler": {"os.environ[\"OPENAI_API_KEY\"] = \"\"": "https://python.langchain.com/docs/integrations/providers/wandb_tracking/"}, "ObsidianLoader": {"Obsidian": "https://python.langchain.com/docs/integrations/providers/obsidian/", "obsidian.md": "https://python.langchain.com/docs/integrations/document_loaders/obsidian/"}, "create_sql_agent": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/agents/"}, "SQLDatabaseToolkit": {"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/"}, "Nebula": {"Nebula": "https://python.langchain.com/docs/integrations/providers/symblai_nebula/", "symblai_nebula.md": "https://python.langchain.com/docs/integrations/llms/symblai_nebula/"}, "Writer": {"Writer": "https://python.langchain.com/docs/integrations/providers/writer/", "If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.": "https://python.langchain.com/docs/integrations/llms/writer/"}, "BaichuanLLM": {"Baichuan": "https://python.langchain.com/docs/integrations/providers/baichuan/", "Load the model": "https://python.langchain.com/docs/integrations/llms/baichuan/"}, "ApacheDoris": {"Apache Doris": "https://python.langchain.com/docs/integrations/providers/apache_doris/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/apache_doris/"}, "ZepVectorStore": {"Zep": "https://python.langchain.com/docs/integrations/providers/zep/", "Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/"}, "BrowserlessLoader": {"Browserless": "https://python.langchain.com/docs/integrations/providers/browserless/", "browserless.md": "https://python.langchain.com/docs/integrations/document_loaders/browserless/"}, "AZLyricsLoader": {"AZLyrics": "https://python.langchain.com/docs/integrations/providers/azlyrics/", "azlyrics.md": "https://python.langchain.com/docs/integrations/document_loaders/azlyrics/"}, "ToMarkdownLoader": {"2Markdown": "https://python.langchain.com/docs/integrations/providers/tomarkdown/", "You will need to get your own API key. See https://2markdown.com/login": "https://python.langchain.com/docs/integrations/document_loaders/tomarkdown/"}, "Mlflow": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "MlflowEmbeddings": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "ChatMlflow": {"MLflow Deployments for LLMs": "https://python.langchain.com/docs/integrations/providers/mlflow/"}, "GitLoader": {"Git": "https://python.langchain.com/docs/integrations/providers/git/", "e.g. 
loading only python files": "https://python.langchain.com/docs/integrations/document_loaders/git/"}, "MlflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "MlflowAIGatewayEmbeddings": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "ChatMLflowAIGateway": {"MLflow AI Gateway": "https://python.langchain.com/docs/integrations/providers/mlflow_ai_gateway/"}, "Tigris": {"Tigris": "https://python.langchain.com/docs/integrations/providers/tigris/", "tigris.md": "https://python.langchain.com/docs/integrations/vectorstores/tigris/"}, "Meilisearch": {"Meilisearch": "https://python.langchain.com/docs/integrations/providers/meilisearch/", "Use Meilisearch vector store to store texts & associated embeddings as vector": "https://python.langchain.com/docs/integrations/vectorstores/meilisearch/"}, "SQLDatabaseChain": {"!pip3 install rebuff openai -U": "https://python.langchain.com/docs/integrations/providers/rebuff/"}, "SnowflakeLoader": {"Snowflake": "https://python.langchain.com/docs/integrations/providers/snowflake/", "snowflake.md": "https://python.langchain.com/docs/integrations/document_loaders/snowflake/"}, "CubeSemanticLoader": {"Cube": "https://python.langchain.com/docs/integrations/providers/cube/", "Read more about security context here: https://cube.dev/docs/security": "https://python.langchain.com/docs/integrations/document_loaders/cube_semantic/"}, "Clickhouse": {"ClickHouse": "https://python.langchain.com/docs/integrations/providers/clickhouse/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/"}, "ClickhouseSettings": {"ClickHouse": "https://python.langchain.com/docs/integrations/providers/clickhouse/", "clickhouse.md": "https://python.langchain.com/docs/integrations/vectorstores/clickhouse/"}, "ChatDatabricks": {"-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "DatabricksEmbeddings": {"-> content='Hello! How can I assist you today?'": "https://python.langchain.com/docs/integrations/providers/databricks/", "If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "TelegramChatFileLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/", "telegram.md": "https://python.langchain.com/docs/integrations/document_loaders/telegram/"}, "TelegramChatApiLoader": {"Telegram": "https://python.langchain.com/docs/integrations/providers/telegram/", "telegram.md": "https://python.langchain.com/docs/integrations/document_loaders/telegram/"}, "PredictionGuard": {"Prediction Guard": "https://python.langchain.com/docs/integrations/providers/predictionguard/", "Optional, add your OpenAI API Key. 
This is optional, as Prediction Guard allows": "https://python.langchain.com/docs/integrations/llms/predictionguard/"}, "Together": {"together.md": "https://python.langchain.com/docs/integrations/llms/together/"}, "NotionDirectoryLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion/", "notion.md": "https://python.langchain.com/docs/integrations/document_loaders/notion/"}, "NotionDBLoader": {"Notion DB": "https://python.langchain.com/docs/integrations/providers/notion/", "notiondb.md": "https://python.langchain.com/docs/integrations/document_loaders/notiondb/"}, "MWDumpLoader": {"MediaWikiDump": "https://python.langchain.com/docs/integrations/providers/mediawikidump/", "mediawiki-utilities supports XML schema 0.11 in unmerged branches": "https://python.langchain.com/docs/integrations/document_loaders/mediawikidump/"}, "BraveSearchLoader": {"Brave Search": "https://python.langchain.com/docs/integrations/providers/brave_search/", "brave_search.md": "https://python.langchain.com/docs/integrations/document_loaders/brave_search/"}, "StarRocks": {"StarRocks": "https://python.langchain.com/docs/integrations/providers/starrocks/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/"}, "GooseAI": {"GooseAI": "https://python.langchain.com/docs/integrations/providers/gooseai/", "gooseai.md": "https://python.langchain.com/docs/integrations/llms/gooseai/"}, "DatadogLogsLoader": {"Datadog Logs": "https://python.langchain.com/docs/integrations/providers/datadog_logs/", "datadog_logs.md": "https://python.langchain.com/docs/integrations/document_loaders/datadog_logs/"}, "ApifyDatasetLoader": {"Apify": "https://python.langchain.com/docs/integrations/providers/apify/", "apify_dataset.md": "https://python.langchain.com/docs/integrations/document_loaders/apify_dataset/"}, "NLPCloud": {"NLPCloud": "https://python.langchain.com/docs/integrations/providers/nlpcloud/", "get a token: https://docs.nlpcloud.com/#authentication": "https://python.langchain.com/docs/integrations/llms/nlpcloud/"}, "SemaDB": {"SemaDB": "https://python.langchain.com/docs/integrations/providers/semadb/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/"}, "GitbookLoader": {"GitBook": "https://python.langchain.com/docs/integrations/providers/gitbook/", "show second document": "https://python.langchain.com/docs/integrations/document_loaders/gitbook/"}, "VoyageAIRerank": {"VoyageAI": "https://python.langchain.com/docs/integrations/providers/voyageai/", "OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/voyageai-reranker/"}, "Rockset": {"Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/", "output length: 4": "https://python.langchain.com/docs/integrations/vectorstores/rockset/"}, "RocksetLoader": {"Rockset": "https://python.langchain.com/docs/integrations/providers/rockset/", "Loading Documents {#loading-documents}": "https://python.langchain.com/docs/integrations/document_loaders/rockset/"}, "Minimax": {"Minimax": "https://python.langchain.com/docs/integrations/providers/minimax/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/minimax/"}, "UnstructuredAPIFileIOLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredAPIFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "# Install package": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file/"}, "UnstructuredCHMLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredCSVLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "csv.md": "https://python.langchain.com/docs/integrations/document_loaders/csv/"}, "UnstructuredEmailLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "email.md": "https://python.langchain.com/docs/integrations/document_loaders/email/"}, "UnstructuredEPubLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "epub.md": "https://python.langchain.com/docs/integrations/document_loaders/epub/"}, "UnstructuredFileIOLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "folder_id='1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5'": "https://python.langchain.com/docs/integrations/document_loaders/google_drive/"}, "UnstructuredFileLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "# Install package": "https://python.langchain.com/docs/integrations/document_loaders/unstructured_file/"}, "UnstructuredHTMLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "UnstructuredImageLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "image.md": "https://python.langchain.com/docs/integrations/document_loaders/image/"}, "UnstructuredMarkdownLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/", "Markdown": "https://python.langchain.com/docs/modules/data_connection/document_loaders/markdown/"}, "UnstructuredODTLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "odt.md": 
"https://python.langchain.com/docs/integrations/document_loaders/odt/"}, "UnstructuredOrgModeLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "org_mode.md": "https://python.langchain.com/docs/integrations/document_loaders/org_mode/"}, "UnstructuredPDFLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "UnstructuredRSTLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "rst.md": "https://python.langchain.com/docs/integrations/document_loaders/rst/"}, "UnstructuredRTFLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/"}, "UnstructuredTSVLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "tsv.md": "https://python.langchain.com/docs/integrations/document_loaders/tsv/"}, "UnstructuredURLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "UnstructuredXMLLoader": {"Unstructured": "https://python.langchain.com/docs/integrations/providers/unstructured/", "xml.md": "https://python.langchain.com/docs/integrations/document_loaders/xml/"}, "SelfHostedPipeline": {"Runhouse": "https://python.langchain.com/docs/integrations/providers/runhouse/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/"}, "SelfHostedHuggingFaceLLM": {"Runhouse": "https://python.langchain.com/docs/integrations/providers/runhouse/", "For an on-demand A100 with GCP, Azure, or Lambda": "https://python.langchain.com/docs/integrations/llms/runhouse/"}, "MlflowCallbackHandler": {"SCENARIO 1 - LLM": "https://python.langchain.com/docs/integrations/providers/mlflow_tracking/"}, "AstraDBVectorStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/"}, "AstraDBCache": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AstraDBSemanticCache": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AstraDBLoader": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/document_loaders/astradb/"}, "AstraDBStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/"}, "AstraDBByteStore": {"Astra DB": "https://python.langchain.com/docs/integrations/providers/astradb/", "astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/"}, "SpreedlyLoader": {"Spreedly": "https://python.langchain.com/docs/integrations/providers/spreedly/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/spreedly/"}, "OpenLLM": {"OpenLLM": "https://python.langchain.com/docs/integrations/providers/openllm/", "openllm.md": 
"https://python.langchain.com/docs/integrations/llms/openllm/"}, "PubMedLoader": {"PubMed": "https://python.langchain.com/docs/integrations/providers/pubmed/", "pubmed.md": "https://python.langchain.com/docs/integrations/document_loaders/pubmed/"}, "SearxSearchResults": {"SearxNG Search API": "https://python.langchain.com/docs/integrations/providers/searx/"}, "ActionServerToolkit": {"Robocorp": "https://python.langchain.com/docs/integrations/providers/robocorp/", "Install package": "https://python.langchain.com/docs/integrations/toolkits/robocorp/"}, "SpacyTextSplitter": {"spaCy": "https://python.langchain.com/docs/integrations/providers/spacy/", "atlas.md": "https://python.langchain.com/docs/integrations/vectorstores/atlas/", "This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "Modal": {"Modal": "https://python.langchain.com/docs/integrations/providers/modal/", "Register an account with Modal and get a new token.": "https://python.langchain.com/docs/integrations/llms/modal/"}, "OpenCityDataLoader": {"Geopandas": "https://python.langchain.com/docs/integrations/providers/geopandas/", "Load Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/geopandas/", "open_city_data.md": "https://python.langchain.com/docs/integrations/document_loaders/open_city_data/"}, "PGEmbedding": {"Postgres Embedding": "https://python.langchain.com/docs/integrations/providers/pg_embedding/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/pgembedding/"}, "SQLiteVSS": {"SQLite": "https://python.langchain.com/docs/integrations/providers/sqlite/", "You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/"}, "Xinference": {"Xorbits Inference (Xinference)": "https://python.langchain.com/docs/integrations/providers/xinference/", "xinference.md": "https://python.langchain.com/docs/integrations/llms/xinference/"}, "IFixitLoader": {"iFixit": "https://python.langchain.com/docs/integrations/providers/ifixit/", "ifixit.md": "https://python.langchain.com/docs/integrations/document_loaders/ifixit/"}, "AlephAlpha": {"Aleph Alpha": "https://python.langchain.com/docs/integrations/providers/aleph_alpha/", "Install the package": "https://python.langchain.com/docs/integrations/llms/aleph_alpha/"}, "PipelineAI": {"PipelineAI": "https://python.langchain.com/docs/integrations/providers/pipelineai/", "Install the package": "https://python.langchain.com/docs/integrations/llms/pipelineai/"}, "FacebookChatLoader": {"Facebook - Meta": "https://python.langchain.com/docs/integrations/providers/facebook/", "pip install pandas": "https://python.langchain.com/docs/integrations/document_loaders/facebook_chat/"}, "Epsilla": {"Epsilla": "https://python.langchain.com/docs/integrations/providers/epsilla/", "epsilla.md": "https://python.langchain.com/docs/integrations/vectorstores/epsilla/"}, "AwaDB": {"AwaDB": "https://python.langchain.com/docs/integrations/providers/awadb/", "awadb.md": "https://python.langchain.com/docs/integrations/vectorstores/awadb/"}, "ArxivLoader": {"Arxiv": "https://python.langchain.com/docs/integrations/providers/arxiv/", "arxiv.md": "https://python.langchain.com/docs/integrations/document_loaders/arxiv/"}, "BlockchainDocumentLoader": {"Alchemy": "https://python.langchain.com/docs/integrations/providers/alchemy/", "get ALCHEMY_API_KEY from https://www.alchemy.com/": 
"https://python.langchain.com/docs/integrations/document_loaders/blockchain/"}, "BlockchainType": {"Alchemy": "https://python.langchain.com/docs/integrations/providers/alchemy/", "get ALCHEMY_API_KEY from https://www.alchemy.com/": "https://python.langchain.com/docs/integrations/document_loaders/blockchain/"}, "Anyscale": {"Anyscale": "https://python.langchain.com/docs/integrations/providers/anyscale/", "anyscale.md": "https://python.langchain.com/docs/integrations/llms/anyscale/"}, "AINetworkToolkit": {"AINetwork": "https://python.langchain.com/docs/integrations/providers/ainetwork/", "IMPORTANT: If you plan to use this account in the future, make sure to save the": "https://python.langchain.com/docs/integrations/toolkits/ainetwork/"}, "StripeLoader": {"Stripe": "https://python.langchain.com/docs/integrations/providers/stripe/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/stripe/"}, "StochasticAI": {"StochasticAI": "https://python.langchain.com/docs/integrations/providers/stochasticai/", "stochasticai.md": "https://python.langchain.com/docs/integrations/llms/stochasticai/"}, "Bagel": {"BagelDB": "https://python.langchain.com/docs/integrations/providers/bageldb/", "create cluster and add texts": "https://python.langchain.com/docs/integrations/vectorstores/bageldb/"}, "TigerGraph": {"TigerGraph": "https://python.langchain.com/docs/integrations/providers/tigergraph/"}, "BlackboardLoader": {"Blackboard": "https://python.langchain.com/docs/integrations/providers/blackboard/", "blackboard.md": "https://python.langchain.com/docs/integrations/document_loaders/blackboard/"}, "YandexGPT": {"Yandex": "https://python.langchain.com/docs/integrations/providers/yandex/", "yandex.md": "https://python.langchain.com/docs/integrations/llms/yandex/"}, "LanceDB": {"LanceDB": "https://python.langchain.com/docs/integrations/providers/lancedb/", "lancedb.md": "https://python.langchain.com/docs/integrations/vectorstores/lancedb/", "Vector stores": "https://python.langchain.com/docs/modules/data_connection/vectorstores/index/"}, "UpstashRedisCache": {"Upstash Redis": "https://python.langchain.com/docs/integrations/providers/upstash/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "NucliaTextTransformer": {"Nuclia": "https://python.langchain.com/docs/integrations/providers/nuclia/", "nuclia_transformer.md": "https://python.langchain.com/docs/integrations/document_transformers/nuclia_transformer/"}, "AnalyticDB": {"AnalyticDB": "https://python.langchain.com/docs/integrations/providers/analyticdb/", "Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "analyticdb.md": "https://python.langchain.com/docs/integrations/vectorstores/analyticdb/"}, "GoogleApiYoutubeLoader": {"YouTube": "https://python.langchain.com/docs/integrations/providers/youtube/", "Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/"}, "PromptLayerOpenAI": {"PromptLayer": "https://python.langchain.com/docs/integrations/providers/promptlayer/", "promptlayer_openai.md": "https://python.langchain.com/docs/integrations/llms/promptlayer_openai/"}, "USearch": {"USearch": "https://python.langchain.com/docs/integrations/providers/usearch/", "usearch.md": "https://python.langchain.com/docs/integrations/vectorstores/usearch/"}, "EtherscanLoader": {"Etherscan": 
"https://python.langchain.com/docs/integrations/providers/etherscan/", "etherscan.md": "https://python.langchain.com/docs/integrations/document_loaders/etherscan/"}, "Arcee": {"Arcee": "https://python.langchain.com/docs/integrations/providers/arcee/", "Create an instance of the Arcee class": "https://python.langchain.com/docs/integrations/llms/arcee/"}, "WhyLabsCallbackHandler": {"you don't need to call close to write profiles to WhyLabs, upload will occur periodically, but to demo let's not wait.": "https://python.langchain.com/docs/integrations/providers/whylabs_profiling/"}, "IuguLoader": {"Iugu": "https://python.langchain.com/docs/integrations/providers/iugu/", "Create a vectorstore retriever from the loader": "https://python.langchain.com/docs/integrations/document_loaders/iugu/"}, "CouchbaseLoader": {"Couchbase": "https://python.langchain.com/docs/integrations/providers/couchbase/", "query is a valid SQL++ query": "https://python.langchain.com/docs/integrations/document_loaders/couchbase/"}, "FlyteCallbackHandler": {"Flyte": "https://python.langchain.com/docs/integrations/providers/flyte/"}, "wandb_tracing_enabled": {"wandb documentation to configure wandb using env variables": "https://python.langchain.com/docs/integrations/providers/wandb_tracing/"}, "ManifestWrapper": {"Hazy Research": "https://python.langchain.com/docs/integrations/providers/hazy_research/", "Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "OntotextGraphDBGraph": {"Ontotext GraphDB": "https://python.langchain.com/docs/integrations/providers/ontotext_graphdb/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/"}, "OntotextGraphDBQAChain": {"Ontotext GraphDB": "https://python.langchain.com/docs/integrations/providers/ontotext_graphdb/", "feeding the schema using a user construct query": "https://python.langchain.com/docs/integrations/graphs/ontotext/"}, "Marqo": {"Marqo": "https://python.langchain.com/docs/integrations/providers/marqo/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/"}, "IMSDbLoader": {"IMSDb": "https://python.langchain.com/docs/integrations/providers/imsdb/", "imsdb.md": "https://python.langchain.com/docs/integrations/document_loaders/imsdb/"}, "TiDBLoader": {"TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/", "copy from tidb cloud console\uff0creplace it with your own": "https://python.langchain.com/docs/integrations/document_loaders/tidb/"}, "TiDBVectorStore": {"TiDB": "https://python.langchain.com/docs/integrations/providers/tidb/", "Here we useimport getpass": "https://python.langchain.com/docs/integrations/vectorstores/tidb_vector/"}, "DeepInfra": {"DeepInfra": "https://python.langchain.com/docs/integrations/providers/deepinfra/", "get a new token: https://deepinfra.com/login?from=%2Fdash": "https://python.langchain.com/docs/integrations/llms/deepinfra/"}, "RedditPostsLoader": {"Reddit": "https://python.langchain.com/docs/integrations/providers/reddit/", "load using 'subreddit' mode": "https://python.langchain.com/docs/integrations/document_loaders/reddit/"}, "TrelloLoader": {"Trello": "https://python.langchain.com/docs/integrations/providers/trello/", "If you have already set the API key and token using environment variables,": "https://python.langchain.com/docs/integrations/document_loaders/trello/"}, "AtlasDB": {"Atlas": "https://python.langchain.com/docs/integrations/providers/atlas/", "atlas.md": 
"https://python.langchain.com/docs/integrations/vectorstores/atlas/"}, "SKLearnVectorStore": {"scikit-learn": "https://python.langchain.com/docs/integrations/providers/sklearn/", "# if you plan to use bson serialization, install also:": "https://python.langchain.com/docs/integrations/vectorstores/sklearn/"}, "EverNoteLoader": {"EverNote": "https://python.langchain.com/docs/integrations/providers/evernote/", "lxml and html2text are required to parse EverNote notes": "https://python.langchain.com/docs/integrations/document_loaders/evernote/"}, "VDMS": {"VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/"}, "VDMS_Client": {"VDMS": "https://python.langchain.com/docs/integrations/providers/vdms/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/vdms/"}, "TwitterTweetLoader": {"Twitter": "https://python.langchain.com/docs/integrations/providers/twitter/", "Or load from access token and consumer keys": "https://python.langchain.com/docs/integrations/document_loaders/twitter/"}, "DiscordChatLoader": {"Discord": "https://python.langchain.com/docs/integrations/providers/discord/", "discord.md": "https://python.langchain.com/docs/integrations/document_loaders/discord/"}, "AssemblyAIAudioTranscriptLoader": {"AssemblyAI": "https://python.langchain.com/docs/integrations/providers/assemblyai/", "or a local file path: audio_file = \"./nbc.mp3\"": "https://python.langchain.com/docs/integrations/document_loaders/assemblyai/"}, "RedisCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "RedisSemanticCache": {"Redis": "https://python.langchain.com/docs/integrations/providers/redis/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "Kinetica": {"Kinetica": "https://python.langchain.com/docs/integrations/providers/kinetica/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/kinetica/"}, "ClearMLCallbackHandler": {"Setup and use the ClearML Callback": "https://python.langchain.com/docs/integrations/providers/clearml_tracking/"}, "create_cohere_react_agent": {"Cohere": "https://python.langchain.com/docs/integrations/providers/cohere/"}, "SlackDirectoryLoader": {"Slack": "https://python.langchain.com/docs/integrations/providers/slack/", "Optionally set your Slack URL. 
This will give you proper URLs in the docs sources.": "https://python.langchain.com/docs/integrations/document_loaders/slack/"}, "Ollama": {"Ollama": "https://python.langchain.com/docs/integrations/providers/ollama/", "ollama.md": "https://python.langchain.com/docs/integrations/llms/ollama/", "Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/"}, "HNLoader": {"Hacker News": "https://python.langchain.com/docs/integrations/providers/hacker_news/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_spanner/", "hacker_news.md": "https://python.langchain.com/docs/integrations/document_loaders/hacker_news/"}, "CTranslate2": {"CTranslate2": "https://python.langchain.com/docs/integrations/providers/ctranslate2/", "conversation can take several minutes": "https://python.langchain.com/docs/integrations/llms/ctranslate2/"}, "QianfanLLMEndpoint": {"Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "baidu_qianfan_endpoint.md": "https://python.langchain.com/docs/integrations/llms/baidu_qianfan_endpoint/"}, "BESVectorStore": {"Baidu": "https://python.langchain.com/docs/integrations/providers/baidu/", "Create a bes instance and index docs.": "https://python.langchain.com/docs/integrations/vectorstores/baiducloud_vector_search/"}, "Aphrodite": {"PygmalionAI": "https://python.langchain.com/docs/integrations/providers/pygmalionai/", "%pip list | grep aphrodite": "https://python.langchain.com/docs/integrations/llms/aphrodite/"}, "PaiEasEndpoint": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "alibabacloud_pai_eas_endpoint.md": "https://python.langchain.com/docs/integrations/llms/alibabacloud_pai_eas_endpoint/"}, "MaxComputeLoader": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "alibaba_cloud_maxcompute.md": "https://python.langchain.com/docs/integrations/document_loaders/alibaba_cloud_maxcompute/"}, "AlibabaCloudOpenSearch": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/"}, "AlibabaCloudOpenSearchSettings": {"Alibaba Cloud": "https://python.langchain.com/docs/integrations/providers/alibaba_cloud/", "for example": "https://python.langchain.com/docs/integrations/vectorstores/alibabacloud_opensearch/"}, "DocusaurusLoader": {"Docusaurus": "https://python.langchain.com/docs/integrations/providers/docusaurus/", "fixes a bug with asyncio and jupyter": "https://python.langchain.com/docs/integrations/document_loaders/docusaurus/"}, "Annoy": {"Annoy": "https://python.langchain.com/docs/integrations/providers/annoy/", "default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/"}, "BibtexLoader": {"BibTeX": "https://python.langchain.com/docs/integrations/providers/bibtex/", "Create a dummy bibtex file and download a pdf.": "https://python.langchain.com/docs/integrations/document_loaders/bibtex/"}, "Cassandra": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "cassandra.md": 
"https://python.langchain.com/docs/integrations/vectorstores/cassandra/"}, "CassandraCache": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CassandraSemanticCache": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CassandraLoader": {"Cassandra": "https://python.langchain.com/docs/integrations/providers/cassandra/", "cassandra.md": "https://python.langchain.com/docs/integrations/document_loaders/cassandra/"}, "Vearch": {"Vearch": "https://python.langchain.com/docs/integrations/providers/vearch/", "OR": "https://python.langchain.com/docs/integrations/vectorstores/vearch/"}, "JoplinLoader": {"Joplin": "https://python.langchain.com/docs/integrations/providers/joplin/", "joplin.md": "https://python.langchain.com/docs/integrations/document_loaders/joplin/"}, "ArthurCallbackHandler": {"arthur_tracking.md": "https://python.langchain.com/docs/integrations/providers/arthur_tracking/"}, "AcreomLoader": {"Acreom": "https://python.langchain.com/docs/integrations/providers/acreom/", "acreom.md": "https://python.langchain.com/docs/integrations/document_loaders/acreom/"}, "KDBAI": {"KDB.AI": "https://python.langchain.com/docs/integrations/providers/kdbai/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/"}, "DuckDBLoader": {"DuckDB": "https://python.langchain.com/docs/integrations/providers/duckdb/", "duckdb.md": "https://python.langchain.com/docs/integrations/document_loaders/duckdb/"}, "Petals": {"Petals": "https://python.langchain.com/docs/integrations/providers/petals/", "this can take several minutes to download big files!": "https://python.langchain.com/docs/integrations/llms/petals/"}, "MomentoCache": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "MomentoVectorIndex": {"Momento": "https://python.langchain.com/docs/integrations/providers/momento/", "Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/momento_vector_index/"}, "NIBittensorLLM": {"Bittensor": "https://python.langchain.com/docs/integrations/providers/bittensor/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/"}, "Neo4jVector": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "Neo4jGraph": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "GraphCypherQAChain": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/", "How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/neo4j_cypher/", "Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/prompting/"}, "DiffbotGraphTransformer": {"Neo4j": "https://python.langchain.com/docs/integrations/providers/neo4j/", "diffbot.md": "https://python.langchain.com/docs/integrations/graphs/diffbot/"}, "AirtableLoader": {"Airtable": "https://python.langchain.com/docs/integrations/providers/airtable/", "airtable.md": "https://python.langchain.com/docs/integrations/document_loaders/airtable/"}, "LarkSuiteDocLoader": {"ByteDance": "https://python.langchain.com/docs/integrations/providers/byte_dance/", "see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/"}, "JavelinAIGateway": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "JavelinAIGatewayEmbeddings": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "ChatJavelinAIGateway": {"Javelin AI Gateway": "https://python.langchain.com/docs/integrations/providers/javelin_ai_gateway/", "Step 4: Embeddings Example {#step-4-embeddings-example}": "https://python.langchain.com/docs/integrations/llms/javelin/"}, "TensorflowDatasetLoader": {"TensorFlow Datasets": "https://python.langchain.com/docs/integrations/providers/tensorflow_datasets/", "Feature structure of `mlqa/en` dataset:": "https://python.langchain.com/docs/integrations/document_loaders/tensorflow_datasets/"}, "Clarifai": {"Clarifai": "https://python.langchain.com/docs/integrations/providers/clarifai/", "Dependencies {#dependencies}": "https://python.langchain.com/docs/integrations/llms/clarifai/"}, "DataheraldTextToSQL": {"Dataherald": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/dataherald-checkpoint/"}, "RoamLoader": {"Roam": "https://python.langchain.com/docs/integrations/providers/roam/", "roam.md": "https://python.langchain.com/docs/integrations/document_loaders/roam/"}, "create_openai_tools_agent": {"Construct the OpenAI Tools agent": "https://python.langchain.com/docs/integrations/providers/portkey/logging_tracing_portkey/", "Portkey": "https://python.langchain.com/docs/integrations/providers/portkey/index/", "sql_database.md": "https://python.langchain.com/docs/integrations/toolkits/sql_database/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/slack/", "conversational_retrieval_agents.md": "https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/tool_usage/", "Get the prompt to use - you can modify this!": 
"https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "CONDENSE_QUESTION_PROMPT": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "load_qa_with_sources_chain": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "QA_PROMPT": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/providers/vectara/vectara_chat/"}, "Chroma": {"Chroma": "https://python.langchain.com/docs/integrations/providers/.ipynb_checkpoints/chroma-checkpoint/", "You need the dgml-utils package to use the DocugamiLoader (run pip install directly without \"poetry run\" if you are not using poetry)": "https://python.langchain.com/docs/integrations/document_loaders/docugami/", "Retrievers": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/index-checkpoint/"}, "RedisStore": {"redis.md": "https://python.langchain.com/docs/integrations/stores/redis/"}, "InMemoryByteStore": {"in_memory.md": "https://python.langchain.com/docs/integrations/stores/in_memory/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/", "The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/"}, "LocalFileStore": {"file_system.md": "https://python.langchain.com/docs/integrations/stores/file_system/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/"}, "CacheBackedEmbeddings": {"astradb.md": "https://python.langchain.com/docs/integrations/stores/astradb/", "Swapping the `ByteStore` {#swapping-the-bytestore}": "https://python.langchain.com/docs/modules/data_connection/text_embedding/caching_embeddings/"}, "UpstashRedisByteStore": {"upstash_redis.md": "https://python.langchain.com/docs/integrations/stores/upstash_redis/"}, "ConneryToolkit": {"Specify your Connery Runner credentials.": "https://python.langchain.com/docs/integrations/toolkits/connery/"}, "create_csv_agent": {"Create a dataframe": "https://python.langchain.com/docs/integrations/toolkits/csv/"}, "create_xorbits_agent": {"xorbits.md": "https://python.langchain.com/docs/integrations/toolkits/xorbits/"}, "JiraToolkit": {"jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/"}, "JiraAPIWrapper": {"jira.md": "https://python.langchain.com/docs/integrations/toolkits/jira/"}, "create_spark_dataframe_agent": {"in apache-spark root directory. 
(tested here with \"spark-3.4.0-bin-hadoop3 and later\")": "https://python.langchain.com/docs/integrations/toolkits/spark/"}, "PyPDFLoader": {"document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "astradb.md": "https://python.langchain.com/docs/integrations/vectorstores/astradb/", "cassandra.md": "https://python.langchain.com/docs/integrations/vectorstores/cassandra/", "Clean up KDB.AI \"documents\" table and index for similarity search": "https://python.langchain.com/docs/integrations/vectorstores/kdbai/", "initialize MongoDB python client": "https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas/", "merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/", "google_cloud_storage_file.md": "https://python.langchain.com/docs/integrations/document_loaders/google_cloud_storage_file/", "PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "set_debug": {"document_comparison_toolkit.md": "https://python.langchain.com/docs/integrations/toolkits/document_comparison_toolkit/", "System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model": "https://python.langchain.com/docs/integrations/llms/bittensor/", "textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/"}, "PythonREPLTool": {"Define the neural network": "https://python.langchain.com/docs/integrations/toolkits/python/"}, "create_pbi_agent": {"fictional example": "https://python.langchain.com/docs/integrations/toolkits/powerbi/"}, "AzureCognitiveServicesToolkit": {"For Windows/Linux": "https://python.langchain.com/docs/integrations/toolkits/azure_cognitive_services/"}, "Requests": {"Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/"}, "NLAToolkit": {"Select the LLM to use. Here, we use gpt-3.5-turbo-instruct": "https://python.langchain.com/docs/integrations/toolkits/openapi_nla/"}, "build_resource_service": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "get_gmail_credentials": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/gmail/"}, "SlackToolkit": {"Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/slack/"}, "SteamToolkit": {"steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/"}, "SteamWebAPIWrapper": {"steam.md": "https://python.langchain.com/docs/integrations/toolkits/steam/"}, "create_json_agent": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/"}, "JsonToolkit": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/"}, "JsonSpec": {"json.md": "https://python.langchain.com/docs/integrations/toolkits/json/", "NOTE: In this example. 
We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "AirbyteStripeLoader": {"airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "airbyte_stripe.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_stripe/"}, "create_pandas_dataframe_agent": {"airbyte_structured_qa.md": "https://python.langchain.com/docs/integrations/toolkits/airbyte_structured_qa/", "pandas.md": "https://python.langchain.com/docs/integrations/toolkits/pandas/", "Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/"}, "GitHubToolkit": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/"}, "GitHubAPIWrapper": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/"}, "ConversationSummaryBufferMemory": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "We can see here that there is a summary of the conversation and then some previous interactions": "https://python.langchain.com/docs/modules/memory/types/summary_buffer/"}, "render_text_description_and_args": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/github/", "Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "ClickupToolkit": {"Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/"}, "ClickupAPIWrapper": {"Copilot Sandbox": "https://python.langchain.com/docs/integrations/toolkits/clickup/"}, "create_spark_sql_agent": {"Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "SparkSQLToolkit": {"Note, you can also connect to Spark via Spark connect. For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "SparkSQL": {"Note, you can also connect to Spark via Spark connect. 
For example:": "https://python.langchain.com/docs/integrations/toolkits/spark_sql/"}, "PlayWrightBrowserToolkit": {"If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/"}, "create_async_playwright_browser": {"If this is your first time using playwright, you'll have to install a browser executable.": "https://python.langchain.com/docs/integrations/toolkits/playwright/"}, "create_conversational_retrieval_agent": {"cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/"}, "CogniswitchToolkit": {"cogniswitch.md": "https://python.langchain.com/docs/integrations/toolkits/cogniswitch/"}, "NasaToolkit": {"nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/"}, "NasaAPIWrapper": {"nasa.md": "https://python.langchain.com/docs/integrations/toolkits/nasa/"}, "MultionToolkit": {"Authorize connection to your Browser extention": "https://python.langchain.com/docs/integrations/toolkits/multion/"}, "AmadeusToolkit": {"Set environmental variables here": "https://python.langchain.com/docs/integrations/toolkits/amadeus/"}, "AzureAiServicesToolkit": {"azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/"}, "create_structured_chat_agent": {"azure_ai_services.md": "https://python.langchain.com/docs/integrations/toolkits/azure_ai_services/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/structured_chat/"}, "reduce_openapi_spec": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "RequestsWrapper": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "create_openapi_agent": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "OpenAPIToolkit": {"NOTE: In this example. We must set `allow_dangerous_request=True` to enable the OpenAPI Agent to automatically use the Request Tool.": "https://python.langchain.com/docs/integrations/toolkits/openapi/"}, "GitLabToolkit": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/"}, "GitLabAPIWrapper": {"Set your environment variables using os.environ": "https://python.langchain.com/docs/integrations/toolkits/gitlab/"}, "PolygonToolkit": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/integrations/toolkits/polygon/"}, "ApacheDorisSettings": {"load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/apache_doris/"}, "DistanceStrategy": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/", "# Automatically restart kernel after installs so that your environment can access the new packages": "https://python.langchain.com/docs/integrations/vectorstores/google_bigquery_vector_search/", "Create collection if running for the first time. 
If the collection": "https://python.langchain.com/docs/integrations/vectorstores/semadb/"}, "KineticaSettings": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/kinetica/"}, "SentenceTransformerEmbeddings": {"You need to install sqlite-vss as a dependency.": "https://python.langchain.com/docs/integrations/vectorstores/sqlitevss/", "docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/", "import": "https://python.langchain.com/docs/integrations/vectorstores/chroma/"}, "Vald": {"Refresh is required for server use": "https://python.langchain.com/docs/integrations/vectorstores/vald/"}, "RetrievalQAWithSourcesChain": {"install package": "https://python.langchain.com/docs/integrations/vectorstores/weaviate/", "Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/", "cosine: distance metric": "https://python.langchain.com/docs/integrations/vectorstores/jaguar/", "Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/neo4jvector/", "initialize marqo": "https://python.langchain.com/docs/integrations/vectorstores/marqo/", "Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "Yellowbrick": {"Install all needed libraries": "https://python.langchain.com/docs/integrations/vectorstores/yellowbrick/"}, "LLMRails": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/vectorstores/llm_rails/"}, "HanaDB": {"Pip install necessary package": "https://python.langchain.com/docs/integrations/vectorstores/sap_hanavector/"}, "VectorSearchVectorStoreDatastore": {"TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/"}, "VertexAI": {"TODO : Set values as per your requirements": "https://python.langchain.com/docs/integrations/vectorstores/google_vertex_ai_vector_search/", "google_vertex_ai_palm.md": "https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm/"}, "NucliaDB": {"nucliadb.md": "https://python.langchain.com/docs/integrations/vectorstores/nucliadb/"}, "Hippo": {"openai": "https://python.langchain.com/docs/integrations/vectorstores/hippo/"}, "RedisText": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisNum": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisTag": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "RedisFilter": {"connection to redis standalone at localhost, db 0, no password": "https://python.langchain.com/docs/integrations/vectorstores/redis/"}, "VespaStore": {"docs[0].metadata[\"id\"] == \"id:testapp:testapp::32\"": "https://python.langchain.com/docs/integrations/vectorstores/vespa/"}, "CosmosDBSimilarityType": {"Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "CosmosDBVectorSearchType": 
{"Set up the OpenAI Environment Variables": "https://python.langchain.com/docs/integrations/vectorstores/azure_cosmos_db/", "To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "NeuralDBVectorStore": {"From scratch": "https://python.langchain.com/docs/integrations/vectorstores/thirdai_neuraldb/"}, "VikingDB": {"vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/"}, "VikingDBConfig": {"vikingdb.md": "https://python.langchain.com/docs/integrations/vectorstores/vikingdb/"}, "InMemoryDocstore": {"default metric is angular": "https://python.langchain.com/docs/integrations/vectorstores/annoy/", "Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/", "Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/"}, "CouchbaseVectorStore": {"Wait until the cluster is ready for use.": "https://python.langchain.com/docs/integrations/vectorstores/couchbase/"}, "VLite": {"Load the document and split it into chunks": "https://python.langchain.com/docs/integrations/vectorstores/vlite/"}, "DuckDB": {"duckdb.md": "https://python.langchain.com/docs/integrations/vectorstores/duckdb/"}, "StarRocksSettings": {"load text splitter and split docs into snippets of text": "https://python.langchain.com/docs/integrations/vectorstores/starrocks/"}, "PathwayVectorClient": {"take into account only sources modified later than unix timestamp": "https://python.langchain.com/docs/integrations/vectorstores/pathway/"}, "DocArrayHnswSearch": {"Get an OpenAI token: https://platform.openai.com/account/api-keys": "https://python.langchain.com/docs/integrations/vectorstores/docarray_hnsw/"}, "TileDB": {"tiledb.md": "https://python.langchain.com/docs/integrations/vectorstores/tiledb/"}, "EcloudESVectorStore": {"ecloud_vector_search.md": "https://python.langchain.com/docs/integrations/vectorstores/ecloud_vector_search/"}, "SurrealDBStore": {"%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/vectorstores/surrealdb/"}, "ElasticVectorSearch": {"Metadata {#metadata}": "https://python.langchain.com/docs/integrations/vectorstores/elasticsearch/"}, "PGVecto_rs": {"Run tests with shell:": "https://python.langchain.com/docs/integrations/vectorstores/pgvecto_rs/"}, "JSONLoader": {"Pip install necessary packages": "https://python.langchain.com/docs/integrations/vectorstores/timescalevector/", "JSON": "https://python.langchain.com/docs/modules/data_connection/document_loaders/json/"}, "CollectionConfig": {"Collection config is needed if we're creating a new Zep Collection": "https://python.langchain.com/docs/integrations/vectorstores/zep/"}, "BaiduVectorDB": {"baiduvectordb.md": "https://python.langchain.com/docs/integrations/vectorstores/baiduvectordb/"}, "openai": {"openai-old.md": "https://python.langchain.com/docs/integrations/adapters/openai-old/", "openai.md": "https://python.langchain.com/docs/integrations/adapters/openai/"}, "AsyncChromiumLoader": {"Load HTML": "https://python.langchain.com/docs/integrations/document_transformers/beautiful_soup/", "async_chromium.md": "https://python.langchain.com/docs/integrations/document_loaders/async_chromium/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "BeautifulSoupTransformer": {"Load HTML": 
"https://python.langchain.com/docs/integrations/document_transformers/beautiful_soup/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "OpenVINOReranker": {"Helper function for printing docs": "https://python.langchain.com/docs/integrations/document_transformers/openvino_rerank/"}, "create_metadata_tagger": {"Must be an OpenAI model that supports functions": "https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger/"}, "DoctranPropertyExtractor": {"doctran_extract_properties.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_extract_properties/"}, "DoctranQATransformer": {"doctran_interrogate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_interrogate_document/"}, "CrossEncoderReranker": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/"}, "HuggingFaceCrossEncoder": {"OR (depending on Python version)": "https://python.langchain.com/docs/integrations/document_transformers/cross_encoder_reranker/"}, "DoctranTextTranslator": {"doctran_translate_document.md": "https://python.langchain.com/docs/integrations/document_transformers/doctran_translate_document/"}, "XorbitsLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/xorbits/"}, "OutlookMessageLoader": {"email.md": "https://python.langchain.com/docs/integrations/document_loaders/email/"}, "TranscriptFormat": {"or a local file path: audio_file = \"./nbc.mp3\"": "https://python.langchain.com/docs/integrations/document_loaders/assemblyai/"}, "AirbyteSalesforceLoader": {"airbyte_salesforce.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_salesforce/"}, "AirbyteCDKLoader": {"airbyte_cdk.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_cdk/"}, "Docx2txtLoader": {"microsoft_word.md": "https://python.langchain.com/docs/integrations/document_loaders/microsoft_word/"}, "RSpaceLoader": {"rspace.md": "https://python.langchain.com/docs/integrations/document_loaders/rspace/"}, "SeleniumURLLoader": {"url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "PlaywrightURLLoader": {"url.md": "https://python.langchain.com/docs/integrations/document_loaders/url/"}, "AirbyteJSONLoader": {"airbyte_json.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_json/"}, "GeoDataFrameLoader": {"Load Open City Data": "https://python.langchain.com/docs/integrations/document_loaders/geopandas/"}, "AirbyteTypeformLoader": {"airbyte_typeform.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_typeform/"}, "MHTMLLoader": {"Create a new loader object for the MHTML file": "https://python.langchain.com/docs/integrations/document_loaders/mhtml/"}, "NewsURLLoader": {"news.md": "https://python.langchain.com/docs/integrations/document_loaders/news/"}, "ImageCaptionLoader": {"image_captions.md": "https://python.langchain.com/docs/integrations/document_loaders/image_captions/"}, "LLMSherpaFileLoader": {"Install package": "https://python.langchain.com/docs/integrations/document_loaders/llmsherpa/"}, "NucliaLoader": {"nuclia.md": "https://python.langchain.com/docs/integrations/document_loaders/nuclia/"}, "TomlLoader": {"toml.md": 
"https://python.langchain.com/docs/integrations/document_loaders/toml/"}, "PsychicLoader": {"Uncomment this to install psychicapi if you don't already have it installed": "https://python.langchain.com/docs/integrations/document_loaders/psychic/"}, "FireCrawlLoader": {"firecrawl.md": "https://python.langchain.com/docs/integrations/document_loaders/firecrawl/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "FakeListLLM": {"see https://python.langchain.com/docs/use_cases/summarization for more details": "https://python.langchain.com/docs/integrations/document_loaders/larksuite/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "MergedDataLoader": {"merge_doc.md": "https://python.langchain.com/docs/integrations/document_loaders/merge_doc/"}, "RecursiveUrlLoader": {"Parameters {#parameters}": "https://python.langchain.com/docs/integrations/document_loaders/recursive_url/"}, "AirbyteHubspotLoader": {"airbyte_hubspot.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_hubspot/"}, "AirbyteGongLoader": {"airbyte_gong.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_gong/"}, "ReadTheDocsLoader": {"readthedocs_documentation.md": "https://python.langchain.com/docs/integrations/document_loaders/readthedocs_documentation/"}, "PolarsDataFrameLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/polars_dataframe/"}, "DataFrameLoader": {"Use lazy load for larger table, which won't read the full table into memory": "https://python.langchain.com/docs/integrations/document_loaders/pandas_dataframe/"}, "SurrealDBLoader": {"%pip install --upgrade --quiet surrealdb langchain langchain-community": "https://python.langchain.com/docs/integrations/document_loaders/surrealdb/"}, "GoogleApiClient": {"Init the GoogleApiClient": "https://python.langchain.com/docs/integrations/document_loaders/youtube_transcript/"}, "ConcurrentLoader": {"concurrent.md": "https://python.langchain.com/docs/integrations/document_loaders/concurrent/"}, "RSSFeedLoader": {"rss.md": "https://python.langchain.com/docs/integrations/document_loaders/rss/"}, "PebbloSafeLoader": {"pebblo.md": "https://python.langchain.com/docs/integrations/document_loaders/pebblo/"}, "VsdxLoader": {"vsdx.md": "https://python.langchain.com/docs/integrations/document_loaders/vsdx/"}, "NotebookLoader": {"jupyter_notebook.md": "https://python.langchain.com/docs/integrations/document_loaders/jupyter_notebook/"}, "OracleAutonomousDatabaseLoader": {"oracleadb_loader.md": "https://python.langchain.com/docs/integrations/document_loaders/oracleadb_loader/"}, "LanguageParser": {"Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/"}, "Language": {"Code for: class MyClass:": "https://python.langchain.com/docs/integrations/document_loaders/source_code/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "Full list of supported languages": "https://python.langchain.com/docs/modules/data_connection/document_transformers/code_splitter/"}, "SRTLoader": {"subtitle.md": 
"https://python.langchain.com/docs/integrations/document_loaders/subtitle/"}, "MastodonTootsLoader": {"Or set up access information to use a Mastodon app.": "https://python.langchain.com/docs/integrations/document_loaders/mastodon/"}, "AirbyteShopifyLoader": {"airbyte_shopify.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_shopify/"}, "GlueCatalogLoader": {"glue_catalog.md": "https://python.langchain.com/docs/integrations/document_loaders/glue_catalog/"}, "PySparkDataFrameLoader": {"pyspark_dataframe.md": "https://python.langchain.com/docs/integrations/document_loaders/pyspark_dataframe/"}, "AirbyteZendeskSupportLoader": {"airbyte_zendesk_support.md": "https://python.langchain.com/docs/integrations/document_loaders/airbyte_zendesk_support/"}, "CoNLLULoader": {"conll-u.md": "https://python.langchain.com/docs/integrations/document_loaders/conll-u/"}, "MongodbLoader": {"add this import for running in jupyter notebook": "https://python.langchain.com/docs/integrations/document_loaders/mongodb/"}, "SitemapLoader": {"fixes a bug with asyncio and jupyter": "https://python.langchain.com/docs/integrations/document_loaders/sitemap/"}, "YuqueLoader": {"yuque.md": "https://python.langchain.com/docs/integrations/document_loaders/yuque/"}, "QuipLoader": {"quip.md": "https://python.langchain.com/docs/integrations/document_loaders/quip/"}, "MemgraphGraph": {"Creating and executing the seeding query": "https://python.langchain.com/docs/integrations/graphs/memgraph/"}, "GraphSparqlQAChain": {"rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/"}, "RdfGraph": {"rdflib_sparql.md": "https://python.langchain.com/docs/integrations/graphs/rdflib_sparql/"}, "NebulaGraphQAChain": {"connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/"}, "NebulaGraph": {"connect ngql jupyter extension to nebulagraph": "https://python.langchain.com/docs/integrations/graphs/nebula_graph/"}, "GremlinQAChain": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GremlinGraph": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GraphDocument": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "Node": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "Relationship": {"The underlying python-gremlin has a problem when running in notebook": "https://python.langchain.com/docs/integrations/graphs/azure_cosmosdb_gremlin/"}, "GraphIndexCreator": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "GraphQAChain": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "NetworkxEntityGraph": {"networkx.md": "https://python.langchain.com/docs/integrations/graphs/networkx/"}, "HugeGraphQAChain": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/hugegraph/"}, "HugeGraph": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/hugegraph/"}, "AGEGraph": {"How many people played in Top Gun?": "https://python.langchain.com/docs/integrations/graphs/apache_age/"}, "NeptuneSparqlQAChain": {"Optionally 
change the schema": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_sparql/"}, "NeptuneRdfGraph": {"Optionally change the schema": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_sparql/"}, "NeptuneGraph": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "NeptuneAnalyticsGraph": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "NeptuneOpenCypherQAChain": {"amazon_neptune_open_cypher.md": "https://python.langchain.com/docs/integrations/graphs/amazon_neptune_open_cypher/"}, "KuzuQAChain": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/"}, "KuzuGraph": {"graph.refresh_schema()": "https://python.langchain.com/docs/integrations/graphs/kuzu_db/"}, "FalkorDBQAChain": {"falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/"}, "FalkorDBGraph": {"falkordb.md": "https://python.langchain.com/docs/integrations/graphs/falkordb/"}, "ConversationBufferWindowMemory": {"Setup {#setup}": "https://python.langchain.com/docs/integrations/llms/baseten/", "install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Conversation Buffer Window": "https://python.langchain.com/docs/modules/memory/types/buffer_window/"}, "Solar": {"solar.md": "https://python.langchain.com/docs/integrations/llms/solar/"}, "IpexLLM": {"Update Langchain": "https://python.langchain.com/docs/integrations/llms/ipex_llm/"}, "SagemakerEndpoint": {"sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "LLMContentHandler": {"sagemaker.md": "https://python.langchain.com/docs/integrations/llms/sagemaker/", "Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "OctoAIEndpoint": {"octoai.md": "https://python.langchain.com/docs/integrations/llms/octoai/"}, "TextGen": {"textgen.md": "https://python.langchain.com/docs/integrations/llms/textgen/"}, "MosaicML": {"sign up for an account: https://forms.mosaicml.com/demo?utm_source=langchain": "https://python.langchain.com/docs/integrations/llms/mosaicml/"}, "VolcEngineMaasLLM": {"Install the package": "https://python.langchain.com/docs/integrations/llms/volcengine_maas/"}, "KoboldApiLLM": {"koboldai.md": "https://python.langchain.com/docs/integrations/llms/koboldai/"}, "Konko": {"konko.md": "https://python.langchain.com/docs/integrations/llms/konko/"}, "AsyncCallbackHandler": {"Guardrails for Amazon Bedrock with trace": "https://python.langchain.com/docs/integrations/llms/bedrock/", "To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "set_verbose": {"install the opaqueprompts and langchain packages": "https://python.langchain.com/docs/integrations/llms/opaqueprompts/", "Debugging": "https://python.langchain.com/docs/guides/development/debugging/"}, "OpaquePrompts": {"install the opaqueprompts and langchain packages": 
"https://python.langchain.com/docs/integrations/llms/opaqueprompts/"}, "TitanTakeoff": {"Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood": "https://python.langchain.com/docs/integrations/llms/titan_takeoff/"}, "Friendli": {"friendli.md": "https://python.langchain.com/docs/integrations/llms/friendli/"}, "Databricks": {"If running a Databricks notebook attached to an interactive cluster in \"single user\"": "https://python.langchain.com/docs/integrations/llms/databricks/"}, "LMFormatEnforcer": {"lmformatenforcer_experimental.md": "https://python.langchain.com/docs/integrations/llms/lmformatenforcer_experimental/"}, "VLLM": {"vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/"}, "VLLMOpenAI": {"vllm.md": "https://python.langchain.com/docs/integrations/llms/vllm/"}, "CustomOpenAIContentFormatter": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "ContentFormatterBase": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "DollyContentFormatter": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "load_llm": {"azure_ml.md": "https://python.langchain.com/docs/integrations/llms/azure_ml/"}, "MapReduceChain": {"Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "ModelLaboratory": {"Map reduce example": "https://python.langchain.com/docs/integrations/llms/manifest/"}, "RELLM": {"We'll choose a regex that matches to a structured json string that looks like:": "https://python.langchain.com/docs/integrations/llms/rellm_experimental/"}, "Yuan2": {"default infer_api for a local deployed Yuan2.0 inference server": "https://python.langchain.com/docs/integrations/llms/yuan2/"}, "InMemoryCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/modules/model_io/llms/llm_caching/", "": "https://python.langchain.com/docs/modules/model_io/chat/chat_model_caching/"}, "GPTCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "SQLAlchemyCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "AzureCosmosDBSemanticCache": {"To make the caching really obvious, lets use a slower model.": "https://python.langchain.com/docs/integrations/llms/llm_caching/"}, "SparkLLM": {"Load the model": "https://python.langchain.com/docs/integrations/llms/sparkllm/"}, "Moonshot": {"Generate your api key from: https://platform.moonshot.cn/console/api-keys": "https://python.langchain.com/docs/integrations/llms/moonshot/"}, "OpenLM": {"Uncomment to install openlm and openai if you haven't already": "https://python.langchain.com/docs/integrations/llms/openlm/"}, "CloudflareWorkersAI": {"Using streaming": "https://python.langchain.com/docs/integrations/llms/cloudflare_workersai/"}, "ChatGLM3": {"Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/"}, "ChatGLM": {"Install required dependencies": "https://python.langchain.com/docs/integrations/llms/chatglm/"}, "Llamafile": {"llamafile.md": "https://python.langchain.com/docs/integrations/llms/llamafile/", "Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/", "Make sure the model path is correct for your system!": 
"https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "LayerupSecurity": {"Layerup Security": "https://python.langchain.com/docs/guides/productionization/safety/layerup_security/"}, "JsonFormer": {"jsonformer_experimental.md": "https://python.langchain.com/docs/integrations/llms/jsonformer_experimental/"}, "WeightOnlyQuantPipeline": {"weight_only_quantization.md": "https://python.langchain.com/docs/integrations/llms/weight_only_quantization/"}, "Replicate": {"magics to auto-reload external modules in case you are making changes to langchain while working on this notebook": "https://python.langchain.com/docs/integrations/llms/replicate/"}, "create_history_aware_retriever": {"Quickstart": "https://python.langchain.com/docs/get_started/quickstart/", "Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/code_understanding/", "import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/"}, "BaseOutputParser": {"Quickstart": "https://python.langchain.com/docs/get_started/.ipynb_checkpoints/quickstart-checkpoint/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "ConditionalPromptSelector": {"Download a llamafile from HuggingFace": "https://python.langchain.com/docs/guides/development/local_llms/"}, "DatetimeOutputParser": {"Note that we set max_retries = 0 to avoid retrying on RateLimits, etc": "https://python.langchain.com/docs/guides/productionization/fallbacks/", "datetime.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/datetime/"}, "HuggingFaceInjectionIdentifier": {"Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/"}, "load_chain": {"Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection": "https://python.langchain.com/docs/guides/productionization/safety/hugging_face_prompt_injection/"}, "FallacyChain": {"Logical Fallacy chain": "https://python.langchain.com/docs/guides/productionization/safety/logical_fallacy_chain/"}, "ModerationPiiError": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "BaseModerationConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationPiiConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationPromptSafetyConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ModerationToxicityConfig": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "BaseModerationCallbackHandler": {"Define callback handlers by subclassing BaseModerationCallbackHandler": "https://python.langchain.com/docs/guides/productionization/safety/amazon_comprehend_chain/"}, "ConstitutionalChain": {"Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/"}, 
"ConstitutionalPrinciple": {"Constitutional chain": "https://python.langchain.com/docs/guides/productionization/safety/constitutional_chain/"}, "format_document": {"QA with private data protection {#qa-with-private-data-protection}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"}, "runnable": {"Multi-language data anonymization with Microsoft Presidio {#multi-language-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/"}, "case_insensitive_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "fuzzy_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "combined_exact_fuzzy_matching_strategy": {"Reversible data anonymization with Microsoft Presidio {#reversible-data-anonymization-with-microsoft-presidio}": "https://python.langchain.com/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"}, "load_evaluator": {"Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/", "ANTHROPIC_API_KEY=": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/trajectory_eval/", "pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "The prompt was assigned to the evaluator": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_string/", "This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/", "Check for the presence of a YYYY-MM-DD string.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/regex_match/", "Correct": "https://python.langchain.com/docs/guides/productionization/evaluation/string/scoring_eval_chain/", "Alternatively": "https://python.langchain.com/docs/guides/productionization/evaluation/string/exact_match/", "The results purely character-based, so it's less useful when negation is concerned": "https://python.langchain.com/docs/guides/productionization/evaluation/string/string_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/"}, "load_dataset": {"Initialize the language model": "https://python.langchain.com/docs/guides/productionization/evaluation/examples/comparisons/"}, "AgentTrajectoryEvaluator": {"custom.md": "https://python.langchain.com/docs/guides/productionization/evaluation/trajectory/custom/"}, "EmbeddingDistance": {"pairwise_embedding_distance.md": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/", "You can load by enum or by raw python string": "https://python.langchain.com/docs/guides/productionization/evaluation/string/embedding_distance/"}, "PairwiseStringEvaluator": {"%env ANTHROPIC_API_KEY=YOUR_API_KEY": "https://python.langchain.com/docs/guides/productionization/evaluation/comparison/custom/"}, 
"Criteria": {"This is equivalent to loading using the enum": "https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain/"}, "JsonValidityEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonEqualityEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonEditDistanceEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "JsonSchemaEvaluator": {"Equivalently": "https://python.langchain.com/docs/guides/productionization/evaluation/string/json/"}, "RegexMatchStringEvaluator": {"Check for the presence of a YYYY-MM-DD string.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/regex_match/"}, "StringEvaluator": {"The perplexity is much higher since LangChain was introduced after 'gpt-2' was released and because it is never used in the following context.": "https://python.langchain.com/docs/guides/productionization/evaluation/string/custom/"}, "ExactMatchStringEvaluator": {"Alternatively": "https://python.langchain.com/docs/guides/productionization/evaluation/string/exact_match/"}, "StringDistance": {"The results purely character-based, so it's less useful when negation is concerned": "https://python.langchain.com/docs/guides/productionization/evaluation/string/string_distance/"}, "WebResearchRetriever": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/web_scraping/"}, "StuffDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/", "Get embeddings.": "https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder/"}, "MapReduceDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "ReduceDocumentsChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "AnalyzeDocumentChain": {"Set env var OPENAI_API_KEY or load from a .env file": "https://python.langchain.com/docs/use_cases/summarization/"}, "get_openapi_chain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "APIChain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "open_meteo_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "tmdb_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "podcast_docs": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "LLMRequestsChain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/apis/"}, "FewShotPromptTemplate": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/", "index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/", "Examples of a fictional translation task.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"}, "OPENAI_TEMPLATE": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_openai_data_generator": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "DatasetGenerator": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_data_generation_chain": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/"}, "create_extraction_chain_pydantic": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "PydanticOutputParser": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/data_generation/", "Set up a parser": "https://python.langchain.com/docs/use_cases/extraction/how_to/parse/", "Build a sample vectorDB": "https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever/", "Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pydantic/", "retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/"}, "create_tool_calling_agent": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/agents/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Construct the Tools agent": "https://python.langchain.com/docs/modules/agents/agent_types/tool_calling/", "!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/"}, "Runnable": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/human_in_the_loop/"}, "RunnableConfig": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/", "Run custom functions {#run-custom-functions}": "https://python.langchain.com/docs/expression_language/primitives/functions/"}, "ToolCall": {"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"": "https://python.langchain.com/docs/use_cases/tool_use/tool_error_handling/"}, "JsonOutputParser": {"If you'd like to use LangSmith, uncomment the below:": "https://python.langchain.com/docs/use_cases/tool_use/prompting/", "Define your desired data structure.": 
"https://python.langchain.com/docs/modules/model_io/output_parsers/types/json/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/"}, "ConfigurableField": {"This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/", "initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/", "batch_configurable_chain([\"ice cream\", \"spaghetti\", \"dumplings\"])": "https://python.langchain.com/docs/expression_language/why/", "Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}, "RunnableBinding": {"This will only get documents for Ankush": "https://python.langchain.com/docs/use_cases/question_answering/per_user/"}, "RunnablePick": {"Make sure the model path is correct for your system!": "https://python.langchain.com/docs/use_cases/question_answering/local_retrieval_qa/"}, "ChatMessageHistory": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/memory_management/", "agent_with_memory.md": "https://python.langchain.com/docs/modules/memory/agent_with_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/", "Chat Messages": "https://python.langchain.com/docs/modules/memory/chat_messages/index/", "Quickstart {#quickstart}": "https://python.langchain.com/docs/modules/agents/quick_start/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "BaseChatMessageHistory": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/chat_history/", "Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "LogStreamCallbackHandler": {"import dotenv": "https://python.langchain.com/docs/use_cases/question_answering/streaming/"}, "JsonOutputKeyToolsParser": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Using LangSmith is recommended but not required. 
Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "ChatAnthropicMessages": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/"}, "XMLOutputParser": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "xml.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/xml/"}, "EmbeddingsFilter": {"Uncomment if you want to log to LangSmith": "https://python.langchain.com/docs/use_cases/question_answering/citations/", "Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "PydanticToolsParser": {"%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "%pip install -qU langchain langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde/", "%pip install -qU langchain-core langchain-openai": "https://python.langchain.com/docs/use_cases/query_analysis/techniques/step_back/", "Tool calling {#tool-calling}": "https://python.langchain.com/docs/modules/model_io/chat/function_calling/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "chain": {"%pip install -qU langchain langchain-community langchain-openai langchain-chroma": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/no_queries/", "Streaming With LangChain {#streaming-with-langchain}": "https://python.langchain.com/docs/expression_language/streaming/", "decorator.md": "https://python.langchain.com/docs/expression_language/how_to/decorator/"}, "Comparator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Comparison": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Operation": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "Operator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "StructuredQuery": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "ChromaTranslator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/", "This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "ElasticsearchTranslator": {"constructing-filters.md": "https://python.langchain.com/docs/use_cases/query_analysis/how_to/constructing-filters/"}, "LLMGraphTransformer": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/constructing/"}, "CypherQueryCorrector": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "Schema": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/graph/mapping/"}, "AsyncCallbackManagerForToolRun": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "CallbackManagerForToolRun": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "BaseTool": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/", "Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/"}, "format_to_openai_function_messages": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "OpenAIFunctionsAgentOutputParser": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "convert_to_openai_function": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/graph/semantic/", "tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/"}, "SemanticSimilarityExampleSelector": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "Select the most similar example to the input.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples/", "This is a prompt template used to format each individual example.": "https://python.langchain.com/docs/modules/model_io/prompts/few_shot_examples_chat/", "Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/similarity/"}, "RunnableBranch": {"Set env var OPENAI_API_KEY or load from a .env file:": "https://python.langchain.com/docs/use_cases/chatbots/quickstart/", "Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/"}, "BSHTMLLoader": {"Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/", "HTML": "https://python.langchain.com/docs/modules/data_connection/document_loaders/html/"}, "create_structured_output_runnable": {"Download the content": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_long_text/"}, "BS4HTMLParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "PDFMinerParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "MimeTypeBasedParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "TextParser": {"Configure the parsers that you want to use per mime-type!": "https://python.langchain.com/docs/use_cases/extraction/how_to/handle_files/"}, "PythonAstREPLTool": {"Using LangSmith is recommended but not required. Uncomment below lines to use.": "https://python.langchain.com/docs/use_cases/sql/csv/"}, "create_sql_query_chain": {"Uncomment the below to use LangSmith. Not required.": "https://python.langchain.com/docs/use_cases/sql/prompting/", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()": "https://python.langchain.com/docs/use_cases/sql/large_db/"}, "QuerySQLDataBaseTool": {"Uncomment the below to use LangSmith. 
Not required.": "https://python.langchain.com/docs/use_cases/sql/quickstart/"}, "SQLRecordManager": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "index": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/"}, "BaseLoader": {"indexing.md": "https://python.langchain.com/docs/modules/data_connection/indexing/", "Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "EnsembleRetriever": {"initialize the bm25 retriever and faiss retriever": "https://python.langchain.com/docs/modules/data_connection/retrievers/ensemble/"}, "JsonKeyOutputFunctionsParser": {"The vectorstore to use to index the child chunks": "https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector/", "openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/"}, "LLMChainExtractor": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "LLMChainFilter": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "DocumentCompressorPipeline": {"Helper function for printing docs": "https://python.langchain.com/docs/modules/data_connection/retrievers/contextual_compression/"}, "CallbackManagerForRetrieverRun": {"Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/"}, "BaseRetriever": {"Custom Retriever {#custom-retriever}": "https://python.langchain.com/docs/modules/data_connection/retrievers/custom_retriever/"}, "TimeWeightedVectorStoreRetriever": {"Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/"}, "mock_now": {"Define your embedding model": "https://python.langchain.com/docs/modules/data_connection/retrievers/time_weighted_vectorstore/"}, "ParentDocumentRetriever": {"This text splitter is used to create the child documents": "https://python.langchain.com/docs/modules/data_connection/retrievers/parent_document_retriever/"}, "StructuredQueryOutputParser": {"This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "get_query_constructor_prompt": {"This example only specifies a filter": "https://python.langchain.com/docs/modules/data_connection/retrievers/self_query/"}, "Pinecone": {"Self-querying": "https://python.langchain.com/docs/modules/data_connection/retrievers/.ipynb_checkpoints/self_query-checkpoint/"}, "RecursiveJsonSplitter": {"This is a large nested json object and will be loaded as a python dict": "https://python.langchain.com/docs/modules/data_connection/document_transformers/recursive_json_splitter/"}, "HTMLHeaderTextSplitter": {"for local file use html_splitter.split_text_from_file()": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_header_metadata/"}, "SemanticChunker": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/semantic-chunker/"}, "SentenceTransformersTokenTextSplitter": {"This is a long document we can split up.": 
"https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "NLTKTextSplitter": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "KonlpyTextSplitter": {"This is a long document we can split up.": "https://python.langchain.com/docs/modules/data_connection/document_transformers/split_by_token/"}, "MarkdownHeaderTextSplitter": {"MD splits": "https://python.langchain.com/docs/modules/data_connection/document_transformers/markdown_header_metadata/"}, "HTMLSectionSplitter": {"Split": "https://python.langchain.com/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter/"}, "BaseBlobParser": {"Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "FileSystemBlobLoader": {"Custom Document Loader {#custom-document-loader}": "https://python.langchain.com/docs/modules/data_connection/document_loaders/custom/"}, "MathpixPDFLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyPDFium2Loader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFMinerLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFMinerPDFasHTMLLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyMuPDFLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PyPDFDirectoryLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PDFPlumberLoader": {"PDF": "https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf/"}, "PythonLoader": {"File Directory": "https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory/"}, "ToolException": {"Import things that are needed generically": "https://python.langchain.com/docs/modules/tools/custom_tools/"}, "MoveFileTool": {"tools_as_openai_functions.md": "https://python.langchain.com/docs/modules/tools/tools_as_openai_functions/"}, "BaseMemory": {"!python -m spacy download en_core_web_lg": "https://python.langchain.com/docs/modules/memory/custom_memory/"}, "CombinedMemory": {"Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/"}, "ConversationSummaryMemory": {"Combined": "https://python.langchain.com/docs/modules/memory/multiple_memory/", "Conversation Summary": "https://python.langchain.com/docs/modules/memory/types/summary/"}, "ConversationKGMemory": {"kg.md": "https://python.langchain.com/docs/modules/memory/types/kg/"}, "ConversationTokenBufferMemory": {"We can see here that the buffer is updated": "https://python.langchain.com/docs/modules/memory/types/token_buffer/"}, "ConversationEntityMemory": {"Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/"}, "ENTITY_MEMORY_CONVERSATION_TEMPLATE": {"Entity": "https://python.langchain.com/docs/modules/memory/types/entity_summary_memory/"}, "VectorStoreRetrieverMemory": {"Backed by a Vector Store": "https://python.langchain.com/docs/modules/memory/types/vectorstore_retriever_memory/"}, "BaseCallbackHandler": {"To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "First, define custom callback handler implementations": 
"https://python.langchain.com/docs/modules/callbacks/multiple_callbacks/"}, "FileCallbackHandler": {"this chain will both print to stdout (because verbose=True) and write to 'output.log'": "https://python.langchain.com/docs/modules/callbacks/filecallbackhandler/"}, "LLMResult": {"To enable streaming, we pass in `streaming=True` to the ChatModel constructor": "https://python.langchain.com/docs/modules/callbacks/async_callbacks/", "Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/"}, "create_xml_agent": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/"}, "XMLAgentOutputParser": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/xml_agent/"}, "create_self_ask_with_search_agent": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "TavilyAnswer": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/agent_types/self_ask_with_search/"}, "OpenAIAssistantRunnable": {"openai_assistants.md": "https://python.langchain.com/docs/modules/agents/agent_types/openai_assistants/"}, "AgentActionMessageLog": {"Load in document to retrieve over": "https://python.langchain.com/docs/modules/agents/how_to/agent_structured/"}, "LLMMathChain": {"need to use GPT-4 here as GPT-3.5 does not understand, however hard you insist, that": "https://python.langchain.com/docs/modules/agents/how_to/agent_iter/"}, "ChatGenerationChunk": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "GenerationChunk": {"Get the prompt to use - you can modify this!": "https://python.langchain.com/docs/modules/agents/how_to/streaming/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "CommaSeparatedListOutputParser": {"Quickstart": "https://python.langchain.com/docs/modules/model_io/.ipynb_checkpoints/quick_start-checkpoint/", "Model I/O": "https://python.langchain.com/docs/modules/model_io/index/", "csv.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/csv/"}, "get_bedrock_anthropic_callback": {"!pip install -qU langchain-openai": "https://python.langchain.com/docs/modules/model_io/chat/token_usage_tracking/"}, "AIMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "FunctionMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "HumanMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "SystemMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "ToolMessageChunk": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AsyncCallbackManagerForLLMRun": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "CallbackManagerForLLMRun": {"custom_chat_model.md": 
"https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "SimpleChatModel": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "ChatGeneration": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/", "The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "ChatResult": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "run_in_executor": {"custom_chat_model.md": "https://python.langchain.com/docs/modules/model_io/chat/custom_chat_model/"}, "AIMessagePromptTemplate": {"Prompts": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/prompts-checkpoint/"}, "JsonOutputToolsParser": {"Function calling": "https://python.langchain.com/docs/modules/model_io/chat/.ipynb_checkpoints/function_calling-checkpoint/", "openai_tools.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_tools/"}, "RunnableGenerator": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "OutputParserException": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "BaseGenerationOutputParser": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "Generation": {"The [bool] desribes a parameterization of a generic.": "https://python.langchain.com/docs/modules/model_io/output_parsers/custom/"}, "SimpleJsonOutputParser": {"Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/quick_start/"}, "ResponseSchema": {"structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/"}, "StructuredOutputParser": {"structured.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured/"}, "YamlOutputParser": {"Define your desired data structure.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/yaml/"}, "OutputFixingParser": {"retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/", "output_fixing.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/output_fixing/"}, "RetryOutputParser": {"retry.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/retry/"}, "EnumOutputParser": {"enum.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/enum/"}, "JsonOutputFunctionsParser": {"openai_functions.md": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/openai_functions/", "prompt_llm_parser.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_llm_parser/"}, "PandasDataFrameOutputParser": {"Solely for documentation purposes.": "https://python.langchain.com/docs/modules/model_io/output_parsers/types/pandas_dataframe/"}, "PipelinePromptTemplate": {"composition.md": "https://python.langchain.com/docs/modules/model_io/prompts/composition/"}, "ChatMessagePromptTemplate": {"Quick reference {#quick-reference}": "https://python.langchain.com/docs/modules/model_io/prompts/quick_start/"}, "MaxMarginalRelevanceExampleSelector": 
{"Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/mmr/"}, "LengthBasedExampleSelector": {"Examples of a pretend task of creating antonyms.": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/length_based/"}, "BaseExampleSelector": {"index.md": "https://python.langchain.com/docs/modules/model_io/prompts/example_selectors/index/"}, "LLM": {"custom_llm.md": "https://python.langchain.com/docs/modules/model_io/llms/custom_llm/"}, "ChatPromptValue": {"prompt_size.md": "https://python.langchain.com/docs/expression_language/cookbook/prompt_size/"}, "cosine_similarity": {"Dynamically route logic based on input {#dynamically-route-logic-based-on-input}": "https://python.langchain.com/docs/expression_language/how_to/routing/"}, "ConfigurableFieldSpec": {"Remembers": "https://python.langchain.com/docs/expression_language/how_to/message_history/"}, "HubRunnable": {"Configure chain internals at runtime {#configure-chain-internals-at-runtime}": "https://python.langchain.com/docs/expression_language/primitives/configure/"}} \ No newline at end of file diff --git a/docs/docs/guides/productionization/safety/hugging_face_prompt_injection.ipynb b/docs/docs/guides/productionization/safety/hugging_face_prompt_injection.ipynb index c138f1a2d3284..f4b306f45536d 100644 --- a/docs/docs/guides/productionization/safety/hugging_face_prompt_injection.ipynb +++ b/docs/docs/guides/productionization/safety/hugging_face_prompt_injection.ipynb @@ -9,7 +9,7 @@ "\n", "This notebook shows how to prevent prompt injection attacks using the text classification model from `HuggingFace`.\n", "\n", - "By default, it uses a *[laiyer/deberta-v3-base-prompt-injection](https://huggingface.co/laiyer/deberta-v3-base-prompt-injection)* model trained to identify prompt injections. \n", + "By default, it uses a *[protectai/deberta-v3-base-prompt-injection-v2](https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2)* model trained to identify prompt injections. \n", "\n", "In this notebook, we will use the ONNX version of the model to speed up the inference. 
" ] @@ -49,11 +49,15 @@ "from optimum.onnxruntime import ORTModelForSequenceClassification\n", "from transformers import AutoTokenizer, pipeline\n", "\n", - "# Using https://huggingface.co/laiyer/deberta-v3-base-prompt-injection\n", - "model_path = \"laiyer/deberta-v3-base-prompt-injection\"\n", - "tokenizer = AutoTokenizer.from_pretrained(model_path)\n", - "tokenizer.model_input_names = [\"input_ids\", \"attention_mask\"] # Hack to run the model\n", - "model = ORTModelForSequenceClassification.from_pretrained(model_path, subfolder=\"onnx\")\n", + "# Using https://huggingface.co/protectai/deberta-v3-base-prompt-injection-v2\n", + "model_path = \"laiyer/deberta-v3-base-prompt-injection-v2\"\n", + "revision = None # We recommend specifiying the revision to avoid breaking changes or supply chain attacks\n", + "tokenizer = AutoTokenizer.from_pretrained(\n", + " model_path, revision=revision, model_input_names=[\"input_ids\", \"attention_mask\"]\n", + ")\n", + "model = ORTModelForSequenceClassification.from_pretrained(\n", + " model_path, revision=revision, subfolder=\"onnx\"\n", + ")\n", "\n", "classifier = pipeline(\n", " \"text-classification\",\n", diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb index 4040101b76d05..cf9773f33ba43 100644 --- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb +++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb @@ -8,7 +8,7 @@ "\n", ">[Neo4j](https://neo4j.com/) is an open-source graph database with integrated support for vector similarity search\n", "\n", - "It supports:\n", + "It supports:\n\n", "- approximate nearest neighbor search\n", "- Euclidean similarity and cosine similarity\n", "- Hybrid search combining vector and keyword searches\n", diff --git a/docs/docs/modules/data_connection/retrievers/custom_retriever.ipynb b/docs/docs/modules/data_connection/retrievers/custom_retriever.ipynb index 1bab3e68f2d9a..ddb81fc0e483a 100644 --- a/docs/docs/modules/data_connection/retrievers/custom_retriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/custom_retriever.ipynb @@ -98,7 +98,7 @@ " ) -> List[Document]:\n", " \"\"\"Sync implementations for retriever.\"\"\"\n", " matching_documents = []\n", - " for document in documents:\n", + " for document in self.documents:\n", " if len(matching_documents) > self.k:\n", " return matching_documents\n", "\n", diff --git a/libs/community/langchain_community/chat_message_histories/file.py b/libs/community/langchain_community/chat_message_histories/file.py index d6f2f43c3d652..41dbd2afaad22 100644 --- a/libs/community/langchain_community/chat_message_histories/file.py +++ b/libs/community/langchain_community/chat_message_histories/file.py @@ -1,45 +1,5 @@ -import json -import logging -from pathlib import Path -from typing import List +from langchain_core.chat_history import FileChatMessageHistory -from langchain_core.chat_history import BaseChatMessageHistory -from langchain_core.messages import ( - BaseMessage, - messages_from_dict, - messages_to_dict, -) - -logger = logging.getLogger(__name__) - - -class FileChatMessageHistory(BaseChatMessageHistory): - """ - Chat message history that stores history in a local file. - - Args: - file_path: path of the local file to store the messages. 
- """ - - def __init__(self, file_path: str): - self.file_path = Path(file_path) - if not self.file_path.exists(): - self.file_path.touch() - self.file_path.write_text(json.dumps([])) - - @property - def messages(self) -> List[BaseMessage]: # type: ignore - """Retrieve the messages from the local file""" - items = json.loads(self.file_path.read_text()) - messages = messages_from_dict(items) - return messages - - def add_message(self, message: BaseMessage) -> None: - """Append the message to the record in the local file""" - messages = messages_to_dict(self.messages) - messages.append(messages_to_dict([message])[0]) - self.file_path.write_text(json.dumps(messages)) - - def clear(self) -> None: - """Clear session memory from the local file""" - self.file_path.write_text(json.dumps([])) +__all__ = [ + "FileChatMessageHistory", +] diff --git a/libs/community/langchain_community/chat_message_histories/in_memory.py b/libs/community/langchain_community/chat_message_histories/in_memory.py index fe6c6406524c5..679c9ce665e7c 100644 --- a/libs/community/langchain_community/chat_message_histories/in_memory.py +++ b/libs/community/langchain_community/chat_message_histories/in_memory.py @@ -1,31 +1,5 @@ -from typing import List, Sequence +from langchain_core.chat_history import InMemoryChatMessageHistory as ChatMessageHistory -from langchain_core.chat_history import BaseChatMessageHistory -from langchain_core.messages import BaseMessage -from langchain_core.pydantic_v1 import BaseModel, Field - - -class ChatMessageHistory(BaseChatMessageHistory, BaseModel): - """In memory implementation of chat message history. - - Stores messages in an in memory list. - """ - - messages: List[BaseMessage] = Field(default_factory=list) - - async def aget_messages(self) -> List[BaseMessage]: - return self.messages - - def add_message(self, message: BaseMessage) -> None: - """Add a self-created message to the store""" - self.messages.append(message) - - async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: - """Add messages to the store""" - self.add_messages(messages) - - def clear(self) -> None: - self.messages = [] - - async def aclear(self) -> None: - self.clear() +__all__ = [ + "ChatMessageHistory", +] diff --git a/libs/community/langchain_community/llms/llamafile.py b/libs/community/langchain_community/llms/llamafile.py index 1aff521ee3300..933ed5e025877 100644 --- a/libs/community/langchain_community/llms/llamafile.py +++ b/libs/community/langchain_community/llms/llamafile.py @@ -139,6 +139,7 @@ def _param_fieldnames(self) -> List[str]: "streaming", "tags", "verbose", + "custom_get_token_ids", ] attrs = [ k for k in get_pydantic_field_names(self.__class__) if k not in ignore_keys diff --git a/libs/core/langchain_core/caches.py b/libs/core/langchain_core/caches.py index 4c494c4fbc849..4e9e2993a0428 100644 --- a/libs/core/langchain_core/caches.py +++ b/libs/core/langchain_core/caches.py @@ -22,7 +22,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Any, Optional, Sequence +from typing import Any, Dict, Optional, Sequence, Tuple from langchain_core.outputs import Generation from langchain_core.runnables import run_in_executor @@ -105,3 +105,37 @@ async def aupdate( async def aclear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments.""" return await run_in_executor(None, self.clear, **kwargs) + + +class InMemoryCache(BaseCache): + """Cache that stores things in memory.""" + + def __init__(self) -> None: + """Initialize 
with empty cache.""" + self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} + + def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Look up based on prompt and llm_string.""" + return self._cache.get((prompt, llm_string), None) + + def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: + """Update cache based on prompt and llm_string.""" + self._cache[(prompt, llm_string)] = return_val + + def clear(self, **kwargs: Any) -> None: + """Clear cache.""" + self._cache = {} + + async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Look up based on prompt and llm_string.""" + return self.lookup(prompt, llm_string) + + async def aupdate( + self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE + ) -> None: + """Update cache based on prompt and llm_string.""" + self.update(prompt, llm_string, return_val) + + async def aclear(self, **kwargs: Any) -> None: + """Clear cache.""" + self.clear() diff --git a/libs/core/langchain_core/chat_history.py b/libs/core/langchain_core/chat_history.py index 8f93074558597..4388b373305e8 100644 --- a/libs/core/langchain_core/chat_history.py +++ b/libs/core/langchain_core/chat_history.py @@ -16,7 +16,9 @@ """ # noqa: E501 from __future__ import annotations +import json from abc import ABC, abstractmethod +from pathlib import Path from typing import List, Sequence, Union from langchain_core.messages import ( @@ -24,7 +26,10 @@ BaseMessage, HumanMessage, get_buffer_string, + messages_from_dict, + messages_to_dict, ) +from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.runnables import run_in_executor @@ -184,3 +189,61 @@ async def aclear(self) -> None: def __str__(self) -> str: """Return a string representation of the chat history.""" return get_buffer_string(self.messages) + + +class InMemoryChatMessageHistory(BaseChatMessageHistory, BaseModel): + """In memory implementation of chat message history. + + Stores messages in an in memory list. + """ + + messages: List[BaseMessage] = Field(default_factory=list) + + async def aget_messages(self) -> List[BaseMessage]: + return self.messages + + def add_message(self, message: BaseMessage) -> None: + """Add a self-created message to the store""" + self.messages.append(message) + + async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: + """Add messages to the store""" + self.add_messages(messages) + + def clear(self) -> None: + self.messages = [] + + async def aclear(self) -> None: + self.clear() + + +class FileChatMessageHistory(BaseChatMessageHistory): + """Chat message history that stores history in a local file.""" + + def __init__(self, file_path: str) -> None: + """Initialize the file path for the chat history. + + Args: + file_path: The path to the local file to store the chat history. 
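Since InMemoryCache now lives in langchain_core.caches (with langchain.cache continuing to re-export it, per the __all__ change later in this diff), here is a minimal standalone sketch of the class as added above; the prompt and llm_string values are made up purely for illustration.

    from langchain_core.caches import InMemoryCache
    from langchain_core.outputs import Generation

    cache = InMemoryCache()

    # Entries are keyed by (prompt, llm_string) and hold a sequence of Generations.
    cache.update("Tell me a joke", "fake-llm-params", [Generation(text="...")])

    assert cache.lookup("Tell me a joke", "fake-llm-params") == [Generation(text="...")]
    assert cache.lookup("Some other prompt", "fake-llm-params") is None

    cache.clear()
    assert cache.lookup("Tell me a joke", "fake-llm-params") is None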
+ """ + self.file_path = Path(file_path) + if not self.file_path.exists(): + self.file_path.touch() + self.file_path.write_text(json.dumps([])) + + @property + def messages(self) -> List[BaseMessage]: # type: ignore + """Retrieve the messages from the local file""" + items = json.loads(self.file_path.read_text()) + messages = messages_from_dict(items) + return messages + + def add_message(self, message: BaseMessage) -> None: + """Append the message to the record in the local file""" + messages = messages_to_dict(self.messages) + messages.append(messages_to_dict([message])[0]) + self.file_path.write_text(json.dumps(messages)) + + def clear(self) -> None: + """Clear session memory from the local file""" + self.file_path.write_text(json.dumps([])) diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py index 4941faea34589..a6addf8f6b0aa 100644 --- a/libs/core/langchain_core/language_models/base.py +++ b/libs/core/langchain_core/language_models/base.py @@ -5,6 +5,7 @@ from typing import ( TYPE_CHECKING, Any, + Callable, Dict, List, Mapping, @@ -97,6 +98,10 @@ class BaseLanguageModel( """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" + custom_get_token_ids: Optional[Callable[[str], List[int]]] = Field( + default=None, exclude=True + ) + """Optional encoder to use for counting tokens.""" @validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: @@ -310,7 +315,10 @@ def get_token_ids(self, text: str) -> List[int]: A list of ids corresponding to the tokens in the text, in order they occur in the text. """ - return _get_token_ids_default_method(text) + if self.custom_get_token_ids is not None: + return self.custom_get_token_ids(text) + else: + return _get_token_ids_default_method(text) def get_num_tokens(self, text: str) -> int: """Get the number of tokens present in the text. diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index a29a0677ee990..ee48234df3a80 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -69,7 +69,6 @@ accepts_config, accepts_context, accepts_run_manager, - adapt_first_streaming_chunk, create_model, gather_with_concurrency, get_function_first_arg_dict_keys, @@ -1280,21 +1279,22 @@ def transform( final: Input got_first_val = False - for chunk in input: + for ichunk in input: + # The default implementation of transform is to buffer input and + # then call stream. + # It'll attempt to gather all input into a single chunk using + # the `+` operator. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk, + # and we'll iterate until we get to the last chunk. if not got_first_val: - final = adapt_first_streaming_chunk(chunk) # type: ignore + final = ichunk got_first_val = True else: - # Make a best effort to gather, for any type that supports `+` - # This method should throw an error if gathering fails. try: - final = final + chunk # type: ignore[operator] + final = final + ichunk # type: ignore[operator] except TypeError: - raise TypeError( - f"Failed while trying to add together " - f"type {type(final)} and {type(chunk)}." - f"These types should be addable for transform to work." 
- ) + final = ichunk if got_first_val: yield from self.stream(final, config, **kwargs) @@ -1313,21 +1313,22 @@ async def atransform( final: Input got_first_val = False - async for chunk in input: + async for ichunk in input: + # The default implementation of transform is to buffer input and + # then call stream. + # It'll attempt to gather all input into a single chunk using + # the `+` operator. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk, + # and we'll iterate until we get to the last chunk. if not got_first_val: - final = adapt_first_streaming_chunk(chunk) # type: ignore + final = ichunk got_first_val = True else: - # Make a best effort to gather, for any type that supports `+` - # This method should throw an error if gathering fails. try: - final = final + chunk # type: ignore[operator] + final = final + ichunk # type: ignore[operator] except TypeError: - raise TypeError( - f"Failed while trying to add together " - f"type {type(final)} and {type(chunk)}." - f"These types should be addable for atransform to work." - ) + final = ichunk if got_first_val: async for output in self.astream(final, config, **kwargs): @@ -3998,10 +3999,16 @@ def _transform( config: RunnableConfig, **kwargs: Any, ) -> Iterator[Output]: - final: Optional[Input] = None + final: Input + got_first_val = False for ichunk in input: - if final is None: - final = adapt_first_streaming_chunk(ichunk) # type: ignore + # By definitions, RunnableLambdas consume all input before emitting output. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + # So we'll iterate until we get to the last chunk! + if not got_first_val: + final = ichunk + got_first_val = True else: try: final = final + ichunk # type: ignore[operator] @@ -4082,10 +4089,16 @@ async def _atransform( config: RunnableConfig, **kwargs: Any, ) -> AsyncIterator[Output]: - final: Optional[Input] = None + final: Input + got_first_val = False async for ichunk in input: - if final is None: - final = adapt_first_streaming_chunk(ichunk) + # By definitions, RunnableLambdas consume all input before emitting output. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + # So we'll iterate until we get to the last chunk! 
+ if not got_first_val: + final = ichunk + got_first_val = True else: try: final = final + ichunk # type: ignore[operator] diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py index d2fbf30e4b9c8..ec081aea97f5e 100644 --- a/libs/core/langchain_core/runnables/passthrough.py +++ b/libs/core/langchain_core/runnables/passthrough.py @@ -40,7 +40,6 @@ from langchain_core.runnables.utils import ( AddableDict, ConfigurableFieldSpec, - adapt_first_streaming_chunk, create_model, ) from langchain_core.utils.aiter import atee, py_anext @@ -243,16 +242,22 @@ def transform( for chunk in self._transform_stream_with_config(input, identity, config): yield chunk else: - final = None + final: Other + got_first_chunk = False for chunk in self._transform_stream_with_config(input, identity, config): yield chunk - if final is None: - final = adapt_first_streaming_chunk(chunk) + + if not got_first_chunk: + final = chunk + got_first_chunk = True else: - final = final + chunk + try: + final = final + chunk # type: ignore[operator] + except TypeError: + final = chunk - if final is not None: + if got_first_chunk: call_func_with_variable_args( self.func, final, ensure_config(config), **kwargs ) @@ -269,18 +274,28 @@ async def atransform( ): yield chunk else: - final = None + got_first_chunk = False async for chunk in self._atransform_stream_with_config( input, identity, config ): yield chunk - if final is None: - final = adapt_first_streaming_chunk(chunk) + + # By definitions, a function will operate on the aggregated + # input. So we'll aggregate the input until we get to the last + # chunk. + # If the input is not addable, then we'll assume that we can + # only operate on the last chunk. + if not got_first_chunk: + final = chunk + got_first_chunk = True else: - final = final + chunk + try: + final = final + chunk # type: ignore[operator] + except TypeError: + final = chunk - if final is not None: + if got_first_chunk: config = ensure_config(config) if self.afunc is not None: await acall_func_with_variable_args( diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py index dff10ad04957a..d5553e786f519 100644 --- a/libs/core/langchain_core/runnables/utils.py +++ b/libs/core/langchain_core/runnables/utils.py @@ -524,11 +524,3 @@ def _create_model_cached( return _create_model_base( __model_name, __config__=_SchemaConfig, **field_definitions ) - - -def adapt_first_streaming_chunk(chunk: Any) -> Any: - """This might transform the first chunk of a stream into an AddableDict.""" - if isinstance(chunk, dict) and not isinstance(chunk, AddableDict): - return AddableDict(chunk) - else: - return chunk diff --git a/libs/community/tests/unit_tests/chat_message_histories/test_file.py b/libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py similarity index 97% rename from libs/community/tests/unit_tests/chat_message_histories/test_file.py rename to libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py index f069ff2493540..4c292c61e5a93 100644 --- a/libs/community/tests/unit_tests/chat_message_histories/test_file.py +++ b/libs/core/tests/unit_tests/chat_history/test_file_chat_message_history.py @@ -3,9 +3,9 @@ from typing import Generator import pytest -from langchain_core.messages import AIMessage, HumanMessage -from langchain_community.chat_message_histories import FileChatMessageHistory +from langchain_core.chat_history import FileChatMessageHistory +from langchain_core.messages 
import AIMessage, HumanMessage @pytest.fixture diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index ca6d2a3adafac..07ebd37abf2c1 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -5401,11 +5401,21 @@ def test_transform_of_runnable_lambda_with_dicts() -> None: runnable = RunnableLambda(lambda x: x) chunks = iter( [ - {"foo": "a"}, {"foo": "n"}, ] ) - assert list(runnable.transform(chunks)) == [{"foo": "an"}] + assert list(runnable.transform(chunks)) == [{"foo": "n"}] + + # Test as part of a sequence + seq = runnable | runnable + chunks = iter( + [ + {"foo": "n"}, + ] + ) + assert list(seq.transform(chunks)) == [{"foo": "n"}] + # Test some other edge cases + assert list(seq.stream({"foo": "n"})) == [{"foo": "n"}] async def test_atransform_of_runnable_lambda_with_dicts() -> None: @@ -5420,7 +5430,11 @@ async def chunk_iterator() -> AsyncIterator[Dict[str, str]]: yield {"foo": "n"} chunks = [chunk async for chunk in runnable.atransform(chunk_iterator())] - assert chunks == [{"foo": "an"}] + assert chunks == [{"foo": "n"}] + + seq = runnable | runnable + chunks = [chunk async for chunk in seq.atransform(chunk_iterator())] + assert chunks == [{"foo": "n"}] def test_default_transform_with_dicts() -> None: @@ -5440,7 +5454,8 @@ def invoke( ] ) - assert list(runnable.transform(chunks)) == [{"foo": "an"}] + assert list(runnable.transform(chunks)) == [{"foo": "n"}] + assert list(runnable.stream({"foo": "n"})) == [{"foo": "n"}] async def test_default_atransform_with_dicts() -> None: @@ -5460,6 +5475,17 @@ async def chunk_iterator() -> AsyncIterator[Dict[str, str]]: chunks = [chunk async for chunk in runnable.atransform(chunk_iterator())] + assert chunks == [{"foo": "n"}] + + # Test with addable dict + async def chunk_iterator_with_addable() -> AsyncIterator[Dict[str, str]]: + yield AddableDict({"foo": "a"}) + yield AddableDict({"foo": "n"}) + + chunks = [ + chunk async for chunk in runnable.atransform(chunk_iterator_with_addable()) + ] + assert chunks == [{"foo": "an"}] diff --git a/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py b/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py index a2868477611e2..ecd5f7f682263 100644 --- a/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py +++ b/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py @@ -23,7 +23,7 @@ def __init__( def _model_default_factory( - model_name: str = "laiyer/deberta-v3-base-prompt-injection", + model_name: str = "protectai/deberta-v3-base-prompt-injection-v2", ) -> Pipeline: try: from transformers import ( @@ -64,7 +64,7 @@ class HuggingFaceInjectionIdentifier(BaseTool): Can be specified as transformers Pipeline or string. String should correspond to the model name of a text-classification transformers model. Defaults to - ``laiyer/deberta-v3-base-prompt-injection`` model. + ``protectai/deberta-v3-base-prompt-injection-v2`` model. 
""" threshold: float = Field( description="Threshold for prompt injection detection.", default=0.5 diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py index 65e4e37458714..50b60390f84e9 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py @@ -2,7 +2,6 @@ from typing import List from langchain_community.agent_toolkits.base import BaseToolkit -from langchain_community.llms.openai import OpenAI from langchain_community.tools.vectorstore.tool import ( VectorStoreQATool, VectorStoreQAWithSourcesTool, @@ -31,7 +30,7 @@ class VectorStoreToolkit(BaseToolkit): """Toolkit for interacting with a Vector Store.""" vectorstore_info: VectorStoreInfo = Field(exclude=True) - llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) + llm: BaseLanguageModel class Config: """Configuration for this pydantic object.""" @@ -65,7 +64,7 @@ class VectorStoreRouterToolkit(BaseToolkit): """Toolkit for routing between Vector Stores.""" vectorstores: List[VectorStoreInfo] = Field(exclude=True) - llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) + llm: BaseLanguageModel class Config: """Configuration for this pydantic object.""" diff --git a/libs/langchain/langchain/cache.py b/libs/langchain/langchain/cache.py index d0badba70c79f..7d84971d16fa9 100644 --- a/libs/langchain/langchain/cache.py +++ b/libs/langchain/langchain/cache.py @@ -18,7 +18,6 @@ ) __all__ = [ - "InMemoryCache", "FullLLMCache", "SQLAlchemyCache", "SQLiteCache", @@ -27,6 +26,7 @@ "RedisSemanticCache", "GPTCache", "MomentoCache", + "InMemoryCache", "CassandraCache", "CassandraSemanticCache", "FullMd5LLMCache", diff --git a/libs/langchain/langchain/chains/natbot/base.py b/libs/langchain/langchain/chains/natbot/base.py index e74c3477b1419..3c6a9aa2e891e 100644 --- a/libs/langchain/langchain/chains/natbot/base.py +++ b/libs/langchain/langchain/chains/natbot/base.py @@ -4,7 +4,6 @@ import warnings from typing import Any, Dict, List, Optional -from langchain_community.llms.openai import OpenAI from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.language_models import BaseLanguageModel from langchain_core.pydantic_v1 import Extra, root_validator @@ -68,8 +67,11 @@ def raise_deprecation(cls, values: Dict) -> Dict: @classmethod def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain: """Load with default LLMChain.""" - llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50) - return cls.from_llm(llm, objective, **kwargs) + raise NotImplementedError( + "This method is no longer implemented. Please use from_llm." 
+ "llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)" + "For example, NatBotChain.from_llm(llm, objective)" + ) @classmethod def from_llm( diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py index 681618815e1db..b7c98348eae0b 100644 --- a/libs/langchain/langchain/chains/openai_functions/openapi.py +++ b/libs/langchain/langchain/chains/openai_functions/openapi.py @@ -6,7 +6,6 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import requests -from langchain_community.chat_models import ChatOpenAI from langchain_community.utilities.openapi import OpenAPISpec from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.language_models import BaseLanguageModel @@ -272,9 +271,12 @@ def get_openapi_chain( if isinstance(spec, str): raise ValueError(f"Unable to parse spec from source {spec}") openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec) - llm = llm or ChatOpenAI( - model="gpt-3.5-turbo-0613", - ) + if not llm: + raise ValueError( + "Must provide an LLM for this chain.For example,\n" + "from langchain_openai import ChatOpenAI\n" + "llm = ChatOpenAI()\n" + ) prompt = prompt or ChatPromptTemplate.from_template( "Use the provided API's to respond to this user query:\n\n{query}" ) diff --git a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py index d9b0b924edb7d..90d84a01132d4 100644 --- a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py +++ b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py @@ -3,7 +3,6 @@ from typing import Any, Dict, List, Mapping, Optional -from langchain_community.chat_models import ChatOpenAI from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import PromptTemplate from langchain_core.retrievers import BaseRetriever @@ -42,6 +41,8 @@ def from_retrievers( default_retriever: Optional[BaseRetriever] = None, default_prompt: Optional[PromptTemplate] = None, default_chain: Optional[Chain] = None, + *, + default_chain_llm: Optional[BaseLanguageModel] = None, **kwargs: Any, ) -> MultiRetrievalQAChain: if default_prompt and not default_retriever: @@ -78,8 +79,20 @@ def from_retrievers( prompt = PromptTemplate( template=prompt_template, input_variables=["history", "query"] ) + if default_chain_llm is None: + raise NotImplementedError( + "conversation_llm must be provided if default_chain is not " + "specified. This API has been changed to avoid instantiating " + "default LLMs on behalf of users." 
+ "You can provide a conversation LLM like so:\n" + "from langchain_openai import ChatOpenAI\n" + "llm = ChatOpenAI()" + ) _default_chain = ConversationChain( - llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result" + llm=default_chain_llm, + prompt=prompt, + input_key="query", + output_key="result", ) return cls( router_chain=router_chain, diff --git a/libs/langchain/langchain/indexes/vectorstore.py b/libs/langchain/langchain/indexes/vectorstore.py index b70cf33f0f717..44c95e7b85f57 100644 --- a/libs/langchain/langchain/indexes/vectorstore.py +++ b/libs/langchain/langchain/indexes/vectorstore.py @@ -1,8 +1,6 @@ from typing import Any, Dict, List, Optional, Type from langchain_community.document_loaders.base import BaseLoader -from langchain_community.embeddings.openai import OpenAIEmbeddings -from langchain_community.llms.openai import OpenAI from langchain_community.vectorstores.inmemory import InMemoryVectorStore from langchain_core.documents import Document from langchain_core.embeddings import Embeddings @@ -38,7 +36,14 @@ def query( **kwargs: Any, ) -> str: """Query the vectorstore.""" - llm = llm or OpenAI(temperature=0) + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQA.from_chain_type( llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs @@ -53,7 +58,14 @@ async def aquery( **kwargs: Any, ) -> str: """Query the vectorstore.""" - llm = llm or OpenAI(temperature=0) + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQA.from_chain_type( llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs @@ -68,7 +80,14 @@ def query_with_sources( **kwargs: Any, ) -> dict: """Query the vectorstore and get back sources.""" - llm = llm or OpenAI(temperature=0) + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. " + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQAWithSourcesChain.from_chain_type( llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs @@ -83,7 +102,14 @@ async def aquery_with_sources( **kwargs: Any, ) -> dict: """Query the vectorstore and get back sources.""" - llm = llm or OpenAI(temperature=0) + if llm is None: + raise NotImplementedError( + "This API has been changed to require an LLM. 
" + "Please provide an llm to use for querying the vectorstore.\n" + "For example,\n" + "from langchain_openai import OpenAI\n" + "llm = OpenAI(temperature=0)" + ) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQAWithSourcesChain.from_chain_type( llm, retriever=self.vectorstore.as_retriever(**retriever_kwargs), **kwargs @@ -95,7 +121,7 @@ class VectorstoreIndexCreator(BaseModel): """Logic for creating indexes.""" vectorstore_cls: Type[VectorStore] = InMemoryVectorStore - embedding: Embeddings = Field(default_factory=OpenAIEmbeddings) + embedding: Embeddings text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter) vectorstore_kwargs: dict = Field(default_factory=dict) diff --git a/libs/langchain/langchain/memory/chat_memory.py b/libs/langchain/langchain/memory/chat_memory.py index 671edf9f31b2c..10feaa3e1b95f 100644 --- a/libs/langchain/langchain/memory/chat_memory.py +++ b/libs/langchain/langchain/memory/chat_memory.py @@ -2,8 +2,10 @@ from abc import ABC from typing import Any, Dict, Optional, Tuple -from langchain_community.chat_message_histories.in_memory import ChatMessageHistory -from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.chat_history import ( + BaseChatMessageHistory, + InMemoryChatMessageHistory, +) from langchain_core.memory import BaseMemory from langchain_core.messages import AIMessage, HumanMessage from langchain_core.pydantic_v1 import Field @@ -14,7 +16,9 @@ class BaseChatMemory(BaseMemory, ABC): """Abstract base class for chat memory.""" - chat_memory: BaseChatMessageHistory = Field(default_factory=ChatMessageHistory) + chat_memory: BaseChatMessageHistory = Field( + default_factory=InMemoryChatMessageHistory + ) output_key: Optional[str] = None input_key: Optional[str] = None return_messages: bool = False diff --git a/libs/langchain/tests/unit_tests/llms/test_base.py b/libs/langchain/tests/unit_tests/llms/test_base.py index 37d9b802ed275..1b19b88ee7209 100644 --- a/libs/langchain/tests/unit_tests/llms/test_base.py +++ b/libs/langchain/tests/unit_tests/llms/test_base.py @@ -6,9 +6,10 @@ except ImportError: from sqlalchemy.ext.declarative import declarative_base +from langchain_core.caches import InMemoryCache from langchain_core.outputs import Generation, LLMResult -from langchain.cache import InMemoryCache, SQLAlchemyCache +from langchain.cache import SQLAlchemyCache from langchain.globals import get_llm_cache, set_llm_cache from langchain.llms.base import __all__ from tests.unit_tests.llms.fake_llm import FakeLLM diff --git a/libs/langchain/tests/unit_tests/test_cache.py b/libs/langchain/tests/unit_tests/test_cache.py index 42cc6c36b6847..13e27c8e024ad 100644 --- a/libs/langchain/tests/unit_tests/test_cache.py +++ b/libs/langchain/tests/unit_tests/test_cache.py @@ -6,6 +6,7 @@ from _pytest.fixtures import FixtureRequest from langchain_community.chat_models import FakeListChatModel from langchain_community.llms import FakeListLLM +from langchain_core.caches import InMemoryCache from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.language_models.llms import BaseLLM from langchain_core.load import dumps @@ -14,7 +15,7 @@ from sqlalchemy import create_engine from sqlalchemy.orm import Session -from langchain.cache import InMemoryCache, SQLAlchemyCache +from langchain.cache import SQLAlchemyCache from langchain.globals import get_llm_cache, set_llm_cache diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py 
index f5602a816baf9..f59b2c6b6fa4d 100644 --- a/libs/partners/openai/langchain_openai/llms/base.py +++ b/libs/partners/openai/langchain_openai/llms/base.py @@ -521,6 +521,8 @@ def _llm_type(self) -> str: def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" + if self.custom_get_token_ids is not None: + return self.custom_get_token_ids(text) # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: return super().get_num_tokens(text) diff --git a/libs/partners/openai/tests/unit_tests/llms/test_base.py b/libs/partners/openai/tests/unit_tests/llms/test_base.py index d05bb0bfe546c..122846e2def13 100644 --- a/libs/partners/openai/tests/unit_tests/llms/test_base.py +++ b/libs/partners/openai/tests/unit_tests/llms/test_base.py @@ -1,4 +1,5 @@ import os +from typing import List import pytest @@ -54,3 +55,11 @@ def mock_completion() -> dict: def test_get_token_ids(model: str) -> None: OpenAI(model=model).get_token_ids("foo") return + + +def test_custom_token_counting() -> None: + def token_encoder(text: str) -> List[int]: + return [1, 2, 3] + + llm = OpenAI(custom_get_token_ids=token_encoder) + assert llm.get_token_ids("foo") == [1, 2, 3]
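Finally, a usage sketch for the new custom_get_token_ids hook wired through BaseLanguageModel and honored by the OpenAI override above. The whitespace "tokenizer" and the placeholder API key are purely illustrative; no API call is made, since token counting stays local.

    from typing import List

    from langchain_openai import OpenAI


    def whitespace_token_ids(text: str) -> List[int]:
        # Toy encoder: one fake token id per whitespace-separated word.
        return list(range(len(text.split())))


    llm = OpenAI(
        openai_api_key="placeholder-key",  # never used for token counting
        custom_get_token_ids=whitespace_token_ids,
    )

    assert llm.get_token_ids("counting three tokens") == [0, 1, 2]
    assert llm.get_num_tokens("counting three tokens") == 3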