docs: remove __call__ #20861

Merged: 10 commits, merged Apr 24, 2024
Changes from 9 commits
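Every hunk in this PR applies the same mechanical change: the deprecated `__call__` shorthand on LLM and chat-model objects (`llm(prompt)`, `chat(messages)`) is replaced with the explicit `invoke` method from the Runnable interface. A minimal before/after sketch of the pattern; the import path, model class, and prompt are illustrative, not taken from the diff:

# Sketch of the migration this PR performs across the docs.
from langchain_openai import OpenAI  # assumed package layout for current releases

llm = OpenAI()

# Deprecated: calling the model object directly routes through __call__
# result = llm("Tell me a joke")

# Replacement: the explicit Runnable method
result = llm.invoke("Tell me a joke")
print(result)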
2 changes: 1 addition & 1 deletion cookbook/Multi_modal_RAG_google.ipynb
@@ -256,7 +256,7 @@
" \"\"\"Make image summary\"\"\"\n",
" model = ChatVertexAI(model_name=\"gemini-pro-vision\", max_output_tokens=1024)\n",
"\n",
" msg = model(\n",
" msg = model.invoke(\n",
" [\n",
" HumanMessage(\n",
" content=[\n",
@@ -362,7 +362,7 @@
],
"source": [
"llm = OpenAI()\n",
"llm(query)"
"llm.invoke(query)"
]
},
{
4 changes: 2 additions & 2 deletions docs/docs/integrations/callbacks/labelstudio.ipynb
@@ -194,7 +194,7 @@
"llm = OpenAI(\n",
" temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n",
")\n",
"print(llm(\"Tell me a joke\"))"
"print(llm.invoke(\"Tell me a joke\"))"
]
},
{
@@ -270,7 +270,7 @@
" )\n",
" ]\n",
")\n",
"llm_results = chat_llm(\n",
"llm_results = chat_llm.invoke(\n",
" [\n",
" SystemMessage(content=\"Always use a lot of emojis\"),\n",
" HumanMessage(content=\"Tell me a joke\"),\n",
2 changes: 1 addition & 1 deletion docs/docs/integrations/callbacks/llmonitor.md
@@ -107,7 +107,7 @@ User tracking allows you to identify your users, track their cost, conversations
from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify

with identify("user-123"):
llm("Tell me a joke")
llm.invoke("Tell me a joke")

with identify("user-456", user_props={"email": "[email protected]"}):
agen.run("Who is Leo DiCaprio's girlfriend?")
9 changes: 5 additions & 4 deletions docs/docs/integrations/callbacks/promptlayer.ipynb
@@ -103,7 +103,7 @@
" temperature=0,\n",
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"chatopenai\"])],\n",
")\n",
"llm_results = chat_llm(\n",
"llm_results = chat_llm.invoke(\n",
" [\n",
" HumanMessage(content=\"What comes after 1,2,3 ?\"),\n",
" HumanMessage(content=\"Tell me another joke?\"),\n",
@@ -129,10 +129,11 @@
"from langchain_community.llms import GPT4All\n",
"\n",
"model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n",
"callbacks = [PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])]\n",
"\n",
"response = model(\n",
"response = model.invoke(\n",
" \"Once upon a time, \",\n",
" callbacks=[PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])],\n",
" config={\"callbacks\": callbacks},\n",
")"
]
},
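Note the second change in this hunk: besides swapping `model(...)` for `model.invoke(...)`, the per-call callbacks move out of a bare `callbacks=` keyword and into the `config` argument, matching the Runnable signature `invoke(input, config=None, **kwargs)`. A hedged sketch of the pattern, assuming the handler is imported as earlier in the notebook:

callbacks = [PromptLayerCallbackHandler(pl_tags=["langchain", "gpt4all"])]
response = model.invoke(
    "Once upon a time, ",
    config={"callbacks": callbacks},  # runtime options travel in the config dict
)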
@@ -181,7 +182,7 @@
")\n",
"\n",
"example_prompt = promptlayer.prompts.get(\"example\", version=1, langchain=True)\n",
"openai_llm(example_prompt.format(product=\"toasters\"))"
"openai_llm.invoke(example_prompt.format(product=\"toasters\"))"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/integrations/callbacks/trubrics.ipynb
@@ -315,7 +315,7 @@
}
],
"source": [
"chat_res = chat_llm(\n",
"chat_res = chat_llm.invoke(\n",
" [\n",
" SystemMessage(content=\"Every answer of yours must be about OpenAI.\"),\n",
" HumanMessage(content=\"Tell me a joke\"),\n",
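A detail worth keeping in mind across these chat-model hunks: on a completion-style LLM, `invoke` takes a string and returns a string, while on a chat model it takes a list of messages and returns a message object, so replies are read via `.content`. A small sketch under those assumptions:

from langchain_core.messages import HumanMessage

text = llm.invoke("Tell me a joke")  # completion LLM: plain string back
reply = chat_llm.invoke([HumanMessage(content="Tell me a joke")])
print(reply.content)  # chat model: message object; the text lives on .content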
4 changes: 2 additions & 2 deletions docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb
@@ -72,7 +72,7 @@
"metadata": {},
"outputs": [],
"source": [
"output = chat([HumanMessage(content=\"write a funny joke\")])\n",
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")])\n",
"print(\"output:\", output)"
]
},
@@ -90,7 +90,7 @@
"outputs": [],
"source": [
"kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n",
"output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
"output = chat.invoke([HumanMessage(content=\"write a funny joke\")], **kwargs)\n",
"print(\"output:\", output)"
]
},
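The second hunk above also relies on `invoke` forwarding extra keyword arguments to the underlying generation call, so per-request sampling parameters survive the migration unchanged; a sketch, assuming the provider honors these keys:

kwargs = {"temperature": 0.8, "top_p": 0.8, "top_k": 5}
output = chat.invoke([HumanMessage(content="write a funny joke")], **kwargs)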
2 changes: 1 addition & 1 deletion docs/docs/integrations/chat/llama_edge.ipynb
@@ -62,7 +62,7 @@
"messages = [system_message, user_message]\n",
"\n",
"# chat with wasm-chat service\n",
"response = chat(messages)\n",
"response = chat.invoke(messages)\n",
"\n",
"print(f\"[Bot] {response.content}\")"
]
2 changes: 1 addition & 1 deletion docs/docs/integrations/chat/zhipuai.ipynb
@@ -119,7 +119,7 @@
"metadata": {},
"outputs": [],
"source": [
"response = chat(messages)\n",
"response = chat.invoke(messages)\n",
"print(response.content) # Displays the AI-generated poem"
]
},
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/anyscale.ipynb
@@ -147,7 +147,7 @@
"\n",
"@ray.remote(num_cpus=0.1)\n",
"def send_query(llm, prompt):\n",
" resp = llm(prompt)\n",
" resp = llm.invoke(prompt)\n",
" return resp\n",
"\n",
"\n",
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/aphrodite.ipynb
@@ -96,7 +96,7 @@
")\n",
"\n",
"print(\n",
" llm(\n",
" llm.invoke(\n",
" '<|system|>Enter RP mode. You are Ayumu \"Osaka\" Kasuga.<|user|>Hey Osaka. Tell me about yourself.<|model|>'\n",
" )\n",
")"
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/baichuan.ipynb
@@ -45,7 +45,7 @@
"# Load the model\n",
"llm = BaichuanLLM()\n",
"\n",
"res = llm(\"What's your name?\")\n",
"res = llm.invoke(\"What's your name?\")\n",
"print(res)"
]
},
4 changes: 2 additions & 2 deletions docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb
@@ -80,7 +80,7 @@
"os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n",
"\n",
"llm = QianfanLLMEndpoint(streaming=True)\n",
"res = llm(\"hi\")\n",
"res = llm.invoke(\"hi\")\n",
"print(res)"
]
},
@@ -185,7 +185,7 @@
" model=\"ERNIE-Bot-turbo\",\n",
" endpoint=\"eb-instant\",\n",
")\n",
"res = llm(\"hi\")"
"res = llm.invoke(\"hi\")"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/bittensor.ipynb
@@ -62,7 +62,7 @@
" } \"\"\"\n",
"\n",
"multi_response_llm = NIBittensorLLM(top_responses=10)\n",
"multi_resp = multi_response_llm(\"What is Neural Network Feeding Mechanism?\")\n",
"multi_resp = multi_response_llm.invoke(\"What is Neural Network Feeding Mechanism?\")\n",
"json_multi_resp = json.loads(multi_resp)\n",
"pprint(json_multi_resp)"
]
4 changes: 2 additions & 2 deletions docs/docs/integrations/llms/ctransformers.ipynb
@@ -62,7 +62,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(llm(\"AI is going to\"))"
"print(llm.invoke(\"AI is going to\"))"
]
},
{
@@ -85,7 +85,7 @@
" model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n",
")\n",
"\n",
"response = llm(\"AI is going to\")"
"response = llm.invoke(\"AI is going to\")"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/ctranslate2.ipynb
@@ -97,7 +97,7 @@
],
"source": [
"print(\n",
" llm(\n",
" llm.invoke(\n",
" \"He presented me with plausible evidence for the existence of unicorns: \",\n",
" max_length=256,\n",
" sampling_topk=50,\n",
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/deepsparse.ipynb
@@ -32,7 +32,7 @@
" model=\"zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none\"\n",
")\n",
"\n",
"print(llm(\"def fib():\"))"
"print(llm.invoke(\"def fib():\"))"
]
},
{
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/edenai.ipynb
@@ -203,7 +203,7 @@
"User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
"Assistant:\n",
"\"\"\"\n",
"print(llm(prompt))"
"print(llm.invoke(prompt))"
]
},
{
8 changes: 4 additions & 4 deletions docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
@@ -359,7 +359,7 @@
"}\n",
"message = HumanMessage(content=[text_message, image_message])\n",
"\n",
"output = llm([message])\n",
"output = llm.invoke([message])\n",
"print(output.content)"
]
},
@@ -432,7 +432,7 @@
"}\n",
"message = HumanMessage(content=[text_message, image_message])\n",
"\n",
"output = llm([message])\n",
"output = llm.invoke([message])\n",
"print(output.content)"
]
},
@@ -457,7 +457,7 @@
"outputs": [],
"source": [
"message2 = HumanMessage(content=\"And where the image is taken?\")\n",
"output2 = llm([message, output, message2])\n",
"output2 = llm.invoke([message, output, message2])\n",
"print(output2.content)"
]
},
@@ -486,7 +486,7 @@
"}\n",
"message = HumanMessage(content=[text_message, image_message])\n",
"\n",
"output = llm([message])\n",
"output = llm.invoke([message])\n",
"print(output.content)"
]
},
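The third hunk in this file threads the model's own reply back into the next call: `invoke` accepts a list of alternating human and AI messages, so multi-turn context is just a longer list. A sketch reusing the hunk's names, with the follow-up wording illustrative:

follow_up = HumanMessage(content="And where was the image taken?")
output2 = llm.invoke([message, output, follow_up])  # the prior AIMessage carries the context
print(output2.content)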
4 changes: 3 additions & 1 deletion docs/docs/integrations/llms/koboldai.ipynb
@@ -57,7 +57,9 @@
},
"outputs": [],
"source": [
"response = llm(\"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\")"
"response = llm.invoke(\n",
" \"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\"\n",
")"
]
}
],
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/konko.ipynb
@@ -90,7 +90,7 @@
"llm = Konko(model=\"mistralai/mistral-7b-v0.1\", temperature=0.1, max_tokens=128)\n",
"\n",
"input_ = \"\"\"You are a helpful assistant. Explain Big Bang Theory briefly.\"\"\"\n",
"print(llm(input_))"
"print(llm.invoke(input_))"
]
},
{
16 changes: 8 additions & 8 deletions docs/docs/integrations/llms/llm_caching.ipynb
@@ -1020,7 +1020,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Why is the Moon always showing the same side?\"))"
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
]
},
{
@@ -1044,7 +1044,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Why is the Moon always showing the same side?\"))"
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
]
},
{
@@ -1109,7 +1109,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Why is the Moon always showing the same side?\"))"
"print(llm.invoke(\"Why is the Moon always showing the same side?\"))"
]
},
{
@@ -1133,7 +1133,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"How come we always see one face of the moon?\"))"
"print(llm.invoke(\"How come we always see one face of the moon?\"))"
]
},
{
@@ -1238,7 +1238,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Is a true fakery the same as a fake truth?\"))"
"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
]
},
{
@@ -1262,7 +1262,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Is a true fakery the same as a fake truth?\"))"
"print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))"
]
},
{
@@ -1327,7 +1327,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Are there truths that are false?\"))"
"print(llm.invoke(\"Are there truths that are false?\"))"
]
},
{
@@ -1351,7 +1351,7 @@
"source": [
"%%time\n",
"\n",
"print(llm(\"Is is possible that something false can be also true?\"))"
"print(llm.invoke(\"Is is possible that something false can be also true?\"))"
]
},
{
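These `%%time` cells come from the LLM caching guide: each prompt is invoked twice, and the second, identical call should return almost instantly from cache. The cache setup itself sits outside the visible hunks; a sketch of the usual shape, assuming the in-memory backend (the page also covers persistent and semantic caches):

from langchain.globals import set_llm_cache
from langchain_community.cache import InMemoryCache

set_llm_cache(InMemoryCache())

llm.invoke("Why is the Moon always showing the same side?")  # first call hits the API
llm.invoke("Why is the Moon always showing the same side?")  # repeat is served from cache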
4 changes: 2 additions & 2 deletions docs/docs/integrations/llms/predibase.ipynb
@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"response = model(\"Can you recommend me a nice dry wine?\")\n",
"response = model.invoke(\"Can you recommend me a nice dry wine?\")\n",
"print(response)"
]
},
@@ -262,7 +262,7 @@
"metadata": {},
"outputs": [],
"source": [
"# response = model(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
"# response = model.invoke(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
]
}
],
6 changes: 3 additions & 3 deletions docs/docs/integrations/llms/replicate.ipynb
@@ -323,7 +323,7 @@
"User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n",
"Assistant:\n",
"\"\"\"\n",
"_ = llm(prompt)"
"_ = llm.invoke(prompt)"
]
},
{
@@ -376,13 +376,13 @@
"Assistant:\n",
"\"\"\"\n",
"start_time = time.perf_counter()\n",
"raw_output = llm(prompt) # raw output, no stop\n",
"raw_output = llm.invoke(prompt) # raw output, no stop\n",
"end_time = time.perf_counter()\n",
"print(f\"Raw output:\\n {raw_output}\")\n",
"print(f\"Raw output runtime: {end_time - start_time} seconds\")\n",
"\n",
"start_time = time.perf_counter()\n",
"stopped_output = llm(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
"stopped_output = llm.invoke(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n",
"end_time = time.perf_counter()\n",
"print(f\"Stopped output:\\n {stopped_output}\")\n",
"print(f\"Stopped output runtime: {end_time - start_time} seconds\")"
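`invoke` keeps the `stop` parameter, so the notebook's stop-sequence timing comparison ports one-to-one; a minimal sketch:

raw_output = llm.invoke(prompt)  # runs to the model's natural end
stopped_output = llm.invoke(prompt, stop=["\n\n"])  # cut off at the first double newline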