diff --git a/cookbook/example_external_evaluation_pipelines.ipynb b/cookbook/example_external_evaluation_pipelines.ipynb
index acf288b12..c6aabb5d7 100644
--- a/cookbook/example_external_evaluation_pipelines.ipynb
+++ b/cookbook/example_external_evaluation_pipelines.ipynb
@@ -41,7 +41,7 @@
" frameborder=\"0\"\n",
" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\"\n",
" referrerpolicy=\"strict-origin-when-cross-origin\"\n",
- " allowfullscreen\n",
+ " allowFullScreen\n",
">"
]
},
diff --git a/cookbook/example_llm_security_monitoring.ipynb b/cookbook/example_llm_security_monitoring.ipynb
index 169cd2e15..b5de57c57 100644
--- a/cookbook/example_llm_security_monitoring.ipynb
+++ b/cookbook/example_llm_security_monitoring.ipynb
@@ -892,18 +892,18 @@
"provenance": []
},
"kernelspec": {
- "display_name": "Deno",
- "language": "typescript",
- "name": "deno"
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
},
"language_info": {
"codemirror_mode": "typescript",
"file_extension": ".ts",
"mimetype": "text/x.typescript",
- "name": "typescript",
+ "name": "python",
"nbconvert_exporter": "script",
"pygments_lexer": "typescript",
- "version": "5.4.5"
+ "version": "3.9.18"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
diff --git a/cookbook/integration_langchain.ipynb b/cookbook/integration_langchain.ipynb
index cf091ea2f..9a51d53cc 100644
--- a/cookbook/integration_langchain.ipynb
+++ b/cookbook/integration_langchain.ipynb
@@ -1,722 +1,1011 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "KlceIPalN3QR"
- },
- "source": [
- "---\n",
- "description: Cookbook with examples of the Langfuse Integration for Langchain (Python).\n",
- "category: Integrations\n",
- "---"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "mqBspBzuRk9C"
- },
- "source": [
- "# Cookbook: Langchain Integration"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "x1oaA7XYGOfX"
- },
- "source": [
- "This is a cookbook with examples of the Langfuse Integration for Langchain (Python).\n",
- "\n",
- "Follow the [integration guide](https://langfuse.com/docs/integrations/langchain) to add this integration to your Langchain project. The integration also supports Langchain JS."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "AbSpd5EiZouE"
- },
- "source": [
- "## Setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "YNyU6IzCZouE"
- },
- "outputs": [],
- "source": [
- "%pip install langfuse langchain langchain_openai --upgrade"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "OpE57ujJZouE"
- },
- "source": [
- "Initialize the Langfuse client with your API keys from the project settings in the Langfuse UI and add them to your environment."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "dEdF-668ZouF"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "# get keys for your project from https://cloud.langfuse.com\n",
- "os.environ[\"LANGFUSE_PUBLIC_KEY\"] = \"pk-lf-***\"\n",
- "os.environ[\"LANGFUSE_SECRET_KEY\"] = \"sk-lf-***\"\n",
- "os.environ[\"LANGFUSE_HOST\"] = \"https://cloud.langfuse.com\" # for EU data region\n",
- "# os.environ[\"LANGFUSE_HOST\"] = \"https://us.cloud.langfuse.com\" # for US data region\n",
- "\n",
- "# your openai key\n",
- "os.environ[\"OPENAI_API_KEY\"] = \"***\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "divRadPqZouF"
- },
- "outputs": [],
- "source": [
- "from langfuse.callback import CallbackHandler\n",
- "\n",
- "langfuse_handler = CallbackHandler()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "8FVbg1RWoT8W"
- },
- "outputs": [],
- "source": [
- "# Tests the SDK connection with the server\n",
- "langfuse_handler.auth_check()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Examples"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "QvRWPsZ-NoAr"
- },
- "source": [
- "### Sequential Chain in Langchain Expression Language (LCEL)\n",
- "\n",
- "![Trace of Langchain LCEL](https://langfuse.com/images/docs/langchain_LCEL.png)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "3-HEia6gNoAr"
- },
- "outputs": [],
- "source": [
- "from operator import itemgetter\n",
- "from langchain_openai import ChatOpenAI\n",
- "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.schema import StrOutputParser\n",
- "\n",
- "langfuse_handler = CallbackHandler()\n",
- "\n",
- "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
- "prompt2 = ChatPromptTemplate.from_template(\n",
- " \"what country is the city {city} in? respond in {language}\"\n",
- ")\n",
- "model = ChatOpenAI()\n",
- "chain1 = prompt1 | model | StrOutputParser()\n",
- "chain2 = (\n",
- " {\"city\": chain1, \"language\": itemgetter(\"language\")}\n",
- " | prompt2\n",
- " | model\n",
- " | StrOutputParser()\n",
- ")\n",
- "\n",
- "chain2.invoke({\"person\": \"obama\", \"language\": \"spanish\"}, config={\"callbacks\":[langfuse_handler]})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "#### Runnable methods\n",
- "\n",
- "Runnables are units of work that can be invoked, batched, streamed, transformed and composed.\n",
- "\n",
- "The examples below show how to use the following methods with Langfuse:\n",
- "\n",
- "- invoke/ainvoke: Transforms a single input into an output.\n",
- "\n",
- "- batch/abatch: Efficiently transforms multiple inputs into outputs.\n",
- "\n",
- "- stream/astream: Streams output from a single input as it’s produced."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Async Invoke\n",
- "await chain2.ainvoke({\"person\": \"biden\", \"language\": \"german\"}, config={\"callbacks\":[langfuse_handler]})\n",
- "\n",
- "# Batch\n",
- "chain2.batch([{\"person\": \"elon musk\", \"language\": \"english\"}, {\"person\": \"mark zuckerberg\", \"language\": \"english\"}], config={\"callbacks\":[langfuse_handler]})\n",
- "\n",
- "# Async Batch\n",
- "await chain2.abatch([{\"person\": \"jeff bezos\", \"language\": \"english\"}, {\"person\": \"tim cook\", \"language\": \"english\"}], config={\"callbacks\":[langfuse_handler]})\n",
- "\n",
- "# Stream\n",
- "for chunk in chain2.stream({\"person\": \"steve jobs\", \"language\": \"english\"}, config={\"callbacks\":[langfuse_handler]}):\n",
- " print(\"Streaming chunk:\", chunk)\n",
- "\n",
- "# Async Stream\n",
- "async for chunk in chain2.astream({\"person\": \"bill gates\", \"language\": \"english\"}, config={\"callbacks\":[langfuse_handler]}):\n",
- " print(\"Async Streaming chunk:\", chunk)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "5v6TUIrVGkn3"
- },
- "source": [
- "### ConversationChain\n",
- "\n",
- "We'll use a [session](https://langfuse.com/docs/tracing-features/sessions) in Langfuse to track this conversation with each invocation being a single trace.\n",
- "\n",
- "In addition to the traces of each run, you also get a conversation view of the entire session:\n",
- "\n",
- "![Session view of ConversationChain in Langfuse](https://langfuse.com/images/docs/langchain_session.png)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "8HXyXC2LGga6"
- },
- "outputs": [],
- "source": [
- "from langchain.chains import ConversationChain\n",
- "from langchain.memory import ConversationBufferMemory\n",
- "from langchain_openai import OpenAI\n",
- "\n",
- "llm = OpenAI(temperature=0)\n",
- "\n",
- "conversation = ConversationChain(\n",
- " llm=llm, memory=ConversationBufferMemory()\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "EWRj0qvKHLNE"
- },
- "outputs": [],
- "source": [
- "# Create a callback handler with a session\n",
- "langfuse_handler = CallbackHandler(session_id=\"conversation_chain\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "aIHmNekVHItt"
- },
- "outputs": [],
- "source": [
- "conversation.predict(input=\"Hi there!\", callbacks=[langfuse_handler])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "tsAunGSwHkrt"
- },
- "outputs": [],
- "source": [
- "conversation.predict(input=\"How to build great developer tools?\", callbacks=[langfuse_handler])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "m8O6hShcHsGe"
- },
- "outputs": [],
- "source": [
- "conversation.predict(input=\"Summarize your last response\", callbacks=[langfuse_handler])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "FP5avhNb3TBH"
- },
- "source": [
- "### RetrievalQA\n",
- "\n",
- "![Trace of Langchain QA Retrieval in Langfuse](https://langfuse.com/images/docs/langchain_qa_retrieval.jpg)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "wjiWEkRUFzCf"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "os.environ[\"SERPAPI_API_KEY\"] = \"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "p0CgEPSlEpkC"
- },
- "outputs": [],
- "source": [
- "%pip install unstructured selenium langchain-chroma --upgrade"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "kHDVa-Ssb-KT"
- },
- "outputs": [],
- "source": [
- "from langchain_community.document_loaders import SeleniumURLLoader\n",
- "from langchain_chroma import Chroma\n",
- "from langchain_text_splitters import CharacterTextSplitter\n",
- "from langchain_openai import OpenAIEmbeddings\n",
- "from langchain.chains import RetrievalQA\n",
- "\n",
- "langfuse_handler = CallbackHandler()\n",
- "\n",
- "urls = [\n",
- " \"https://raw.githubusercontent.com/langfuse/langfuse-docs/main/public/state_of_the_union.txt\",\n",
- "]\n",
- "loader = SeleniumURLLoader(urls=urls)\n",
- "llm = OpenAI()\n",
- "documents = loader.load()\n",
- "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
- "texts = text_splitter.split_documents(documents)\n",
- "embeddings = OpenAIEmbeddings()\n",
- "docsearch = Chroma.from_documents(texts, embeddings)\n",
- "query = \"What did the president say about Ketanji Brown Jackson\"\n",
- "chain = RetrievalQA.from_chain_type(\n",
- " llm,\n",
- " retriever=docsearch.as_retriever(search_kwargs={\"k\": 1}),\n",
- ")\n",
- "\n",
- "chain.invoke(query, config={\"callbacks\":[langfuse_handler]})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "JCmI0I20-sbI"
- },
- "source": [
- "### Agent"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ReaHdQOT-S3n"
- },
- "outputs": [],
- "source": [
- "from langchain.agents import AgentExecutor, load_tools, create_openai_functions_agent\n",
- "from langchain_openai import ChatOpenAI\n",
- "from langchain import hub\n",
- "\n",
- "langfuse_handler = CallbackHandler()\n",
- "\n",
- "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
- "tools = load_tools([\"serpapi\"])\n",
- "prompt = hub.pull(\"hwchase17/openai-functions-agent\")\n",
- "agent = create_openai_functions_agent(llm, tools, prompt)\n",
- "agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
- "\n",
- "agent_executor.invoke({\"input\": \"What is Langfuse?\"}, config={\"callbacks\":[langfuse_handler]})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "OIxwkX9p1ZR7"
- },
- "source": [
- "### AzureOpenAI"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "b43rIMig1ZR7"
- },
- "outputs": [],
- "source": [
- "os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"\"\n",
- "os.environ[\"AZURE_OPENAI_API_KEY\"] = \"\"\n",
- "os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n",
- "os.environ[\"OPENAI_API_VERSION\"] = \"2023-09-01-preview\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "_lLdPwnr1ZR7"
- },
- "outputs": [],
- "source": [
- "from langchain_openai import AzureChatOpenAI\n",
- "from langchain.prompts import ChatPromptTemplate\n",
- "\n",
- "langfuse_handler = CallbackHandler()\n",
- "\n",
- "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
- "model = AzureChatOpenAI(\n",
- " deployment_name=\"gpt-35-turbo\",\n",
- " model_name=\"gpt-3.5-turbo\",\n",
- ")\n",
- "chain = prompt | model\n",
- "\n",
- "chain.invoke({\"person\": \"Satya Nadella\"}, config={\"callbacks\":[langfuse_handler]})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ZUenj0aca9qo"
- },
- "source": [
- "### Sequential Chain [Legacy]\n",
- "\n",
- "![Trace of Langchain Sequential Chain in Langfuse](https://langfuse.com/images/docs/langchain_chain.jpg)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "dTagwV_cbFVr"
- },
- "outputs": [],
- "source": [
- "# further imports\n",
- "from langchain_openai import OpenAI\n",
- "from langchain.chains import LLMChain, SimpleSequentialChain\n",
- "from langchain.prompts import PromptTemplate\n",
- "\n",
- "llm = OpenAI()\n",
- "template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
- " Title: {title}\n",
- " Playwright: This is a synopsis for the above play:\"\"\"\n",
- "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
- "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
- "template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
- " Play Synopsis:\n",
- " {synopsis}\n",
- " Review from a New York Times play critic of the above play:\"\"\"\n",
- "prompt_template = PromptTemplate(input_variables=[\"synopsis\"], template=template)\n",
- "review_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
- "overall_chain = SimpleSequentialChain(\n",
- " chains=[synopsis_chain, review_chain],\n",
- ")\n",
- "\n",
- "# invoke\n",
- "review = overall_chain.invoke(\"Tragedy at sunset on the beach\", {\"callbacks\":[langfuse_handler]}) # add the handler to the run method\n",
- "# run [LEGACY]\n",
- "review = overall_chain.run(\"Tragedy at sunset on the beach\", callbacks=[langfuse_handler])# add the handler to the run method"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "AxQlUOmVPEwz"
- },
- "source": [
- "## Adding scores to traces\n",
- "\n",
- "In addition to the attributes automatically captured by the decorator, you can add others to use the full features of Langfuse.\n",
- "\n",
- "Two utility methods:\n",
- "* `langfuse_context.update_current_observation`: Update the trace/span of the current function scope\n",
- "* `langfuse_context.update_current_trace`: Update the trace itself, can also be called within any deeply nested span within the trace\n",
- "\n",
- "For details on available attributes, have a look at the [reference](https://python.reference.langfuse.com/langfuse/decorators#LangfuseDecorator.update_current_observation).\n",
- "\n",
- "Below is an example demonstrating how to enrich traces and observations with custom parameters:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "PudCopwEPFgh"
- },
- "outputs": [],
- "source": [
- "from langfuse.decorators import langfuse_context, observe\n",
- " \n",
- "@observe(as_type=\"generation\")\n",
- "def deeply_nested_llm_call():\n",
- " # Enrich the current observation with a custom name, input, and output\n",
- " langfuse_context.update_current_observation(\n",
- " name=\"Deeply nested LLM call\", input=\"Ping?\", output=\"Pong!\"\n",
- " )\n",
- " # Set the parent trace's name from within a nested observation\n",
- " langfuse_context.update_current_trace(\n",
- " name=\"Trace name set from deeply_nested_llm_call\",\n",
- " session_id=\"1234\",\n",
- " user_id=\"5678\",\n",
- " tags=[\"tag1\", \"tag2\"],\n",
- " public=True\n",
- " )\n",
- " \n",
- "@observe()\n",
- "def nested_span():\n",
- " # Update the current span with a custom name and level\n",
- " langfuse_context.update_current_observation(name=\"Nested Span\", level=\"WARNING\")\n",
- " deeply_nested_llm_call()\n",
- " \n",
- "@observe()\n",
- "def main():\n",
- " nested_span()\n",
- " \n",
- "# Execute the main function to generate the enriched trace\n",
- "main()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "On the Langfuse platform the trace now shows with the updated name from the `deeply_nested_llm_call`, and the observations will be enriched with the appropriate data points.\n",
- "\n",
- "**Example trace:** https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f16e0151-cca8-4d90-bccf-1d9ea0958afb"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "GEWWS8PGo4A1"
- },
- "source": [
- "## Interoperability with Langfuse Python SDK\n",
- "\n",
- "You can use this integration in combination with the `observe()` decorator from the Langfuse Python SDK. Thereby, you can trace non-Langchain code, combine multiple Langchain invocations in a single trace, and use the full functionality of the Langfuse Python SDK.\n",
- "\n",
- "The `langfuse_context.get_current_langchain_handler()` method exposes a LangChain callback handler in the context of a trace or span when using `decorators`. Learn more about Langfuse Tracing [here](https://langfuse.com/docs/tracing) and this functionality [here](https://langfuse.com/docs/sdk/python/decorators#langchain).\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "q1zlFuIimJfT"
- },
- "source": [
- "### How it works"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Op7qwM0Y-1bp"
- },
- "outputs": [],
- "source": [
- "from langfuse.decorators import langfuse_context, observe\n",
- "\n",
- "# Create a trace via Langfuse decorators and get a Langchain Callback handler for it\n",
- "@observe() # automtically log function as a trace to Langfuse\n",
- "def main():\n",
- " # update trace attributes (e.g, name, session_id, user_id)\n",
- " langfuse_context.update_current_trace(\n",
- " name=\"custom-trace\",\n",
- " session_id=\"user-1234\",\n",
- " user_id=\"session-1234\",\n",
- " )\n",
- " # get the langchain handler for the current trace\n",
- " langfuse_context.get_current_langchain_handler()\n",
- "\n",
- " # use the handler to trace langchain runs ...\n",
- "\n",
- "main()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "HRX2zFCOmwXH"
- },
- "source": [
- "### Example\n",
- "\n",
- "We'll run the same chain multiple times at different places within the hierarchy of a trace.\n",
- "\n",
- "```\n",
- "TRACE: person-locator\n",
- "|\n",
- "|-- SPAN: Chain (Alan Turing)\n",
- "|\n",
- "|-- SPAN: Physics\n",
- "| |\n",
- "| |-- SPAN: Chain (Albert Einstein)\n",
- "| |\n",
- "| |-- SPAN: Chain (Isaac Newton)\n",
- "| |\n",
- "| |-- SPAN: Favorites\n",
- "| | |\n",
- "| | |-- SPAN: Chain (Richard Feynman)\n",
- "```\n",
- "\n",
- "Setup chain"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ASq5sHErkmLB"
- },
- "outputs": [],
- "source": [
- "from langchain_openai import ChatOpenAI\n",
- "from langchain.prompts import ChatPromptTemplate\n",
- "\n",
- "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
- "model = ChatOpenAI()\n",
- "\n",
- "chain = prompt | model"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "fvJ1pv4MqzTi"
- },
- "source": [
- "Invoke it multiple times as part of a nested trace."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "CnHq-7QD3uAa"
- },
- "outputs": [],
- "source": [
- "from langfuse.decorators import langfuse_context, observe\n",
- "\n",
- "# On span \"Physics\".\"Favorites\"\n",
- "@observe() # decorator to automatically log function as sub-span to Langfuse\n",
- "def favorites():\n",
- " # get the langchain handler for the current sub-span\n",
- " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
- " # invoke chain with langfuse handler\n",
- " chain.invoke({\"person\": \"Richard Feynman\"},\n",
- " config={\"callbacks\": [langfuse_handler]})\n",
- "\n",
- "# On span \"Physics\"\n",
- "@observe() # decorator to automatically log function as span to Langfuse\n",
- "def physics():\n",
- " # get the langchain handler for the current span\n",
- " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
- " # invoke chains with langfuse handler\n",
- " chain.invoke({\"person\": \"Albert Einstein\"},\n",
- " config={\"callbacks\": [langfuse_handler]})\n",
- " chain.invoke({\"person\": \"Isaac Newton\"},\n",
- " config={\"callbacks\": [langfuse_handler]})\n",
- " favorites()\n",
- "\n",
- "# On trace\n",
- "@observe() # decorator to automatically log function as trace to Langfuse\n",
- "def main():\n",
- " # get the langchain handler for the current trace\n",
- " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
- " # invoke chain with langfuse handler\n",
- " chain.invoke({\"person\": \"Alan Turing\"},\n",
- " config={\"callbacks\": [langfuse_handler]})\n",
- " physics()\n",
- "\n",
- "main()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "MZ3q8iMDGOfd"
- },
- "source": [
- "View it in Langfuse\n",
- "\n",
- "![Trace of Nested Langchain Runs in Langfuse](https://langfuse.com/images/docs/langchain_python_trace_interoperability.png)"
- ]
- }
- ],
- "metadata": {
- "colab": {
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.12.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KlceIPalN3QR"
+ },
+ "source": [
+ "---\n",
+ "description: Cookbook with examples of the Langfuse Integration for Langchain (Python).\n",
+ "category: Integrations\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mqBspBzuRk9C"
+ },
+ "source": [
+ "# Cookbook: Langchain Integration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "x1oaA7XYGOfX"
+ },
+ "source": [
+ "This is a cookbook with examples of the Langfuse Integration for Langchain (Python).\n",
+ "\n",
+ "Follow the [integration guide](https://langfuse.com/docs/integrations/langchain) to add this integration to your Langchain project. The integration also supports Langchain JS."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AbSpd5EiZouE"
+ },
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "YNyU6IzCZouE",
+ "outputId": "234c71fb-f822-4b48-f4c0-94efe5f79305"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install langfuse langchain langchain_openai langchain_community --upgrade"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "OpE57ujJZouE"
+ },
+ "source": [
+ "Initialize the Langfuse client with your API keys from the project settings in the Langfuse UI and add them to your environment."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "dEdF-668ZouF"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "# Get keys for your project from the project settings page\n",
+ "# https://cloud.langfuse.com\n",
+ "os.environ[\"LANGFUSE_PUBLIC_KEY\"] = \"\"\n",
+ "os.environ[\"LANGFUSE_SECRET_KEY\"] = \"\"\n",
+ "os.environ[\"LANGFUSE_HOST\"] = \"https://cloud.langfuse.com\" # 🇪🇺 EU region\n",
+ "# os.environ[\"LANGFUSE_HOST\"] = \"https://us.cloud.langfuse.com\" # 🇺🇸 US region\n",
+ "\n",
+ "# Your openai key\n",
+ "os.environ[\"OPENAI_API_KEY\"] = \"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "divRadPqZouF"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse.callback import CallbackHandler\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()"
+ ]
+ },
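+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Optionally, trace attributes can be set directly on the handler; they then apply to every trace it creates. A minimal sketch, assuming the `user_id`, `session_id`, and `tags` constructor arguments of current SDK versions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: attach user, session and tags to all traces created via this handler\n",
+    "langfuse_handler_with_attributes = CallbackHandler(\n",
+    "    user_id=\"user-1234\",\n",
+    "    session_id=\"session-1234\",\n",
+    "    tags=[\"cookbook\", \"langchain\"]\n",
+    ")"
+   ]
+  },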
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "8FVbg1RWoT8W",
+ "outputId": "00f24ad8-f8ec-4ee2-fd4b-0c117ee8c557"
+ },
+ "outputs": [],
+ "source": [
+ "# Tests the SDK connection with the server\n",
+ "langfuse_handler.auth_check()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "1ZXRf2FZXEXV"
+ },
+ "source": [
+ "## Examples"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QvRWPsZ-NoAr"
+ },
+ "source": [
+ "### Sequential Chain in Langchain Expression Language (LCEL)\n",
+ "\n",
+ "![Trace of Langchain LCEL](https://langfuse.com/images/docs/langchain_LCEL.png)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "3-HEia6gNoAr",
+ "outputId": "ab223b99-4719-420c-ffe7-5444e4b67806"
+ },
+ "outputs": [],
+ "source": [
+ "from operator import itemgetter\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain.schema import StrOutputParser\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()\n",
+ "\n",
+ "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "prompt2 = ChatPromptTemplate.from_template(\n",
+ " \"what country is the city {city} in? respond in {language}\"\n",
+ ")\n",
+ "model = ChatOpenAI()\n",
+ "chain1 = prompt1 | model | StrOutputParser()\n",
+ "chain2 = (\n",
+ " {\"city\": chain1, \"language\": itemgetter(\"language\")}\n",
+ " | prompt2\n",
+ " | model\n",
+ " | StrOutputParser()\n",
+ ")\n",
+ "\n",
+ "chain2.invoke({\"person\": \"obama\", \"language\": \"spanish\"}, config={\"callbacks\":[langfuse_handler]})"
+ ]
+ },
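+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The SDK sends events to the Langfuse server asynchronously in batches. In short-lived environments such as scripts or serverless functions, flush the handler before the process exits so no queued events are lost. A minimal sketch, assuming the `flush()` method of current SDK versions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: block until all queued events have been sent to the Langfuse server\n",
+    "langfuse_handler.flush()"
+   ]
+  },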
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "NlBSpILFXEXV"
+ },
+ "source": [
+ "#### Runnable methods\n",
+ "\n",
+ "Runnables are units of work that can be invoked, batched, streamed, transformed and composed.\n",
+ "\n",
+ "The examples below show how to use the following methods with Langfuse:\n",
+ "\n",
+ "- invoke/ainvoke: Transforms a single input into an output.\n",
+ "\n",
+ "- batch/abatch: Efficiently transforms multiple inputs into outputs.\n",
+ "\n",
+ "- stream/astream: Streams output from a single input as it’s produced."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Y8N8pybGXEXV",
+ "outputId": "df6d23fc-ed65-4fd2-d0d6-3b9f97e3a497"
+ },
+ "outputs": [],
+ "source": [
+ "# Async Invoke\n",
+ "await chain2.ainvoke({\"person\": \"biden\", \"language\": \"german\"}, config={\"callbacks\":[langfuse_handler]})\n",
+ "\n",
+ "# Batch\n",
+ "chain2.batch([{\"person\": \"elon musk\", \"language\": \"english\"}, {\"person\": \"mark zuckerberg\", \"language\": \"english\"}], config={\"callbacks\":[langfuse_handler]})\n",
+ "\n",
+ "# Async Batch\n",
+ "await chain2.abatch([{\"person\": \"jeff bezos\", \"language\": \"english\"}, {\"person\": \"tim cook\", \"language\": \"english\"}], config={\"callbacks\":[langfuse_handler]})\n",
+ "\n",
+ "# Stream\n",
+ "for chunk in chain2.stream({\"person\": \"steve jobs\", \"language\": \"english\"}, config={\"callbacks\":[langfuse_handler]}):\n",
+ " print(\"Streaming chunk:\", chunk)\n",
+ "\n",
+ "# Async Stream\n",
+ "async for chunk in chain2.astream({\"person\": \"bill gates\", \"language\": \"english\"}, config={\"callbacks\":[langfuse_handler]}):\n",
+ " print(\"Async Streaming chunk:\", chunk)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5v6TUIrVGkn3"
+ },
+ "source": [
+ "### ConversationChain\n",
+ "\n",
+ "We'll use a [session](https://langfuse.com/docs/tracing-features/sessions) in Langfuse to track this conversation with each invocation being a single trace.\n",
+ "\n",
+ "In addition to the traces of each run, you also get a conversation view of the entire session:\n",
+ "\n",
+ "![Session view of ConversationChain in Langfuse](https://langfuse.com/images/docs/langchain_session.png)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "8HXyXC2LGga6",
+ "outputId": "5a0a0674-4982-4758-d077-4012bb1fa0ec"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain.chains import ConversationChain\n",
+ "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain_openai import OpenAI\n",
+ "\n",
+ "llm = OpenAI(temperature=0)\n",
+ "\n",
+ "conversation = ConversationChain(\n",
+ " llm=llm, memory=ConversationBufferMemory()\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "EWRj0qvKHLNE"
+ },
+ "outputs": [],
+ "source": [
+ "# Create a callback handler with a session\n",
+ "langfuse_handler = CallbackHandler(session_id=\"conversation_chain\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 71
+ },
+ "id": "aIHmNekVHItt",
+ "outputId": "4a7193f0-94f8-4f86-fac9-5ef94ef4b5f8"
+ },
+ "outputs": [],
+ "source": [
+ "conversation.predict(input=\"Hi there!\", callbacks=[langfuse_handler])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 160
+ },
+ "id": "tsAunGSwHkrt",
+ "outputId": "6bfa7080-aa89-4173-9440-1bdd06a0aa4f"
+ },
+ "outputs": [],
+ "source": [
+ "conversation.predict(input=\"How to build great developer tools?\", callbacks=[langfuse_handler])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 107
+ },
+ "id": "m8O6hShcHsGe",
+ "outputId": "b0b216ce-e6b8-4c44-8083-41a56ba0408b"
+ },
+ "outputs": [],
+ "source": [
+ "conversation.predict(input=\"Summarize your last response\", callbacks=[langfuse_handler])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FP5avhNb3TBH"
+ },
+ "source": [
+ "### RetrievalQA\n",
+ "\n",
+ "![Trace of Langchain QA Retrieval in Langfuse](https://langfuse.com/images/docs/langchain_qa_retrieval.jpg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "wjiWEkRUFzCf"
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ[\"SERPAPI_API_KEY\"] = \"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "p0CgEPSlEpkC",
+ "outputId": "36c800af-025d-407e-eca1-c215bba62cd2"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install unstructured selenium langchain-chroma --upgrade"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 438
+ },
+ "id": "kHDVa-Ssb-KT",
+ "outputId": "efab8170-76b9-412e-c086-365a16f065a9"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_community.document_loaders import SeleniumURLLoader\n",
+ "from langchain_chroma import Chroma\n",
+ "from langchain_text_splitters import CharacterTextSplitter\n",
+ "from langchain_openai import OpenAIEmbeddings\n",
+ "from langchain.chains import RetrievalQA\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()\n",
+ "\n",
+ "urls = [\n",
+ " \"https://raw.githubusercontent.com/langfuse/langfuse-docs/main/public/state_of_the_union.txt\",\n",
+ "]\n",
+ "loader = SeleniumURLLoader(urls=urls)\n",
+ "llm = OpenAI()\n",
+ "documents = loader.load()\n",
+ "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
+ "texts = text_splitter.split_documents(documents)\n",
+ "embeddings = OpenAIEmbeddings()\n",
+ "docsearch = Chroma.from_documents(texts, embeddings)\n",
+ "query = \"What did the president say about Ketanji Brown Jackson\"\n",
+ "chain = RetrievalQA.from_chain_type(\n",
+ " llm,\n",
+ " retriever=docsearch.as_retriever(search_kwargs={\"k\": 1}),\n",
+ ")\n",
+ "\n",
+ "chain.invoke(query, config={\"callbacks\":[langfuse_handler]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "JCmI0I20-sbI"
+ },
+ "source": [
+ "### Agent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "5zNewss5YsbF",
+ "outputId": "76f875d3-db43-4466-997b-9c7a2ecd77dc"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install google-search-results"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ReaHdQOT-S3n",
+ "outputId": "177a9a49-539e-4d36-a54c-94d8eb2de067"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain.agents import AgentExecutor, load_tools, create_openai_functions_agent\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain import hub\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
+ "tools = load_tools([\"serpapi\"])\n",
+ "prompt = hub.pull(\"hwchase17/openai-functions-agent\")\n",
+ "agent = create_openai_functions_agent(llm, tools, prompt)\n",
+ "agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
+ "\n",
+ "agent_executor.invoke({\"input\": \"What is Langfuse?\"}, config={\"callbacks\":[langfuse_handler]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "OIxwkX9p1ZR7"
+ },
+ "source": [
+ "### AzureOpenAI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "b43rIMig1ZR7"
+ },
+ "outputs": [],
+ "source": [
+ "os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"\"\n",
+ "os.environ[\"AZURE_OPENAI_API_KEY\"] = \"\"\n",
+ "os.environ[\"OPENAI_API_TYPE\"] = \"azure\"\n",
+ "os.environ[\"OPENAI_API_VERSION\"] = \"2023-09-01-preview\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "_lLdPwnr1ZR7"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_openai import AzureChatOpenAI\n",
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()\n",
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = AzureChatOpenAI(\n",
+ " deployment_name=\"gpt-35-turbo\",\n",
+ " model_name=\"gpt-3.5-turbo\",\n",
+ ")\n",
+ "chain = prompt | model\n",
+ "\n",
+ "chain.invoke({\"person\": \"Satya Nadella\"}, config={\"callbacks\":[langfuse_handler]})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ZUenj0aca9qo"
+ },
+ "source": [
+ "### Sequential Chain [Legacy]\n",
+ "\n",
+ "![Trace of Langchain Sequential Chain in Langfuse](https://langfuse.com/images/docs/langchain_chain.jpg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "dTagwV_cbFVr"
+ },
+ "outputs": [],
+ "source": [
+ "# further imports\n",
+ "from langchain_openai import OpenAI\n",
+ "from langchain.chains import LLMChain, SimpleSequentialChain\n",
+ "from langchain.prompts import PromptTemplate\n",
+ "\n",
+ "llm = OpenAI()\n",
+ "template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
+ " Title: {title}\n",
+ " Playwright: This is a synopsis for the above play:\"\"\"\n",
+ "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
+ "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
+ "template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
+ " Play Synopsis:\n",
+ " {synopsis}\n",
+ " Review from a New York Times play critic of the above play:\"\"\"\n",
+ "prompt_template = PromptTemplate(input_variables=[\"synopsis\"], template=template)\n",
+ "review_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
+ "overall_chain = SimpleSequentialChain(\n",
+ " chains=[synopsis_chain, review_chain],\n",
+ ")\n",
+ "\n",
+ "# invoke\n",
+ "review = overall_chain.invoke(\"Tragedy at sunset on the beach\", {\"callbacks\":[langfuse_handler]}) # add the handler to the run method\n",
+ "# run [LEGACY]\n",
+ "review = overall_chain.run(\"Tragedy at sunset on the beach\", callbacks=[langfuse_handler])# add the handler to the run method"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AmntBjLSc_pZ"
+ },
+ "source": [
+ "## Customize trace names via run_name\n",
+ "\n",
+ "By default, Langfuse uses the Langchain run_name as trace/observation names. For more complex/custom chains, it can be useful to customize the names via own run_names.\n",
+ "\n",
+ "![Custom LangChain Run Names](https://langfuse.com/images/cookbook/integration-langchain/custom_langchain_run_names.png)\n",
+ "\n",
+ "**Example without custom run names**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "JQWBXsbudMdW",
+ "outputId": "e3249b07-0094-401c-c371-6a5159d6ebf0"
+ },
+ "outputs": [],
+ "source": [
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = ChatOpenAI()\n",
+ "chain = prompt1 | model | StrOutputParser()\n",
+ "chain.invoke({\"person\": \"Grace Hopper\"}, config={\n",
+ " \"callbacks\":[langfuse_handler]\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BCwmF2MFdq2P"
+ },
+ "source": [
+ "### Via Runnable Config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "rWiKr2i7dVrI",
+ "outputId": "3bdc4e3f-bb16-44f7-a479-6855002fdafc"
+ },
+ "outputs": [],
+ "source": [
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\").with_config(run_name=\"Famous Person Prompt\")\n",
+ "model = ChatOpenAI().with_config(run_name=\"Famous Person LLM\")\n",
+ "output_parser = StrOutputParser().with_config(run_name=\"Famous Person Output Parser\")\n",
+ "chain = (prompt1 | model | output_parser).with_config(run_name=\"Famous Person Locator\")\n",
+ "\n",
+ "chain.invoke({\"person\": \"Grace Hopper\"}, config={\n",
+ " \"callbacks\":[langfuse_handler]\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TnUWivDZfElb"
+ },
+ "source": [
+ "Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/ec9fcc46-ca38-4bdb-9482-eb06a5f90944"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "oRWBK-XIe6C0"
+ },
+ "source": [
+ "### Via Run Config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "ybc7Bno7eGvh",
+ "outputId": "02700a8e-a83c-4402-f436-ba5e5ef1288c"
+ },
+ "outputs": [],
+ "source": [
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = ChatOpenAI()\n",
+ "chain = prompt1 | model | StrOutputParser()\n",
+ "chain.invoke({\"person\": \"Grace Hopper\"}, config={\n",
+ " \"run_name\": \"Famous Person Locator\",\n",
+ " \"callbacks\":[langfuse_handler]\n",
+ " })"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "uK_GDS0ofGOt"
+ },
+ "source": [
+ "Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/b48204e2-fd48-487b-8f66-015e3f10613d"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GEWWS8PGo4A1"
+ },
+ "source": [
+ "## Interoperability with Langfuse Python SDK\n",
+ "\n",
+ "You can use this integration in combination with the `observe()` decorator from the Langfuse Python SDK. Thereby, you can trace non-Langchain code, combine multiple Langchain invocations in a single trace, and use the full functionality of the Langfuse Python SDK.\n",
+ "\n",
+ "The `langfuse_context.get_current_langchain_handler()` method exposes a LangChain callback handler in the context of a trace or span when using `decorators`. Learn more about Langfuse Tracing [here](https://langfuse.com/docs/tracing) and this functionality [here](https://langfuse.com/docs/sdk/python/decorators#langchain).\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "q1zlFuIimJfT"
+ },
+ "source": [
+ "### How it works"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Op7qwM0Y-1bp"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse.decorators import langfuse_context, observe\n",
+ "\n",
+ "# Create a trace via Langfuse decorators and get a Langchain Callback handler for it\n",
+ "@observe() # automtically log function as a trace to Langfuse\n",
+ "def main():\n",
+ " # update trace attributes (e.g, name, session_id, user_id)\n",
+ " langfuse_context.update_current_trace(\n",
+ " name=\"custom-trace\",\n",
+ " session_id=\"user-1234\",\n",
+ " user_id=\"session-1234\",\n",
+ " )\n",
+ " # get the langchain handler for the current trace\n",
+ " langfuse_context.get_current_langchain_handler()\n",
+ "\n",
+ " # use the handler to trace langchain runs ...\n",
+ "\n",
+ "main()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "HRX2zFCOmwXH"
+ },
+ "source": [
+ "### Example\n",
+ "\n",
+ "We'll run the same chain multiple times at different places within the hierarchy of a trace.\n",
+ "\n",
+ "```\n",
+ "TRACE: person-locator\n",
+ "|\n",
+ "|-- SPAN: Chain (Alan Turing)\n",
+ "|\n",
+ "|-- SPAN: Physics\n",
+ "| |\n",
+ "| |-- SPAN: Chain (Albert Einstein)\n",
+ "| |\n",
+ "| |-- SPAN: Chain (Isaac Newton)\n",
+ "| |\n",
+ "| |-- SPAN: Favorites\n",
+ "| | |\n",
+ "| | |-- SPAN: Chain (Richard Feynman)\n",
+ "```\n",
+ "\n",
+ "Setup chain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ASq5sHErkmLB"
+ },
+ "outputs": [],
+ "source": [
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = ChatOpenAI()\n",
+ "\n",
+ "chain = prompt | model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fvJ1pv4MqzTi"
+ },
+ "source": [
+ "Invoke it multiple times as part of a nested trace."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "CnHq-7QD3uAa"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse.decorators import langfuse_context, observe\n",
+ "\n",
+ "# On span \"Physics\".\"Favorites\"\n",
+ "@observe() # decorator to automatically log function as sub-span to Langfuse\n",
+ "def favorites():\n",
+ " # get the langchain handler for the current sub-span\n",
+ " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
+ " # invoke chain with langfuse handler\n",
+ " chain.invoke({\"person\": \"Richard Feynman\"},\n",
+ " config={\"callbacks\": [langfuse_handler]})\n",
+ "\n",
+ "# On span \"Physics\"\n",
+ "@observe() # decorator to automatically log function as span to Langfuse\n",
+ "def physics():\n",
+ " # get the langchain handler for the current span\n",
+ " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
+ " # invoke chains with langfuse handler\n",
+ " chain.invoke({\"person\": \"Albert Einstein\"},\n",
+ " config={\"callbacks\": [langfuse_handler]})\n",
+ " chain.invoke({\"person\": \"Isaac Newton\"},\n",
+ " config={\"callbacks\": [langfuse_handler]})\n",
+ " favorites()\n",
+ "\n",
+ "# On trace\n",
+ "@observe() # decorator to automatically log function as trace to Langfuse\n",
+ "def main():\n",
+ " # get the langchain handler for the current trace\n",
+ " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
+ " # invoke chain with langfuse handler\n",
+ " chain.invoke({\"person\": \"Alan Turing\"},\n",
+ " config={\"callbacks\": [langfuse_handler]})\n",
+ " physics()\n",
+ "\n",
+ "main()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "MZ3q8iMDGOfd"
+ },
+ "source": [
+ "View it in Langfuse\n",
+ "\n",
+ "![Trace of Nested Langchain Runs in Langfuse](https://langfuse.com/images/docs/langchain_python_trace_interoperability.png)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AxQlUOmVPEwz"
+ },
+ "source": [
+ "## Adding evaluation/feedback scores to traces\n",
+ "\n",
+ "Evaluation results and user feedback are recorded as [scores](https://langfuse.com/docs/scores) in Langfuse.\n",
+ "\n",
+ "To add a score to a trace, you need to know the trace_id. There are two options to achieve this when using LangChain:\n",
+ "\n",
+ "1. Provide a predefined LangChain run_id\n",
+ "2. Use the Langfuse Decorator to get the trace_id\n",
+ "\n",
+ "![Langchain Trace in Langfuse with Score](https://langfuse.com/images/cookbook/integration-langchain/langchain_trace_with_score.png)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "O-g2NmhDZW0C"
+ },
+ "source": [
+ "### Predefined LangChain `run_id`\n",
+ "\n",
+ "Langfuse uses the LangChain run_id as a trace_id. Thus you can provide a custom run_id to the runnable config in order to later add scores to the trace."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 35
+ },
+ "id": "qy0YKqnuZs4t",
+ "outputId": "19173bf1-422b-4bb2-c2cf-28578b3b06df"
+ },
+ "outputs": [],
+ "source": [
+ "from operator import itemgetter\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain.schema import StrOutputParser\n",
+ "import uuid\n",
+ "\n",
+ "predefined_run_id = str(uuid.uuid4())\n",
+ "\n",
+ "langfuse_handler = CallbackHandler()\n",
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = ChatOpenAI()\n",
+ "chain = prompt1 | model | StrOutputParser()\n",
+ "\n",
+ "chain.invoke({\"person\": \"Ada Lovelace\"}, config={\n",
+ " \"run_id\": predefined_run_id,\n",
+ " \"callbacks\":[langfuse_handler]\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "69_1fkiBaX6v"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse import Langfuse\n",
+ "\n",
+ "langfuse = Langfuse()\n",
+ "\n",
+ "langfuse.score(\n",
+ " trace_id=predefined_run_id,\n",
+ " name=\"user-feedback\",\n",
+ " value=1,\n",
+ " comment=\"This was correct, thank you\"\n",
+ ");"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TyUVWU_abAyX"
+ },
+ "source": [
+ "Example Trace in Langfuse: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/9860fffa-02ed-4278-bcf7-c856c569cead"
+ ]
+ },
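+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To verify programmatically that the score arrived, the trace can be fetched via the API. A minimal sketch, assuming the `fetch_trace` method and response shape of recent Python SDK versions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Events are processed asynchronously; flush and wait briefly before fetching\n",
+    "langfuse.flush()\n",
+    "\n",
+    "trace = langfuse.fetch_trace(predefined_run_id)\n",
+    "print(trace.data.scores)"
+   ]
+  },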
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fcnVMNogZlEr"
+ },
+ "source": [
+ "### Via Langfuse Decorator\n",
+ "\n",
+ "Alternatively, you can use the LangChain integration together with the [Langfuse @observe-decorator](https://langfuse.com/docs/sdk/python/decorators) for Python."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "PudCopwEPFgh"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse.decorators import langfuse_context, observe\n",
+ "from operator import itemgetter\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain.schema import StrOutputParser\n",
+ "import uuid\n",
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
+ "model = ChatOpenAI()\n",
+ "chain = prompt1 | model | StrOutputParser()\n",
+ "\n",
+ "@observe()\n",
+ "def main(person):\n",
+ "\n",
+ " langfuse_handler = langfuse_context.get_current_langchain_handler()\n",
+ "\n",
+ " response = chain.invoke({\"person\": person}, config={\n",
+ " \"callbacks\":[langfuse_handler]\n",
+ " })\n",
+ "\n",
+ " trace_id = langfuse_context.get_current_trace_id()\n",
+ "\n",
+ " return trace_id, response\n",
+ "\n",
+ "\n",
+ "trace_id, response = main(\"Ada Lovelace\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "JgzuVhwycjTE"
+ },
+ "outputs": [],
+ "source": [
+ "from langfuse import Langfuse\n",
+ "\n",
+ "langfuse = Langfuse()\n",
+ "\n",
+ "langfuse.score(\n",
+ " trace_id=trace_id,\n",
+ " name=\"user-feedback\",\n",
+ " value=1,\n",
+ " comment=\"This was correct, thank you\"\n",
+ ");"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XZfTCj-9c0yP"
+ },
+ "source": [
+ "Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/08bb7cf3-87c6-4a78-a3fc-72af8959a106"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/next-sitemap.config.js b/next-sitemap.config.js
index 6f84baac6..668c9983e 100644
--- a/next-sitemap.config.js
+++ b/next-sitemap.config.js
@@ -15,5 +15,9 @@ module.exports = {
// Exclude non-canonical pages from sitemap which are also part of the docs
...cookbookRoutes
.filter(({ docsPath }) => !!docsPath)
- .map(({ notebook }) => `/guides/cookbook/${notebook.replace(".ipynb", "")}`)],
+ .map(({ notebook }) => `/guides/cookbook/${notebook.replace(".ipynb", "")}`),
+ // Exclude _meta files
+ '*/_meta'
+ ],
+
}
\ No newline at end of file
diff --git a/package.json b/package.json
index ab8cb04ae..4e09fd931 100644
--- a/package.json
+++ b/package.json
@@ -39,10 +39,10 @@
"gpt3-tokenizer": "^1.1.5",
"langfuse": "^3.25.0",
"lucide-react": "^0.441.0",
- "next": "^14.2.12",
+ "next": "^14.2.13",
"next-sitemap": "^4.2.3",
- "nextra": "^2.13.4",
- "nextra-theme-docs": "^2.13.4",
+ "nextra": "^3.0.1",
+ "nextra-theme-docs": "^3.0.1",
"openai-edge": "^1.2.2",
"postcss": "^8.4.47",
"posthog-js": "^1.161.6",
diff --git a/pages/_app.mdx b/pages/_app.tsx
similarity index 100%
rename from pages/_app.mdx
rename to pages/_app.tsx
diff --git a/pages/_meta.json b/pages/_meta.json
deleted file mode 100644
index 6d27c6612..000000000
--- a/pages/_meta.json
+++ /dev/null
@@ -1,159 +0,0 @@
-{
- "index": {
- "type": "page",
- "title": "Langfuse",
- "display": "hidden",
- "theme": {
- "layout": "raw"
- }
- },
- "experimentation": {
- "title": "Experimentation",
- "type": "page",
- "display": "hidden"
- },
- "imprint": {
- "title": "Imprint",
- "type": "page",
- "display": "hidden"
- },
- "schedule-demo": {
- "type": "page",
- "title": "Schedule demo",
- "display": "hidden",
- "theme": {
- "layout": "raw"
- }
- },
- "docs": {
- "type": "page",
- "title": "Docs"
- },
- "guides": {
- "type": "page",
- "title": "Guides",
- "display": "hidden"
- },
- "faq": {
- "type": "page",
- "title": "FAQ",
- "display": "hidden"
- },
- "cookbook": {
- "type": "page",
- "title": "Cookbook",
- "display": "hidden"
- },
- "pricing": {
- "title": "Pricing",
- "type": "page",
- "theme": {
- "layout": "full"
- }
- },
- "changelog": {
- "type": "page",
- "title": "Changelog",
- "theme": {
- "layout": "full"
- }
- },
- "blog": {
- "title": "Blog",
- "type": "page",
- "theme": {
- "layout": "full"
- }
- },
- "demo": {
- "title": "Demo",
- "type": "menu",
- "items": {
- "try-yourself": {
- "title": "Interactive demo",
- "href": "/docs/demo"
- },
- "video": {
- "title": "Video (3 min)",
- "href": "/video"
- },
- "schedule-demo": {
- "title": "Schedule demo",
- "href": "/schedule-demo"
- }
- }
- },
- "careers": {
- "title": "Careers",
- "type": "page",
- "display": "hidden"
- },
- "support": {
- "title": "Support",
- "type": "page",
- "display": "hidden"
- },
- "why": {
- "title": "Why Langfuse",
- "type": "page",
- "display": "hidden",
- "theme": {
- "typesetting": "article",
- "timestamp": false
- }
- },
- "enterprise": {
- "title": "Enterprise",
- "type": "page",
- "display": "hidden",
- "theme": {
- "typesetting": "article",
- "timestamp": false
- }
- },
- "library": {
- "title": "Library",
- "type": "page",
- "display": "hidden",
- "theme": {
- "typesetting": "article",
- "timestamp": false
- }
- },
- "terms": {
- "title": "Terms and Conditions",
- "type": "page",
- "display": "hidden"
- },
- "privacy": {
- "title": "Privacy Policy",
- "type": "page",
- "display": "hidden"
- },
- "cookie-policy": {
- "title": "Cookie Policy",
- "type": "page",
- "display": "hidden"
- },
- "oss-friends": {
- "title": "OSS Friends",
- "type": "page",
- "display": "hidden"
- },
- "about": {
- "title": "About us",
- "type": "page",
- "display": "hidden",
- "theme": {
- "typesetting": "article",
- "timestamp": false
- }
- },
- "404": {
- "type": "page",
- "theme": {
- "typesetting": "article",
- "timestamp": false
- }
- }
-}
diff --git a/pages/_meta.tsx b/pages/_meta.tsx
new file mode 100644
index 000000000..d90359df0
--- /dev/null
+++ b/pages/_meta.tsx
@@ -0,0 +1,158 @@
+export default {
+ index: {
+ type: "page",
+ title: "Langfuse",
+ display: "hidden",
+ theme: {
+ layout: "raw",
+ },
+ },
+ experimentation: {
+ title: "Experimentation",
+ type: "page",
+ display: "hidden",
+ },
+ imprint: {
+ title: "Imprint",
+ type: "page",
+ display: "hidden",
+ },
+ "schedule-demo": {
+ type: "page",
+ title: "Schedule demo",
+ display: "hidden",
+ theme: {
+ layout: "raw",
+ },
+ },
+ docs: {
+ type: "page",
+ title: "Docs",
+ },
+ guides: {
+ type: "page",
+ title: "Guides",
+    // hidden from the main menu via overrides.css; nextra's display:hidden would otherwise break type:page
+ },
+ faq: {
+ type: "page",
+ title: "FAQ",
+    // hidden from the main menu via overrides.css; nextra's display:hidden would otherwise break type:page
+ },
+ pricing: {
+ title: "Pricing",
+ type: "page",
+ theme: {
+ layout: "full",
+ },
+ },
+ changelog: {
+ type: "page",
+ title: "Changelog",
+ theme: {
+ layout: "full",
+ },
+ },
+ blog: {
+ title: "Blog",
+ type: "page",
+ theme: {
+ layout: "full",
+ },
+ },
+ demo: {
+ title: "Demo",
+ type: "menu",
+ items: {
+ "try-yourself": {
+ title: "Interactive demo",
+ href: "/docs/demo",
+ },
+ video: {
+ title: "Video (3 min)",
+ href: "/video",
+ },
+ "schedule-demo": {
+ title: "Schedule demo",
+ href: "/schedule-demo",
+ },
+ },
+ },
+ careers: {
+ title: "Careers",
+ type: "page",
+ display: "hidden",
+ },
+ support: {
+ title: "Support",
+ type: "page",
+ display: "hidden",
+ },
+ why: {
+ title: "Why Langfuse",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+ enterprise: {
+ title: "Enterprise",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+ library: {
+ title: "Library",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+ terms: {
+ title: "Terms and Conditions",
+ type: "page",
+ display: "hidden",
+ },
+ privacy: {
+ title: "Privacy Policy",
+ type: "page",
+ display: "hidden",
+ },
+ "cookie-policy": {
+ title: "Cookie Policy",
+ type: "page",
+ display: "hidden",
+ },
+ "oss-friends": {
+ title: "OSS Friends",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+ about: {
+ title: "About us",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+ "404": {
+ type: "page",
+ theme: {
+ typesetting: "article",
+ timestamp: false,
+ },
+ },
+};
diff --git a/pages/blog/_meta.json b/pages/blog/_meta.json
deleted file mode 100644
index b30af8b8c..000000000
--- a/pages/blog/_meta.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "*": {
- "theme": {
- "toc": false,
- "sidebar": false,
- "pagination": true,
- "typesetting": "article",
- "layout": "default",
- "breadcrumb": false
- }
- }
-}
diff --git a/pages/blog/_meta.tsx b/pages/blog/_meta.tsx
new file mode 100644
index 000000000..544e65982
--- /dev/null
+++ b/pages/blog/_meta.tsx
@@ -0,0 +1,12 @@
+export default {
+ "*": {
+ theme: {
+ toc: false,
+ sidebar: false,
+ pagination: true,
+ typesetting: "article",
+ layout: "default",
+ breadcrumb: false,
+ },
+ },
+};
diff --git a/pages/blog/announcing-our-seed-round.mdx b/pages/blog/announcing-our-seed-round.mdx
index 3d3e3789c..f17b48147 100644
--- a/pages/blog/announcing-our-seed-round.mdx
+++ b/pages/blog/announcing-our-seed-round.mdx
@@ -41,7 +41,7 @@ We took these first-hand learnings and began working on solutions. As engineers,
-
+
Challenges of building LLM applications and how Langfuse helps
diff --git a/pages/blog/launch-week-1.mdx b/pages/blog/launch-week-1.mdx
index 2a8071a4d..fbb6c56d3 100644
--- a/pages/blog/launch-week-1.mdx
+++ b/pages/blog/launch-week-1.mdx
@@ -35,7 +35,7 @@ import { Tweet } from "@/components/Tweet";
### Day 0: OpenAI JS SDK Integration
-```typescript /import { observeOpenAI } from "langfuse"/ /observeOpenAI/
+```ts /import { observeOpenAI } from "langfuse"/ /observeOpenAI/
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
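// Minimal sketch of the wrapper in use; model and prompt are illustrative
const openai = observeOpenAI(new OpenAI());

const completion = await openai.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Tell me a joke." }],
});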
diff --git a/pages/blog/showcase-llm-chatbot.mdx b/pages/blog/showcase-llm-chatbot.mdx
index 109d06382..080df61dc 100644
--- a/pages/blog/showcase-llm-chatbot.mdx
+++ b/pages/blog/showcase-llm-chatbot.mdx
@@ -72,7 +72,7 @@ npm i langfuse
**Initialize client**
-```typescript
+```ts
const langfuse = new Langfuse({
secretKey: process.env.LANGFUSE_SECRET_KEY,
publicKey: process.env.NEXT_PUBLIC_LANGFUSE_PUBLIC_KEY,
@@ -81,7 +81,7 @@ const langfuse = new Langfuse({
**Grouping conversation as trace in Langfuse**
-```typescript
+```ts
const trace = langfuse.trace({
name: "chat",
id: `chat:${chatId}`,
@@ -100,7 +100,7 @@ const trace = langfuse.trace({
Before starting the LLM call, we create a generation object in Langfuse. This sets the start_time used for latency analysis in Langfuse, configures the generation object (e.g. which tokenizer to use to estimate token counts), and provides us with the `generation_id`, which we need in the frontend to log user feedback.
-```typescript
+```ts
const lfGeneration = trace.generation({
name: "chat",
input: openAiMessages,
@@ -115,7 +115,7 @@ const lfGeneration = trace.generation({
Thanks to the Vercel AI SDK, we can use the `onStart` and `onCompletion` callbacks to update/end the generation object in Langfuse.
-```typescript
+```ts
// once streaming started
async onStart() {
lfGeneration.update({
@@ -135,7 +135,7 @@ async onCompletion(completion) {
The simplest way to provide the `generation_id` to the frontend when using streaming responses is to add it as a custom header. This id is required to log user feedback in the frontend and relate it to the individual message.
-```typescript
+```ts
return new StreamingTextResponse(stream, {
headers: {
"X-Message-Id": lfGeneration.id,
@@ -147,7 +147,7 @@ return new StreamingTextResponse(stream, {
The ai-chatbot uses Vercel-KV to store the chat history. We can add debug events to the generation object to track the usage of the KV store.
-```typescript
+```ts
lfGeneration.event({
name: "kv-hmset",
level: "DEBUG",
@@ -175,7 +175,7 @@ Feedback modal:
We use the Langfuse Typescript SDK directly in the frontend to log the user feedback to Langfuse.
The SDK requires the `publicKey` which can be safely exposed as it can only be used to log user feedback.
-```typescript
+```ts
const langfuse = new LangfuseWeb({
publicKey: process.env.NEXT_PUBLIC_LANGFUSE_PUBLIC_KEY ?? "",
});
@@ -188,7 +188,7 @@ We created an event handler for the feedback form. The feedback is then logged a
1. `traceId`, which is the unique identifier of the conversation thread. As in the backend, we use the `chatId` which is the same for all messages in a conversation and already available in the frontend.
2. `observationId` which is the unique identifier of the observation within the trace that we want to relate the feedback to. In this case we made the Langfuse `generation.id` from the backend available as the `message.id` in the frontend. _For details on how we captured the custom streaming response header which included the id, see [components/chat.tsx](https://github.com/langfuse/ai-chatbot/blob/main/components/chat.tsx)._
-```typescript
+```ts
await langfuse.score({
traceId: `chat:${chatId}`,
observationId: message.id,
diff --git a/pages/blog/update-2023-08.mdx b/pages/blog/update-2023-08.mdx
index 27d46ae88..70d6aeaea 100644
--- a/pages/blog/update-2023-08.mdx
+++ b/pages/blog/update-2023-08.mdx
@@ -47,7 +47,7 @@ The details 👇
Last month we released the Python Integration for Langchain and now shipped the same for teams building with JS/TS. We released a new package [langfuse-langchain](https://www.npmjs.com/package/langfuse-langchain) which exposes a `CallbackHandler` that automatically traces your complex Langchain chains and agents. Simply pass it as a callback.
-```typescript /{ callbacks: [handler] }/
+```ts /{ callbacks: [handler] }/
// Initialize Langfuse handler
import CallbackHandler from "langfuse-langchain";
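// Sketch of wiring it up; keys and chain are illustrative
const handler = new CallbackHandler({
  publicKey: "pk-lf-...",
  secretKey: "sk-lf-...",
});

// Pass the handler to the chain invocation to trace it
const result = await chain.invoke({ input: "..." }, { callbacks: [handler] });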
@@ -114,7 +114,7 @@ Releases are available for all SDKs. They can be added in three ways (in order o
langfuse = Langfuse(ENV_PUBLIC_KEY, ENV_SECRET_KEY, ENV_HOST, release='ba7816b')
```
- ```typescript
+ ```ts
// TypeScript
langfuse = new Langfuse({
publicKey: ENV_PUBLIC_KEY,
diff --git a/pages/blog/update-2023-09.mdx b/pages/blog/update-2023-09.mdx
index c60b2eb8b..a364ffafd 100644
--- a/pages/blog/update-2023-09.mdx
+++ b/pages/blog/update-2023-09.mdx
@@ -195,9 +195,11 @@ Share traces with anyone via public links. The other person doesn't need a Langf
_Example: https://cloud.langfuse.com/project/clkpwwm0m000gmm094odg11gi/traces/2d6b96f2-0a4d-4366-99a5-1ad558c66e99_
-
- ![Share Langfuse trace via public link](/images/docs/trace-share-link.gif)
-
+
## 💾 Export generations (for fine-tuning) [#export-generations]
diff --git a/pages/blog/update-2023-10.mdx b/pages/blog/update-2023-10.mdx
index 9cae75143..2ade417ba 100644
--- a/pages/blog/update-2023-10.mdx
+++ b/pages/blog/update-2023-10.mdx
@@ -140,12 +140,12 @@ handler.get_trace_url()
-```typescript
+```ts
// trace object
trace.getTraceUrl()
```
-```typescript
+```ts
// Langchain callback handler
handler.getTraceUrl();
```
diff --git a/pages/changelog/2024-01-16-trace-tagging.mdx b/pages/changelog/2024-01-16-trace-tagging.mdx
index 3180e1bb0..3c97a0cbd 100644
--- a/pages/changelog/2024-01-16-trace-tagging.mdx
+++ b/pages/changelog/2024-01-16-trace-tagging.mdx
@@ -29,7 +29,7 @@ trace = langfuse.trace(
**Typescript**
-```typescript
+```ts
const trace = langfuse.trace({
name: "docs-retrieval",
tags: ["my-first-tag", "even-better-tag"],
diff --git a/pages/changelog/2024-02-05-sdk-level-prompt-caching.mdx b/pages/changelog/2024-02-05-sdk-level-prompt-caching.mdx
index 1b9ca0900..53b436de9 100644
--- a/pages/changelog/2024-02-05-sdk-level-prompt-caching.mdx
+++ b/pages/changelog/2024-02-05-sdk-level-prompt-caching.mdx
@@ -31,7 +31,7 @@ prompt = langfuse.get_prompt("prompt name", cache_ttl_seconds=0)
-```typescript
+```ts
// Get current production version and cache prompt for 5 minutes
const prompt = await langfuse.getPrompt("prompt name", undefined, {
cacheTtlSeconds: 300,
diff --git a/pages/changelog/2024-04-21-openai-integration-JS-SDK.mdx b/pages/changelog/2024-04-21-openai-integration-JS-SDK.mdx
index f3f833abf..f90f955b4 100644
--- a/pages/changelog/2024-04-21-openai-integration-JS-SDK.mdx
+++ b/pages/changelog/2024-04-21-openai-integration-JS-SDK.mdx
@@ -22,7 +22,7 @@ Thanks to [@noble-varghese](https://github.com/noble-varghese) and [@RichardKrue
### Quickstart
-```typescript /import { observeOpenAI } from "langfuse"/ /observeOpenAI/
+```ts /import { observeOpenAI } from "langfuse"/ /observeOpenAI/
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
diff --git a/pages/changelog/2024-05-07-prompts-api-and-deployment-labels.mdx b/pages/changelog/2024-05-07-prompts-api-and-deployment-labels.mdx
index 7f4e5109a..8acf24eb2 100644
--- a/pages/changelog/2024-05-07-prompts-api-and-deployment-labels.mdx
+++ b/pages/changelog/2024-05-07-prompts-api-and-deployment-labels.mdx
@@ -13,7 +13,7 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
## REST API for prompt management
-We revamped the API for prompt management to allow you to fully manage prompts via the REST API. You can now fetch, create, update, and delete prompts programmatically.
+We revamped the API for prompt management to allow you to fully manage prompts via the REST API. You can now fetch, create, and update prompts programmatically.
New endpoints ([API reference](https://api.reference.langfuse.com)):
diff --git a/pages/changelog/2024-07-04-query-traces-via-sdks.mdx b/pages/changelog/2024-07-04-query-traces-via-sdks.mdx
index c25684af5..7ca11e1aa 100644
--- a/pages/changelog/2024-07-04-query-traces-via-sdks.mdx
+++ b/pages/changelog/2024-07-04-query-traces-via-sdks.mdx
@@ -47,7 +47,7 @@ sessions = langfuse.fetch_sessions()
npm install langfuse
```
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
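// Sketch of the JS counterparts to the Python fetch_* helpers above;
// method names and filters are assumptions based on this release, see the SDK reference
const traces = await langfuse.fetchTraces({ userId: "my-user" });
const trace = await langfuse.fetchTrace("traceId");
const observations = await langfuse.fetchObservations({ traceId: "traceId" });
const sessions = await langfuse.fetchSessions();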
diff --git a/pages/changelog/2024-07-11-non-numeric-scores-api.mdx b/pages/changelog/2024-07-11-non-numeric-scores-api.mdx
index 09bd66817..d9166d775 100644
--- a/pages/changelog/2024-07-11-non-numeric-scores-api.mdx
+++ b/pages/changelog/2024-07-11-non-numeric-scores-api.mdx
@@ -1,7 +1,7 @@
---
date: 2024-07-11
-title: Create Non-Numeric Scores via SDKs
-description: The Langfuse API, Python and JS SDKs now support creating categorical, boolean and numeric scores.
+title: Create Non-Numeric Scores via SDKs
+description: The Langfuse API, Python and JS SDKs now support creating categorical, boolean and numeric scores.
author: Marlies
---
@@ -12,7 +12,7 @@ import { BookOpen } from "lucide-react";
Our improved API allows you to create categorical and boolean scores, in addition to the previously supported numeric scores. If you need your scores to follow a specific format (e.g. data type, range or name), you can now define a score configuration using the API or UI and validate scores against it.
-Check out our [scores documentation](/docs/scores/custom) for score ingestion examples and code snippets.
+Check out our [scores documentation](/docs/scores/custom) for score ingestion examples and code snippets.
### New API routes
@@ -20,7 +20,7 @@ Check out our [scores documentation](/docs/scores/custom) for score ingestion ex
- `GET /api/public/score-configs/{configId}`
- `POST /api/public/score-configs`
-### Existing API Routes with new parameters
+### Existing API Routes with new parameters
- `GET /api/public/scores` can now retrieve scores by `dataType` and/or `configId`
- `POST /api/public/scores` now supports defining `dataType` and/or `configId`
@@ -54,16 +54,16 @@ langfuse.score(
npm install langfuse
```
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
// Create a single numeric, categorical or boolean score
await langfuse.score({
- scoreName: "accuracy",
- scoreValue: 0.95,
- dataType: "NUMERIC",
- traceId: "traceId"
+ scoreName: "accuracy",
+ scoreValue: 0.95,
+ dataType: "NUMERIC",
+ traceId: "traceId",
});
```
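For illustration, the same call can create a categorical score. A minimal sketch (field names mirror the numeric example above; the score name and string value are illustrative):

```ts
// Create a categorical score for the same trace
await langfuse.score({
  scoreName: "sentiment",
  scoreValue: "positive", // string value, validated against a score config if one exists
  dataType: "CATEGORICAL",
  traceId: "traceId",
});
```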
@@ -73,13 +73,10 @@ await langfuse.score({
## Learn more
diff --git a/pages/changelog/2024-08-02-vercel-ai-sdk-integration.mdx b/pages/changelog/2024-08-02-vercel-ai-sdk-integration.mdx
index 2a0cdd942..7aa0a7f5d 100644
--- a/pages/changelog/2024-08-02-vercel-ai-sdk-integration.mdx
+++ b/pages/changelog/2024-08-02-vercel-ai-sdk-integration.mdx
@@ -11,7 +11,7 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
-```typescript filename="instrumentation.ts" {7}
+```ts filename="instrumentation.ts" {7}
import { registerOTel } from "@vercel/otel";
import { LangfuseExporter } from "langfuse-vercel";
@@ -23,7 +23,7 @@ export function register() {
}
```
-```typescript {4}
+```ts {4}
const result = await generateText({
model: openai("gpt-4-turbo"),
prompt: "Write a short story about a cat.",
diff --git a/pages/changelog/2024-09-04-prompt-management-zero-latency.mdx b/pages/changelog/2024-09-04-prompt-management-zero-latency.mdx
index 57265820e..28faa1913 100644
--- a/pages/changelog/2024-09-04-prompt-management-zero-latency.mdx
+++ b/pages/changelog/2024-09-04-prompt-management-zero-latency.mdx
@@ -22,7 +22,7 @@ prompt = langfuse.get_prompt("movie-critic")
-```typescript
+```ts
const prompt = await langfuse.getPrompt("movie-critic");
```
diff --git a/pages/changelog/2024-09-17-prompt-linking-langchain.mdx b/pages/changelog/2024-09-17-prompt-linking-langchain.mdx
index 62618cc56..d14df4fd4 100644
--- a/pages/changelog/2024-09-17-prompt-linking-langchain.mdx
+++ b/pages/changelog/2024-09-17-prompt-linking-langchain.mdx
@@ -2,7 +2,6 @@
date: 2024-09-17
title: Link prompts to Langchain executions
description: Prompt management just got more powerful for Langchain users by linking Langfuse prompts to Langchain executions.
-showOgInHeader: false
author: Hassieb
ogImage: /images/changelog/2024-09-17-prompt-link-langchain-trace.png
---
@@ -17,9 +16,7 @@ With the link established between a prompt managed in Langfuse and its correspon
In the trace view on the Langfuse platform, you will see the prompt link alongside your Langchain generation. You can also filter generations for a given prompt in the generations table and list all generations that originated from a prompt in the prompt detail view.
-![Trace view Langchain execution with Langfuse prompt link](/images/changelog/2024-09-17-prompt-link-langchain-trace.png)
-
-##### Learn more
+**Learn more**
- [Documentation](/docs/prompts/get-started#link-with-langfuse-tracing-optional)
- [Python Cookbook Prompt Management with Langchain](/docs/prompts/example-langchain)
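For reference, a sketch of the linking pattern in JS (the prompt name is illustrative; `getLangchainPrompt()` and the `metadata: { langfusePrompt }` field follow the documentation linked above):

```ts
import { Langfuse } from "langfuse";
import { PromptTemplate } from "@langchain/core/prompts";

const langfuse = new Langfuse();

// Fetch the prompt from Langfuse and convert it to a Langchain template
const langfusePrompt = await langfuse.getPrompt("movie-critic");

// Passing the Langfuse prompt as metadata links every execution of this
// template to the prompt version in Langfuse
const promptTemplate = PromptTemplate.fromTemplate(
  langfusePrompt.getLangchainPrompt()
).withConfig({
  metadata: { langfusePrompt },
});
```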
diff --git a/pages/changelog/2024-09-20-aws-marketplace.mdx b/pages/changelog/2024-09-20-aws-marketplace.mdx
index 9564716fe..d6a3b0ec1 100644
--- a/pages/changelog/2024-09-20-aws-marketplace.mdx
+++ b/pages/changelog/2024-09-20-aws-marketplace.mdx
@@ -11,7 +11,7 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
-The **AWS Marketplace** listing makes it easier to **purchase, manage and deploy Langfuse in the AWS ecosystem.**
+The **AWS Marketplace** listing makes it easier to **purchase Langfuse in the AWS ecosystem.**
AWS Marketplace is a digital catalog with thousands of software listings from independent software vendors (ISVs) that makes it easy to find, test, buy, and deploy the right software for your business.
diff --git a/pages/changelog/_meta.json b/pages/changelog/_meta.json
deleted file mode 100644
index e23bae0c0..000000000
--- a/pages/changelog/_meta.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "*": {
- "type": "page",
- "theme": {
- "layout": "default"
- }
- }
-}
diff --git a/pages/changelog/_meta.tsx b/pages/changelog/_meta.tsx
new file mode 100644
index 000000000..aa1ca6b20
--- /dev/null
+++ b/pages/changelog/_meta.tsx
@@ -0,0 +1,8 @@
+export default {
+ "*": {
+ type: "page",
+ theme: {
+ layout: "default",
+ },
+ },
+};
diff --git a/pages/docs/_meta.json b/pages/docs/_meta.json
deleted file mode 100644
index 5930e7e0f..000000000
--- a/pages/docs/_meta.json
+++ /dev/null
@@ -1,84 +0,0 @@
-{
- "-- Switcher": {
- "type": "separator",
- "title": "Switcher"
- },
- "index": "Overview",
- "demo": "Interactive Demo",
- "deployment": "Self-host",
- "-- Tracing": {
- "type": "separator",
- "title": "Tracing"
- },
- "tracing": "Introduction",
- "get-started": "Quickstart",
- "tracing-features": "Features",
- "sdk": "SDKs",
- "integrations": "Integrations",
- "query-traces": "Query Traces",
-
- "-- Develop": {
- "type": "separator",
- "title": "Develop"
- },
- "prompts": "Prompt Management",
- "playground": "Playground",
- "fine-tuning": "Fine-tuning",
-
- "-- Monitor": {
- "type": "separator",
- "title": "Monitor"
- },
- "analytics": "Analytics",
- "model-usage-and-cost": "Model Usage & Cost",
- "scores": "Scores & Evaluation",
- "security": "LLM Security",
-
- "-- Test": {
- "type": "separator",
- "title": "Test"
- },
- "experimentation": "Experimentation",
- "datasets": "Datasets",
-
- "-- References": {
- "type": "separator",
- "title": "References"
- },
- "api-ref": {
- "title": "API ↗",
- "href": "https://api.reference.langfuse.com",
- "newWindow": true
- },
- "python-ref": {
- "title": "Python SDK ↗",
- "href": "https://python.reference.langfuse.com",
- "newWindow": true
- },
- "js-ref": {
- "title": "JS SDK ↗",
- "href": "https://js.reference.langfuse.com",
- "newWindow": true
- },
- "-- More": {
- "type": "separator",
- "title": "More"
- },
- "rbac": "Access Control (RBAC)",
- "data-security-privacy": "Data Security & Privacy",
- "open-source": "Open Source",
- "roadmap": "Roadmap",
- "support": {
- "title": "Support ↗",
- "href": "/support",
- "newWindow": true
- },
- "video": {
- "title": "Video (2 min)",
- "type": "page",
- "display": "hidden",
- "theme": {
- "typesetting": "article"
- }
- }
-}
diff --git a/pages/docs/_meta.tsx b/pages/docs/_meta.tsx
new file mode 100644
index 000000000..c9e3e4216
--- /dev/null
+++ b/pages/docs/_meta.tsx
@@ -0,0 +1,86 @@
+import { MenuSwitcher } from "@/components/MenuSwitcher";
+
+export default {
+ "-- Switcher": {
+ type: "separator",
+ title: <MenuSwitcher />,
+ },
+ index: "Overview",
+ demo: "Interactive Demo",
+ deployment: "Self-host",
+ "-- Tracing": {
+ type: "separator",
+ title: "Tracing",
+ },
+ tracing: "Introduction",
+ "get-started": "Quickstart",
+ "tracing-features": "Features",
+ sdk: "SDKs",
+ integrations: "Integrations",
+ "query-traces": "Query Traces",
+
+ "-- Develop": {
+ type: "separator",
+ title: "Develop",
+ },
+ prompts: "Prompt Management",
+ playground: "Playground",
+ "fine-tuning": "Fine-tuning",
+
+ "-- Monitor": {
+ type: "separator",
+ title: "Monitor",
+ },
+ analytics: "Analytics",
+ "model-usage-and-cost": "Model Usage & Cost",
+ scores: "Scores & Evaluation",
+ security: "LLM Security",
+
+ "-- Test": {
+ type: "separator",
+ title: "Test",
+ },
+ experimentation: "Experimentation",
+ datasets: "Datasets",
+
+ "-- References": {
+ type: "separator",
+ title: "References",
+ },
+ "api-ref": {
+ title: "API ↗",
+ href: "https://api.reference.langfuse.com",
+ newWindow: true,
+ },
+ "python-ref": {
+ title: "Python SDK ↗",
+ href: "https://python.reference.langfuse.com",
+ newWindow: true,
+ },
+ "js-ref": {
+ title: "JS SDK ↗",
+ href: "https://js.reference.langfuse.com",
+ newWindow: true,
+ },
+ "-- More": {
+ type: "separator",
+ title: "More",
+ },
+ rbac: "Access Control (RBAC)",
+ "data-security-privacy": "Data Security & Privacy",
+ "open-source": "Open Source",
+ roadmap: "Roadmap",
+ support: {
+ title: "Support ↗",
+ href: "/support",
+ newWindow: true,
+ },
+ video: {
+ title: "Video (2 min)",
+ type: "page",
+ display: "hidden",
+ theme: {
+ typesetting: "article",
+ },
+ },
+};
diff --git a/pages/docs/analytics/_meta.json b/pages/docs/analytics/_meta.json
deleted file mode 100644
index 4ce4d5023..000000000
--- a/pages/docs/analytics/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "overview": "Overview",
- "posthog": "PostHog Integration"
-}
diff --git a/pages/docs/analytics/_meta.tsx b/pages/docs/analytics/_meta.tsx
new file mode 100644
index 000000000..c35fd16e4
--- /dev/null
+++ b/pages/docs/analytics/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ overview: "Overview",
+ posthog: "PostHog Integration",
+};
diff --git a/pages/docs/datasets/_meta.json b/pages/docs/datasets/_meta.json
deleted file mode 100644
index b3f387d78..000000000
--- a/pages/docs/datasets/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "overview": "Overview",
- "python-cookbook": "Cookbook"
-}
diff --git a/pages/docs/datasets/_meta.tsx b/pages/docs/datasets/_meta.tsx
new file mode 100644
index 000000000..8d68a305a
--- /dev/null
+++ b/pages/docs/datasets/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ overview: "Overview",
+ "python-cookbook": "Cookbook",
+};
diff --git a/pages/docs/deployment/_meta.json b/pages/docs/deployment/_meta.json
deleted file mode 100644
index 80e967e95..000000000
--- a/pages/docs/deployment/_meta.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "feature-overview": "Deployment & Features",
- "local": "Local (docker compose)",
- "self-host": "Self-host (docker)",
- "v3": {
- "title": "V3 (in development)",
- "display": "hidden"
- }
-}
diff --git a/pages/docs/deployment/_meta.tsx b/pages/docs/deployment/_meta.tsx
new file mode 100644
index 000000000..84f0bdfac
--- /dev/null
+++ b/pages/docs/deployment/_meta.tsx
@@ -0,0 +1,9 @@
+export default {
+ "feature-overview": "Deployment & Features",
+ local: "Local (docker compose)",
+ "self-host": "Self-host (docker)",
+ v3: {
+ title: "V3 (in development)",
+ display: "hidden",
+ },
+};
diff --git a/pages/docs/deployment/self-host.mdx b/pages/docs/deployment/self-host.mdx
index 9c47aa48c..5f92e8aaf 100644
--- a/pages/docs/deployment/self-host.mdx
+++ b/pages/docs/deployment/self-host.mdx
@@ -96,7 +96,7 @@ Langfuse can be configured using environment variables ([.env.prod.example](http
| `LANGFUSE_DEFAULT_PROJECT_ROLE` | `VIEWER` | Role of the user in the default project (if set). Possible values are `OWNER`, `ADMIN`, `MEMBER`, `VIEWER`. See [roles](/docs/rbac) for details. |
| `SMTP_CONNECTION_URL` | | Configure optional SMTP server connection for transactional email. Connection URL is passed to Nodemailer ([docs](https://nodemailer.com/smtp)). |
| `EMAIL_FROM_ADDRESS` | | Configure from address for transactional email. Required if `SMTP_CONNECTION_URL` is set. |
-| `S3_ENDPOINT` `S3_ACCESS_KEY_ID` `S3_SECRET_ACCESS_KEY` `S3_BUCKET_NAME` `S3_REGION` | | Optional S3 configuration to enable large exports from the UI. |
+| `S3_ENDPOINT` `S3_ACCESS_KEY_ID` `S3_SECRET_ACCESS_KEY` `S3_BUCKET_NAME` `S3_REGION` | | Optional S3 configuration for enabling large exports from the UI. `S3_BUCKET_NAME` is required to enable exports. The other variables are optional and will use the default provider credential chain if not specified. |
| `DB_EXPORT_PAGE_SIZE` | `1000` | Optional page size for streaming exports to S3 to avoid memory issues. The page size can be adjusted if needed to optimize performance. |
| `LANGFUSE_AUTO_POSTGRES_MIGRATION_DISABLED` | `false` | Set to `true` to disable automatic database migrations on docker startup. |
| `LANGFUSE_LOG_LEVEL` | `info` | Set the log level for the application. Possible values are `trace`, `debug`, `info`, `warn`, `error`, `fatal`. |
@@ -179,6 +179,7 @@ Organization
Troubleshooting:
- If you use `LANGFUSE_INIT_*` in Docker Compose, do not double-quote the values ([GitHub issue](https://github.com/langfuse/langfuse/issues/3398)).
+- The resources depend on one another (see note above). For example, you must create an organization to initialize a project.
### Configuring the Enterprise Edition [#ee]
diff --git a/pages/docs/experimentation.mdx b/pages/docs/experimentation.mdx
index 9ef4e8cb9..9789761fa 100644
--- a/pages/docs/experimentation.mdx
+++ b/pages/docs/experimentation.mdx
@@ -63,7 +63,7 @@ langfuse = Langfuse(
-```typescript /release: ""/
+```ts /release: ""/
import { Langfuse } from "langfuse";
langfuse = new Langfuse({
@@ -81,7 +81,7 @@ handler = CallbackHandler(release="")
-```typescript /release: ""/
+```ts /release: ""/
import { CallbackHandler } from "langfuse-langchain";
const handler = new CallbackHandler({
@@ -156,7 +156,7 @@ langfuse.generation(
-```typescript /version: "1.0"/
+```ts /version: "1.0"/
langfuse.generation({
name: "guess-countries",
version: "1.0",
@@ -175,7 +175,7 @@ handler = CallbackHandler(version="1.0")
-```typescript /version: "1.0"/
+```ts /version: "1.0"/
import { CallbackHandler } from "langfuse-langchain";
const handler = new CallbackHandler({
diff --git a/pages/docs/get-started.mdx b/pages/docs/get-started.mdx
index dcd5ff362..42f18d1df 100644
--- a/pages/docs/get-started.mdx
+++ b/pages/docs/get-started.mdx
@@ -73,7 +73,7 @@ import { Langfuse } from "https://esm.sh/langfuse"
Example usage; most of the parameters are optional and depend on the use case. For more information, see the [TypeScript SDK docs](/docs/sdk/typescript/guide).
-```typescript filename="server.ts"
+```ts filename="server.ts"
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -137,7 +137,7 @@ completion = openai.chat.completions.create(
With your environment configured, call OpenAI SDK methods as usual from the wrapped client.
-```typescript
+```ts
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
@@ -354,7 +354,6 @@ import IconOpenai from "@/components/icons/openai";
/>
-
## FAQ
import { FaqPreview } from "@/components/faq/FaqPreview";
diff --git a/pages/docs/index.mdx b/pages/docs/index.mdx
index a99036ffd..a48f39cdf 100644
--- a/pages/docs/index.mdx
+++ b/pages/docs/index.mdx
@@ -68,7 +68,7 @@ import { Callout } from "nextra/components";
-
+
Challenges of building LLM applications and how Langfuse helps
@@ -117,6 +117,9 @@ Subscribe to the **mailing list** to get notified about new major features:
## Get in touch
-We actively develop Langfuse in [open source](/docs/open-source). Join our [Discord](/discord), provide feedback, report bugs, or request features via GitHub [issues](/issue).
+We actively develop Langfuse in [open source](/docs/open-source):
-Learn more about ways to get in touch on our [Support](/support) page.
+- Contribute to and vote on the Langfuse [roadmap](/docs/roadmap).
+- Ask questions through [GitHub Discussions](/gh-support) or private [support channels](/support).
+- Report bugs using [GitHub Issues](/issue).
+- Chat with the Langfuse maintainers and community on [Discord](/discord).
diff --git a/pages/docs/integrations/_meta.json b/pages/docs/integrations/_meta.json
deleted file mode 100644
index 4a93f55d9..000000000
--- a/pages/docs/integrations/_meta.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "overview": "Overview",
- "openai": "OpenAI SDK",
- "langchain": "Langchain",
- "llama-index": "LlamaIndex",
- "haystack": "Haystack",
- "litellm": "LiteLLM",
- "vercel-ai-sdk": "Vercel AI SDK",
- "dify": "Dify.AI",
- "instructor": "Instructor",
- "dspy": "DSPy",
- "ollama": "Ollama",
- "mirascope": "Mirascope",
- "flowise": "Flowise",
- "langflow": "Langflow",
- "aws-bedrock": "AWS Bedrock",
- "mistral-sdk": "Mistral SDK"
-}
diff --git a/pages/docs/integrations/_meta.tsx b/pages/docs/integrations/_meta.tsx
new file mode 100644
index 000000000..a99f6fbcc
--- /dev/null
+++ b/pages/docs/integrations/_meta.tsx
@@ -0,0 +1,18 @@
+export default {
+ overview: "Overview",
+ openai: "OpenAI SDK",
+ langchain: "Langchain",
+ "llama-index": "LlamaIndex",
+ haystack: "Haystack",
+ litellm: "LiteLLM",
+ "vercel-ai-sdk": "Vercel AI SDK",
+ dify: "Dify.AI",
+ instructor: "Instructor",
+ dspy: "DSPy",
+ ollama: "Ollama",
+ mirascope: "Mirascope",
+ flowise: "Flowise",
+ langflow: "Langflow",
+ "aws-bedrock": "AWS Bedrock",
+ "mistral-sdk": "Mistral SDK",
+};
diff --git a/pages/docs/integrations/haystack/_meta.json b/pages/docs/integrations/haystack/_meta.json
deleted file mode 100644
index 2a29a9621..000000000
--- a/pages/docs/integrations/haystack/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "get-started": "Get Started",
- "example-python": "Example (Python)"
-}
diff --git a/pages/docs/integrations/haystack/_meta.tsx b/pages/docs/integrations/haystack/_meta.tsx
new file mode 100644
index 000000000..edc3e95a6
--- /dev/null
+++ b/pages/docs/integrations/haystack/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ "get-started": "Get Started",
+ "example-python": "Example (Python)",
+};
diff --git a/pages/docs/integrations/langchain/_meta.json b/pages/docs/integrations/langchain/_meta.tsx
similarity index 69%
rename from pages/docs/integrations/langchain/_meta.json
rename to pages/docs/integrations/langchain/_meta.tsx
index 72942b03a..d51f61baf 100644
--- a/pages/docs/integrations/langchain/_meta.json
+++ b/pages/docs/integrations/langchain/_meta.tsx
@@ -1,8 +1,8 @@
-{
- "tracing": "Tracing",
+export default {
+ tracing: "Tracing",
"example-python": "Example Python",
"example-javascript": "Example JS",
"example-python-langgraph": "Example LangGraph",
"example-python-langserve": "Example LangServe",
- "upgrade-paths": "Upgrade Paths"
-}
+ "upgrade-paths": "Upgrade Paths",
+};
diff --git a/pages/docs/integrations/langchain/example-python.md b/pages/docs/integrations/langchain/example-python.md
index b6ba031cd..ba3f3686b 100644
--- a/pages/docs/integrations/langchain/example-python.md
+++ b/pages/docs/integrations/langchain/example-python.md
@@ -13,7 +13,7 @@ Follow the [integration guide](https://langfuse.com/docs/integrations/langchain)
```python
-%pip install langfuse langchain langchain_openai --upgrade
+%pip install langfuse langchain langchain_openai langchain_community --upgrade
```
Initialize the Langfuse client with your API keys from the project settings in the Langfuse UI and add them to your environment.
@@ -22,14 +22,15 @@ Initialize the Langfuse client with your API keys from the project settings in t
```python
import os
-# get keys for your project from https://cloud.langfuse.com
-os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-***"
-os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-***"
-os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # for EU data region
-# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # for US data region
+# Get keys for your project from the project settings page
+# https://cloud.langfuse.com
+os.environ["LANGFUSE_PUBLIC_KEY"] = ""
+os.environ["LANGFUSE_SECRET_KEY"] = ""
+os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region
+# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region
-# your openai key
-os.environ["OPENAI_API_KEY"] = "***"
+# Your OpenAI key
+os.environ["OPENAI_API_KEY"] = ""
```
@@ -198,6 +199,11 @@ chain.invoke(query, config={"callbacks":[langfuse_handler]})
### Agent
+```python
+%pip install google-search-results
+```
+
+
```python
from langchain.agents import AgentExecutor, load_tools, create_openai_functions_agent
from langchain_openai import ChatOpenAI
@@ -274,54 +280,55 @@ review = overall_chain.invoke("Tragedy at sunset on the beach", {"callbacks":[la
review = overall_chain.run("Tragedy at sunset on the beach", callbacks=[langfuse_handler])  # add the handler to the run method
```
-## Adding scores to traces
+## Customize trace names via run_name
-In addition to the attributes automatically captured by the decorator, you can add others to use the full features of Langfuse.
+By default, Langfuse uses the Langchain run_name as the trace/observation name. For more complex or custom chains, it can be useful to set your own run_name to customize these names.
-Two utility methods:
-* `langfuse_context.update_current_observation`: Update the trace/span of the current function scope
-* `langfuse_context.update_current_trace`: Update the trace itself, can also be called within any deeply nested span within the trace
+![Custom LangChain Run Names](https://langfuse.com/images/cookbook/integration-langchain/custom_langchain_run_names.png)
-For details on available attributes, have a look at the [reference](https://python.reference.langfuse.com/langfuse/decorators#LangfuseDecorator.update_current_observation).
+**Example without custom run names**
-Below is an example demonstrating how to enrich traces and observations with custom parameters:
+
+```python
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+chain.invoke({"person": "Grace Hopper"}, config={
+ "callbacks":[langfuse_handler]
+ })
+```
+
+### Via Runnable Config
```python
-from langfuse.decorators import langfuse_context, observe
-
-@observe(as_type="generation")
-def deeply_nested_llm_call():
- # Enrich the current observation with a custom name, input, and output
- langfuse_context.update_current_observation(
- name="Deeply nested LLM call", input="Ping?", output="Pong!"
- )
- # Set the parent trace's name from within a nested observation
- langfuse_context.update_current_trace(
- name="Trace name set from deeply_nested_llm_call",
- session_id="1234",
- user_id="5678",
- tags=["tag1", "tag2"],
- public=True
- )
-
-@observe()
-def nested_span():
- # Update the current span with a custom name and level
- langfuse_context.update_current_observation(name="Nested Span", level="WARNING")
- deeply_nested_llm_call()
-
-@observe()
-def main():
- nested_span()
-
-# Execute the main function to generate the enriched trace
-main()
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?").with_config(run_name="Famous Person Prompt")
+model = ChatOpenAI().with_config(run_name="Famous Person LLM")
+output_parser = StrOutputParser().with_config(run_name="Famous Person Output Parser")
+chain = (prompt | model | output_parser).with_config(run_name="Famous Person Locator")
+
+chain.invoke({"person": "Grace Hopper"}, config={
+ "callbacks":[langfuse_handler]
+})
```
-On the Langfuse platform the trace now shows with the updated name from the `deeply_nested_llm_call`, and the observations will be enriched with the appropriate data points.
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/ec9fcc46-ca38-4bdb-9482-eb06a5f90944
+
+### Via Run Config
+
+
+```python
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+chain.invoke({"person": "Grace Hopper"}, config={
+ "run_name": "Famous Person Locator",
+ "callbacks":[langfuse_handler]
+ })
+```
-**Example trace:** https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f16e0151-cca8-4d90-bccf-1d9ea0958afb
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/b48204e2-fd48-487b-8f66-015e3f10613d
## Interoperability with Langfuse Python SDK
@@ -429,3 +436,106 @@ main()
View it in Langfuse
![Trace of Nested Langchain Runs in Langfuse](https://langfuse.com/images/docs/langchain_python_trace_interoperability.png)
+
+## Adding evaluation/feedback scores to traces
+
+Evaluation results and user feedback are recorded as [scores](https://langfuse.com/docs/scores) in Langfuse.
+
+To add a score to a trace, you need to know the trace_id. There are two options to achieve this when using LangChain:
+
+1. Provide a predefined LangChain run_id
+2. Use the Langfuse Decorator to get the trace_id
+
+![Langchain Trace in Langfuse with Score](https://langfuse.com/images/cookbook/integration-langchain/langchain_trace_with_score.png)
+
+### Predefined LangChain `run_id`
+
+Langfuse uses the LangChain run_id as the trace_id. Thus, you can provide a custom run_id in the runnable config in order to later add scores to the trace.
+
+
+```python
+from operator import itemgetter
+from langchain_openai import ChatOpenAI
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+import uuid
+
+predefined_run_id = str(uuid.uuid4())
+
+langfuse_handler = CallbackHandler()
+
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+
+chain.invoke({"person": "Ada Lovelace"}, config={
+ "run_id": predefined_run_id,
+ "callbacks":[langfuse_handler]
+})
+```
+
+
+```python
+from langfuse import Langfuse
+
+langfuse = Langfuse()
+
+langfuse.score(
+ trace_id=predefined_run_id,
+ name="user-feedback",
+ value=1,
+ comment="This was correct, thank you"
+)
+```
+
+Example Trace in Langfuse: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/9860fffa-02ed-4278-bcf7-c856c569cead
+
+### Via Langfuse Decorator
+
+Alternatively, you can use the LangChain integration together with the [Langfuse @observe-decorator](https://langfuse.com/docs/sdk/python/decorators) for Python.
+
+
+```python
+from langfuse.decorators import langfuse_context, observe
+from operator import itemgetter
+from langchain_openai import ChatOpenAI
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+import uuid
+
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+
+@observe()
+def main(person):
+ langfuse_handler = langfuse_context.get_current_langchain_handler()
+
+ response = chain.invoke({"person": person}, config={
+ "callbacks":[langfuse_handler]
+ })
+
+ trace_id = langfuse_context.get_current_trace_id()
+
+ return trace_id, response
+
+
+trace_id, response = main("Ada Lovelace")
+```
+
+
+```python
+from langfuse import Langfuse
+
+langfuse = Langfuse()
+
+langfuse.score(
+ trace_id=trace_id,
+ name="user-feedback",
+ value=1,
+ comment="This was correct, thank you"
+)
+```
+
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/08bb7cf3-87c6-4a78-a3fc-72af8959a106
diff --git a/pages/docs/integrations/langchain/tracing.mdx b/pages/docs/integrations/langchain/tracing.mdx
index 0d95bac52..5a9de3815 100644
--- a/pages/docs/integrations/langchain/tracing.mdx
+++ b/pages/docs/integrations/langchain/tracing.mdx
@@ -116,10 +116,11 @@ When initializing the Langfuse handler, you can pass the following **optional**
### Dynamic Trace Attributes in Chain Invocation
-You can also set the `trace_name`, `user_id`, `session_id`, and `tags` for a trace that corresponds to a LangChain execution through the runnable configuration of the chain without instantiating a new `CallbackHandler` each time. This allows you to dynamically set these attributes for each specific execution. Here's an example:
+You can also set the `trace_name`, [`user_id`](/docs/tracing-features/users), [`session_id`](/docs/tracing-features/sessions), and [`tags`](/docs/tracing-features/tags) for a trace that corresponds to a LangChain execution through the runnable configuration of the chain without instantiating a new `CallbackHandler` each time. This allows you to dynamically set these attributes for each specific execution. Here's an example:
+
```python
from langfuse.callback import CallbackHandler
@@ -131,9 +132,9 @@ user_id = "random-user"
tags = ["random-tag-1", "random-tag-2"]
# Your existing Langchain code to create the chain
-...
-# Pass config to the chain invocation to be parsed as Langfuse trace attributes
+# Pass config to the chain invocation to be parsed as Langfuse trace attributes
+
chain.invoke(
{"animal": "dog"},
config={
@@ -147,13 +148,15 @@ chain.invoke(
},
)
```
+
-```typescript
+
+```ts
import { CallbackHandler } from "langfuse-langchain";
-const langfuseHandler = new CallbackHandler()
+const langfuseHandler = new CallbackHandler();
const traceName = "langchain_trace_name";
const sessionId = "random-session";
@@ -170,10 +173,100 @@ await chain.invoke(
}
);
```
+
+### Predefined Trace ID + Add Evaluation or User Feedback Score
+
+**Predefined Trace ID**
+
+To query traces or add evaluation and feedback scores, you need to know the ID of a trace. The LangChain integration automatically uses the `run_id` of the LangChain run as the `trace_id`. You can set a predefined `run_id` through the run configuration.
+
+
+
+
+```python
+import uuid
+
+predefined_run_id = str(uuid.uuid4())
+
+langfuse_handler = CallbackHandler()
+
+# Your existing Langchain code to create the chain
+
+# Pass run_id to the chain invocation
+chain.invoke({"person": "Ada Lovelace"}, config={
+ "run_id": predefined_run_id,
+ "callbacks": [langfuse_handler],
+ }
+)
+```
+
+
+
+
+```ts
+import { CallbackHandler } from "langfuse-langchain";
+import { v4 as uuidv4 } from "uuid";
+
+const langfuseHandler = new CallbackHandler();
+
+const predefinedRunId = uuidv4();
+
+await chain.invoke(
+ { animal: "dog" },
+ {
+ callbacks: [langfuseHandler],
+ runId: predefinedRunId,
+ }
+);
+```
+
+
+
+
+**Add Score to Trace**
+
+Evaluation results and user feedback are recorded as [scores](https://langfuse.com/docs/scores) in Langfuse. You can add them to a trace via the `trace_id`. Scores do not need to be numeric; see the scores documentation for more details.
+
+
+
+
+```python
+from langfuse import Langfuse
+
+langfuse = Langfuse()
+
+langfuse.score(
+ trace_id=predefined_run_id,
+ name="user-feedback",
+ value=1,
+ comment="This was correct, thank you"
+)
+```
+
+
+
+
+
+```ts
+import { Langfuse } from "langfuse";
+
+const langfuse = new Langfuse();
+
+await langfuse.score({
+ traceId: predefinedRunId,
+ name: "user-feedback",
+ value: 1,
+ comment: "This was correct, thank you",
+});
+```
+
+
+
+
### Interoperability with Langfuse SDKs [#interoperability]
Use the Langchain integration in combination with the regular Langfuse SDKs if you want to:
@@ -216,7 +309,7 @@ Limitation (decorator + langchain): The input/output of the Langchain code will
{/* JS */}
-```typescript
+```ts
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
@@ -243,7 +336,7 @@ await chain.invoke(
If you want to add the input/output of the Langchain run to the trace or span itself, use the `updateRoot` flag in the `CallbackHandler` constructor.
-```typescript
+```ts
const langfuseHandlerTrace = new CallbackHandler({
root: trace,
updateRoot: true,
@@ -306,7 +399,7 @@ langfuse_handler.shutdown()
{/* JS */}
-```typescript
+```ts
await langfuseHandler.shutdownAsync();
```
@@ -328,7 +421,7 @@ langfuse_handler.flush()
{/* JS */}
-```typescript
+```ts
await langfuseHandler.flushAsync();
```
@@ -339,7 +432,7 @@ await langfuseHandler.flushAsync();
Since Langchain version > 0.3.0, the callbacks on which Langfuse relies have been backgrounded. This means that execution will not wait for the callbacks to return before continuing. Prior to 0.3.0, the behavior was the opposite. If you are running code in serverless environments such as Google Cloud Functions, AWS Lambda or Cloudflare Workers, you should set your callbacks to be blocking so they have time to finish or time out. This can be done either by
-- setting the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to "false"
+- setting the `LANGCHAIN_CALLBACKS_BACKGROUND` environment variable to "false"
- importing the global `awaitAllCallbacks` method to ensure all callbacks finish if necessary
Read more about awaiting callbacks here in the [Langchain docs](https://js.langchain.com/docs/how_to/callbacks_serverless).
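As a sketch, the second option could look like this in a serverless handler (the import path follows the linked Langchain docs; `chain` and `langfuseHandler` are assumed from the examples above):

```ts
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";

export async function handler() {
  try {
    return await chain.invoke({ animal: "dog" }, { callbacks: [langfuseHandler] });
  } finally {
    // Block until all backgrounded callbacks, including Langfuse, have finished
    await awaitAllCallbacks();
  }
}
```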
diff --git a/pages/docs/integrations/langchain/upgrade-paths.mdx b/pages/docs/integrations/langchain/upgrade-paths.mdx
index 3bff2096c..5dcd5715d 100644
--- a/pages/docs/integrations/langchain/upgrade-paths.mdx
+++ b/pages/docs/integrations/langchain/upgrade-paths.mdx
@@ -100,7 +100,7 @@ Requires [`langchain ^0.1.10`](https://github.com/langchain-ai/langchainjs/relea
The `CallbackHandler` can be used in multiple invocations of a Langchain chain as shown below.
-```typescript
+```ts
import { CallbackHandler } from "langfuse-langchain";
// create a handler
@@ -168,7 +168,7 @@ TRACE_2
If you still want to group multiple invocations on one trace, you can use the Langfuse SDK combined with the Langchain integration ([more details](/docs/integrations/langchain/tracing)).
-```typescript
+```ts
const trace = langfuse.trace({ id: "special-id" });
// CallbackHandler will use the trace with the id "special-id" for all invocations
const langfuseHandler = new CallbackHandler({ root: trace });
diff --git a/pages/docs/integrations/litellm/_meta.json b/pages/docs/integrations/litellm/_meta.json
deleted file mode 100644
index a3e433911..000000000
--- a/pages/docs/integrations/litellm/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "tracing": "Tracing",
- "example-proxy-python": "Example Proxy (Python)",
- "example-proxy-js": "Example Proxy (JS/TS)"
-}
diff --git a/pages/docs/integrations/litellm/_meta.tsx b/pages/docs/integrations/litellm/_meta.tsx
new file mode 100644
index 000000000..d5d871bf3
--- /dev/null
+++ b/pages/docs/integrations/litellm/_meta.tsx
@@ -0,0 +1,5 @@
+export default {
+ tracing: "Tracing",
+ "example-proxy-python": "Example Proxy (Python)",
+ "example-proxy-js": "Example Proxy (JS/TS)",
+};
diff --git a/pages/docs/integrations/llama-index/_meta.json b/pages/docs/integrations/llama-index/_meta.json
deleted file mode 100644
index 570bfa1e5..000000000
--- a/pages/docs/integrations/llama-index/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "get-started": "Get Started",
- "example-python": "Example (Python)",
- "example-python-instrumentation-module": "Example Instrumentation Module (Python)"
-}
diff --git a/pages/docs/integrations/llama-index/_meta.tsx b/pages/docs/integrations/llama-index/_meta.tsx
new file mode 100644
index 000000000..0ad63306e
--- /dev/null
+++ b/pages/docs/integrations/llama-index/_meta.tsx
@@ -0,0 +1,6 @@
+export default {
+ "get-started": "Get Started",
+ "example-python": "Example (Python)",
+ "example-python-instrumentation-module":
+ "Example Instrumentation Module (Python)",
+};
diff --git a/pages/docs/integrations/llama-index/get-started.mdx b/pages/docs/integrations/llama-index/get-started.mdx
index 0ca1cc1d7..1b797f95b 100644
--- a/pages/docs/integrations/llama-index/get-started.mdx
+++ b/pages/docs/integrations/llama-index/get-started.mdx
@@ -195,9 +195,12 @@ def my_func():
-### 🛠️ Beta: Observability based on LlamaIndex instrumentation module
+## Beta: Observability based on LlamaIndex instrumentation module
-**_⚠️ For production use cases, we recommend using the callback-based LlamaIndex integration with Langfuse as described above until this integration is stable._**
+<Callout type="warning">
+ For production use cases, we recommend using the callback-based LlamaIndex
+ integration with Langfuse as described above until this integration is stable.
+</Callout>
The new [LlamaIndex instrumentation module](https://docs.llamaindex.ai/en/stable/module_guides/observability/instrumentation/) allows for seamless instrumentation of LlamaIndex applications. In particular, you can handle events and track spans using both custom logic and the handlers offered by the module. Users can also define their own events and specify where and when in the code they should be emitted.
diff --git a/pages/docs/integrations/mirascope/_meta.json b/pages/docs/integrations/mirascope/_meta.json
deleted file mode 100644
index 38020a64f..000000000
--- a/pages/docs/integrations/mirascope/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "tracing": "Tracing",
- "example-python": "Example Notebook"
-}
diff --git a/pages/docs/integrations/mirascope/_meta.tsx b/pages/docs/integrations/mirascope/_meta.tsx
new file mode 100644
index 000000000..2bb300af8
--- /dev/null
+++ b/pages/docs/integrations/mirascope/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ tracing: "Tracing",
+ "example-python": "Example Notebook",
+};
diff --git a/pages/docs/integrations/openai/_meta.json b/pages/docs/integrations/openai/_meta.json
deleted file mode 100644
index cba1ade4a..000000000
--- a/pages/docs/integrations/openai/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "python": "Python",
- "js": "JS/TS"
-}
diff --git a/pages/docs/integrations/openai/_meta.tsx b/pages/docs/integrations/openai/_meta.tsx
new file mode 100644
index 000000000..237abcb7d
--- /dev/null
+++ b/pages/docs/integrations/openai/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ python: "Python",
+ js: "JS/TS",
+};
diff --git a/pages/docs/integrations/openai/js/_meta.json b/pages/docs/integrations/openai/js/_meta.json
deleted file mode 100644
index 4537a07a4..000000000
--- a/pages/docs/integrations/openai/js/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "get-started": "Get Started",
- "examples": "Example Notebook"
-}
diff --git a/pages/docs/integrations/openai/js/_meta.tsx b/pages/docs/integrations/openai/js/_meta.tsx
new file mode 100644
index 000000000..2d0d1f961
--- /dev/null
+++ b/pages/docs/integrations/openai/js/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ "get-started": "Get Started",
+ examples: "Example Notebook",
+};
diff --git a/pages/docs/integrations/openai/js/get-started.mdx b/pages/docs/integrations/openai/js/get-started.mdx
index c0d3e6f09..a59f6c53d 100644
--- a/pages/docs/integrations/openai/js/get-started.mdx
+++ b/pages/docs/integrations/openai/js/get-started.mdx
@@ -12,7 +12,7 @@ description: Simple wrapper around OpenAI SDK (JS/TS) to get full observability
The Langfuse JS/TS SDK offers a wrapper function around the OpenAI SDK, enabling you to easily add observability to your OpenAI calls. This includes tracking latencies, time-to-first-token on stream responses, errors, and model usage.
-```typescript {2, 4}
+```ts {2, 4}
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
@@ -66,7 +66,7 @@ import EnvJS from "@/components-mdx/env-js.mdx";
With your environment configured, call OpenAI SDK methods as usual from the wrapped client.
- ```typescript
+ ```ts
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
@@ -83,7 +83,7 @@ import EnvJS from "@/components-mdx/env-js.mdx";
Pass your Langfuse credentials directly as `clientInitParams` to the `observeOpenAI` function. You can find your credentials in your project settings in the Langfuse UI.
- ```typescript
+ ```ts
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
@@ -130,7 +130,7 @@ The Langfuse SDKs queue and batches events in the background to reduce the numbe
If you are running a short-lived application, you need to flush Langfuse to ensure that all events are sent before the application exits.
-```typescript
+```ts
await openai.flushAsync(); // method added by Langfuse wrapper
// If you have previously passed a parent span or trace for nesting, use that client for the flush call
@@ -162,7 +162,7 @@ You can add the following properties to the `langfuseConfig` of the `observeOpen
Example:
-```typescript
+```ts
const res = await observeOpenAI(new OpenAI(), {
generationName: "Traced generation",
metadata: { someMetadataKey: "someValue" },
@@ -190,7 +190,7 @@ const res = await observeOpenAI(new OpenAI(), {
With [Langfuse Prompt management](/docs/prompts/get-started) you can effectively manage and version your prompts. You can link your OpenAI generations to a prompt by passing the `langfusePrompt` property to the `observeOpenAI` function.
-```typescript
+```ts
import { observeOpenAI } from "langfuse";
import OpenAI from "openai";
@@ -243,7 +243,7 @@ TRACE: capital-poem-generator
**Implementation**
-```typescript {12, 17, 30}
+```ts {12, 17, 30}
import Langfuse, { observeOpenAI } from "langfuse";
// Initialize SDKs
diff --git a/pages/docs/integrations/openai/python/_meta.json b/pages/docs/integrations/openai/python/_meta.tsx
similarity index 51%
rename from pages/docs/integrations/openai/python/_meta.json
rename to pages/docs/integrations/openai/python/_meta.tsx
index d69ea51a3..03e969767 100644
--- a/pages/docs/integrations/openai/python/_meta.json
+++ b/pages/docs/integrations/openai/python/_meta.tsx
@@ -1,7 +1,7 @@
-{
+export default {
"get-started": "Get Started",
"track-errors": "Track Errors",
- "examples": "Example Notebook",
+ examples: "Example Notebook",
"assistants-api": "Assistants API",
- "structured-outputs": "Structured Outputs"
-}
+ "structured-outputs": "Structured Outputs",
+};
diff --git a/pages/docs/integrations/vercel-ai-sdk.mdx b/pages/docs/integrations/vercel-ai-sdk.mdx
index 8346e0cd6..8b07be3e6 100644
--- a/pages/docs/integrations/vercel-ai-sdk.mdx
+++ b/pages/docs/integrations/vercel-ai-sdk.mdx
@@ -39,7 +39,7 @@ _Full Demo_
While telemetry is experimental ([docs](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry#enabling-telemetry)), you can enable it by setting `experimental_telemetry` on each request that you want to trace.
-```typescript {4}
+```ts {4}
const result = await generateText({
model: openai("gpt-4-turbo"),
prompt: "Write a short story about a cat.",
@@ -63,7 +63,7 @@ You can set the Langfuse credentials via environment variables or directly to th
-```typescript
+```ts
import { LangfuseExporter } from "langfuse-vercel";
new LangfuseExporter({
@@ -92,7 +92,7 @@ npm install @vercel/otel langfuse-vercel @opentelemetry/api-logs @opentelemetry/
Enable the `instrumentationHook` in your `next.config.js`:
-```typescript filename="next.config.js" {4}
+```ts filename="next.config.js" {4}
/** @type {import('next').NextConfig} */
const nextConfig = {
experimental: {
@@ -105,7 +105,7 @@ module.exports = nextConfig;
Add `LangfuseExporter` to your instrumentation:
-```typescript filename="instrumentation.ts" {7}
+```ts filename="instrumentation.ts" {7}
import { registerOTel } from "@vercel/otel";
import { LangfuseExporter } from "langfuse-vercel";
@@ -120,7 +120,7 @@ export function register() {
-```typescript {5, 8, 31}
+```ts {5, 8, 31}
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";
import { NodeSDK } from "@opentelemetry/sdk-node";
@@ -181,7 +181,7 @@ By default, the exporter captures the input and output of each request. You can
All of the `metadata` fields are automatically captured by the exporter. You can also pass custom trace attributes to e.g. track users or sessions.
-```typescript showLineNumbers {6-12}
+```ts showLineNumbers {6-12}
const result = await generateText({
model: openai("gpt-4-turbo"),
prompt: "Write a short story about a cat.",
@@ -203,7 +203,7 @@ const result = await generateText({
Enable the `debug` option to see the logs of the exporter.
-```typescript
+```ts
new LangfuseExporter({ debug: true });
```
diff --git a/pages/docs/model-usage-and-cost.mdx b/pages/docs/model-usage-and-cost.mdx
index 0e27544a8..b32400089 100644
--- a/pages/docs/model-usage-and-cost.mdx
+++ b/pages/docs/model-usage-and-cost.mdx
@@ -112,7 +112,7 @@ generation = langfuse.generation(
-```typescript
+```ts
const generation = langfuse.generation({
// ...
usage: {
@@ -159,7 +159,7 @@ generation = langfuse.generation(
-```typescript
+```ts
const generation = langfuse.generation({
// ...
usage: {
diff --git a/pages/docs/prompts/_meta.json b/pages/docs/prompts/_meta.tsx
similarity index 65%
rename from pages/docs/prompts/_meta.json
rename to pages/docs/prompts/_meta.tsx
index 6b62057ed..6e350569d 100644
--- a/pages/docs/prompts/_meta.json
+++ b/pages/docs/prompts/_meta.tsx
@@ -1,6 +1,6 @@
-{
+export default {
"get-started": "Get Started",
"example-openai-functions": "Example OpenAI Functions",
"example-langchain": "Example Langchain (Py)",
- "example-langchain-js": "Example Langchain (JS)"
-}
+ "example-langchain-js": "Example Langchain (JS)",
+};
diff --git a/pages/docs/prompts/get-started.mdx b/pages/docs/prompts/get-started.mdx
index 0f1a6ced1..82fca5825 100644
--- a/pages/docs/prompts/get-started.mdx
+++ b/pages/docs/prompts/get-started.mdx
@@ -116,7 +116,7 @@ If you already have a prompt with the same name, the prompt will be added as a n
-```typescript
+```ts
// Create a text prompt
await langfuse.createPrompt({
name: "movie-critic",
@@ -221,7 +221,7 @@ prompt.config
-```typescript
+```ts
import { Langfuse } from "langfuse";
// Initialize the Langfuse client
@@ -240,7 +240,7 @@ const compiledPrompt = prompt.compile({
Chat prompts
-```typescript
+```ts
// Get current `production` version of a chat prompt
const chatPrompt = await langfuse.getPrompt("movie-critic-chat", undefined, {
type: "chat",
@@ -256,7 +256,7 @@ const compiledChatPrompt = chatPrompt.compile({
Optional parameters
-```typescript
+```ts
// Get specific version of a prompt (here version 1)
const prompt = await langfuse.getPrompt("movie-critic", 1);
@@ -296,7 +296,7 @@ const promptWithFetchTimeout = await langfuse.getPrompt(
Attributes
-```typescript
+```ts
// Raw prompt including {{variables}}. For chat prompts, this is a list of chat messages.
prompt.prompt;
@@ -369,7 +369,7 @@ prompt.config
As Langfuse and Langchain process input variables of prompt templates differently (`{}` instead of `{{}}`), we provide the `prompt.getLangchainPrompt()` method to transform the Langfuse prompt into a string that can be used with Langchain's PromptTemplate.
-```typescript
+```ts
import { Langfuse } from "langfuse";
import { ChatPromptTemplate } from "@langchain/core/prompts";
@@ -386,7 +386,7 @@ const promptTemplate = PromptTemplate.fromTemplate(
Chat prompts
-```typescript
+```ts
// Get current `production` version of a chat prompt
const langfusePrompt = await langfuse.getPrompt(
"movie-critic-chat",
@@ -402,7 +402,7 @@ const promptTemplate = ChatPromptTemplate.fromMessages(
Optional parameters
-```typescript
+```ts
// Get specific version of a prompt (here version 1)
const prompt = await langfuse.getPrompt("movie-critic", 1);
@@ -424,7 +424,7 @@ const prompt = await langfuse.getPrompt("movie-critic", undefined, {
Attributes
-```typescript
+```ts
// Raw prompt including {{variables}}. For chat prompts, this is a list of chat messages.
prompt.prompt;
@@ -507,7 +507,7 @@ openai.chat.completions.create(
-```typescript /langfusePrompt,/
+```ts /langfusePrompt,/
import { observeOpenAI } from "langfuse";
import OpenAI from "openai";
@@ -579,7 +579,7 @@ chat_chain.invoke({"movie": "Dune 2", "criticlevel": "expert"})
-```typescript /metadata: { langfusePrompt:/
+```ts /metadata: { langfusePrompt:/
import { Langfuse } from "langfuse";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI, OpenAI } from "@langchain/openai";
@@ -699,7 +699,7 @@ if __name__ == '__main__':
-```typescript
+```ts
import express from "express";
import { Langfuse } from "langfuse";
@@ -746,7 +746,7 @@ prompt = langfuse.get_prompt("movie-critic", cache_ttl_seconds=300)
-```typescript
+```ts
// Get current `production` version and cache prompt for 5 minutes
const prompt = await langfuse.getPrompt("movie-critic", undefined, {
cacheTtlSeconds: 300,
@@ -775,7 +775,7 @@ prompt = langfuse.get_prompt("movie-critic", cache_ttl_seconds=0, label="latest"
-```typescript
+```ts
const prompt = await langfuse.getPrompt("movie-critic", undefined, {
cacheTtlSeconds: 0,
});
@@ -869,7 +869,7 @@ fetch_prompts_on_startup()
-```typescript
+```ts
import { Langfuse } from "langfuse";
// Initialize Langfuse client
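const langfuse = new Langfuse();

// Sketch (assumed continuation): pre-fetch required prompts at startup so the
// first request is served from the SDK's prompt cache
await langfuse.getPrompt("movie-critic");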
@@ -923,7 +923,7 @@ prompt.is_fallback
-```typescript /fallback: "Do you like {{movie}}?"/ /fallback: [{ role: "system", content: "You are an expert on {{movie}}" }]/
+```ts /fallback: "Do you like {{movie}}?"/ /fallback: [{ role: "system", content: "You are an expert on {{movie}}" }]/
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
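// Sketch (assumed continuation, mirroring the fallback value highlighted in
// the fence annotation above): if Langfuse is unreachable and nothing is
// cached, getPrompt returns the provided fallback instead of throwing
const prompt = await langfuse.getPrompt("movie-critic", undefined, {
  fallback: "Do you like {{movie}}?",
});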
diff --git a/pages/docs/query-traces.mdx b/pages/docs/query-traces.mdx
index ad9171635..57c117b46 100644
--- a/pages/docs/query-traces.mdx
+++ b/pages/docs/query-traces.mdx
@@ -65,7 +65,7 @@ Python SDK reference including all available filters:
npm install langfuse
```
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse({
secretKey: "sk-lf-...",
diff --git a/pages/docs/roadmap.mdx b/pages/docs/roadmap.mdx
index 58510b661..270b95be7 100644
--- a/pages/docs/roadmap.mdx
+++ b/pages/docs/roadmap.mdx
@@ -37,7 +37,7 @@ export const ChangelogList = () => (
{page.meta?.title || page.frontMatter?.title || page.name}
diff --git a/pages/docs/scores/_meta.json b/pages/docs/scores/_meta.tsx
similarity index 63%
rename from pages/docs/scores/_meta.json
rename to pages/docs/scores/_meta.tsx
index 1a7f9acac..c22009f6c 100644
--- a/pages/docs/scores/_meta.json
+++ b/pages/docs/scores/_meta.tsx
@@ -1,9 +1,9 @@
-{
- "overview": "Overview",
+export default {
+ overview: "Overview",
"getting-started": "Getting Started",
- "annotation": "Annotation in UI",
+ annotation: "Annotation in UI",
"user-feedback": "User Feedback",
"model-based-evals": "Model-based Evaluation",
"external-evaluation-pipelines": "External Evaluation Pipelines",
- "custom": "Custom via SDKs/API"
-}
+ custom: "Custom via SDKs/API",
+};
diff --git a/pages/docs/scores/custom.mdx b/pages/docs/scores/custom.mdx
index f02cb48e8..e51859ac8 100644
--- a/pages/docs/scores/custom.mdx
+++ b/pages/docs/scores/custom.mdx
@@ -42,7 +42,7 @@ langfuse.score(
JavaScript/TypeScript SDK example
-```typescript
+```ts
await langfuse.score({
id: "unique_id", // optional, can be used as an indempotency key to update the score subsequently
traceId: message.traceId,
@@ -75,7 +75,7 @@ langfuse.score(
JavaScript/TypeScript SDK example
-```typescript
+```ts
await langfuse.score({
id: "unique_id", // optional, can be used as an indempotency key to update the score subsequently
traceId: message.traceId,
@@ -108,7 +108,7 @@ langfuse.score(
JavaScript/TypeScript SDK example
-```typescript
+```ts
await langfuse.score({
id: "unique_id", // optional, can be used as an indempotency key to update the score subsequently
traceId: message.traceId,
@@ -157,7 +157,7 @@ langfuse.score(
)
```
-```typescript
+```ts
await langfuse.score({
traceId: message.traceId,
observationId: message.generationId, // optional
@@ -187,7 +187,7 @@ langfuse.score(
)
```
-```typescript
+```ts
await langfuse.score({
traceId: message.traceId,
observationId: message.generationId, // optional
@@ -217,7 +217,7 @@ langfuse.score(
)
```
-```typescript
+```ts
await langfuse.score({
traceId: message.traceId,
observationId: message.generationId, // optional
diff --git a/pages/docs/scores/external-evaluation-pipelines.md b/pages/docs/scores/external-evaluation-pipelines.md
index 6aa79b726..51ab879de 100644
--- a/pages/docs/scores/external-evaluation-pipelines.md
+++ b/pages/docs/scores/external-evaluation-pipelines.md
@@ -26,7 +26,7 @@ If your use case meets any of these situations, let’s go ahead and implement yo
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
- allowfullscreen
+ allowFullScreen
>
---
diff --git a/pages/docs/scores/user-feedback.mdx b/pages/docs/scores/user-feedback.mdx
index c0480e07c..678ef86ce 100644
--- a/pages/docs/scores/user-feedback.mdx
+++ b/pages/docs/scores/user-feedback.mdx
@@ -50,7 +50,7 @@ The easiest way to collect user feedback is via the **Langfuse Web SDK**. Thereb
-```typescript {1, 9, 17-18} filename="UserFeedbackComponent.tsx"
+```ts {1, 9, 17-18} filename="UserFeedbackComponent.tsx"
import { LangfuseWeb } from "langfuse";
export function UserFeedbackComponent(props: { traceId: string }) {
diff --git a/pages/docs/sdk/_meta.json b/pages/docs/sdk/_meta.json
deleted file mode 100644
index c6f095ff9..000000000
--- a/pages/docs/sdk/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "overview": "Overview",
- "python": "Python",
- "typescript": "JS/TS"
-}
diff --git a/pages/docs/sdk/_meta.tsx b/pages/docs/sdk/_meta.tsx
new file mode 100644
index 000000000..b31463039
--- /dev/null
+++ b/pages/docs/sdk/_meta.tsx
@@ -0,0 +1,5 @@
+export default {
+ overview: "Overview",
+ python: "Python",
+ typescript: "JS/TS",
+};
diff --git a/pages/docs/sdk/python/_meta.json b/pages/docs/sdk/python/_meta.json
deleted file mode 100644
index b0143bf1d..000000000
--- a/pages/docs/sdk/python/_meta.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "decorators": "Decorators",
- "example": "Example Notebook",
- "low-level-sdk": "Low-level SDK",
- "reference": {
- "title": "Reference ↗",
- "href": "https://python.reference.langfuse.com",
- "newWindow": true
- }
-}
diff --git a/pages/docs/sdk/python/_meta.tsx b/pages/docs/sdk/python/_meta.tsx
new file mode 100644
index 000000000..03f75f721
--- /dev/null
+++ b/pages/docs/sdk/python/_meta.tsx
@@ -0,0 +1,10 @@
+export default {
+ decorators: "Decorators",
+ example: "Example Notebook",
+ "low-level-sdk": "Low-level SDK",
+ reference: {
+ title: "Reference ↗",
+ href: "https://python.reference.langfuse.com",
+ newWindow: true,
+ },
+};
diff --git a/pages/docs/sdk/typescript/_meta.json b/pages/docs/sdk/typescript/_meta.json
deleted file mode 100644
index e5e88a721..000000000
--- a/pages/docs/sdk/typescript/_meta.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "guide": "Guide",
- "guide-web": "Guide (Web)",
- "example-vercel-ai": "Example (Vercel AI)",
- "reference": {
- "title": "Reference ↗",
- "href": "https://js.reference.langfuse.com",
- "newWindow": true
- }
-}
diff --git a/pages/docs/sdk/typescript/_meta.tsx b/pages/docs/sdk/typescript/_meta.tsx
new file mode 100644
index 000000000..b47115c59
--- /dev/null
+++ b/pages/docs/sdk/typescript/_meta.tsx
@@ -0,0 +1,10 @@
+export default {
+ guide: "Guide",
+ "guide-web": "Guide (Web)",
+ "example-vercel-ai": "Example (Vercel AI)",
+ reference: {
+ title: "Reference ↗",
+ href: "https://js.reference.langfuse.com",
+ newWindow: true,
+ },
+};
diff --git a/pages/docs/sdk/typescript/guide-web.mdx b/pages/docs/sdk/typescript/guide-web.mdx
index 3df164253..04d0ec871 100644
--- a/pages/docs/sdk/typescript/guide-web.mdx
+++ b/pages/docs/sdk/typescript/guide-web.mdx
@@ -34,7 +34,7 @@ The langfuse JS/TS SDK can be used to report scores client-side directly from th
-```typescript
+```ts
import { LangfuseWeb } from "langfuse";
export function UserFeedbackComponent(props: { traceId: string }) {
@@ -119,7 +119,7 @@ npm i langfuse
In your application, set the **public api key** to create a client.
-```typescript
+```ts
import { LangfuseWeb } from "langfuse";
const langfuseWeb = new LangfuseWeb({
@@ -153,7 +153,7 @@ import { Callout } from "nextra/components";
available in both backend and frontend.
-```typescript
+```ts
// pass traceId and observationId to front end
await langfuseWeb.score({
traceId: message.traceId,
diff --git a/pages/docs/sdk/typescript/guide.mdx b/pages/docs/sdk/typescript/guide.mdx
index 0c51a81e5..d60a525de 100644
--- a/pages/docs/sdk/typescript/guide.mdx
+++ b/pages/docs/sdk/typescript/guide.mdx
@@ -67,7 +67,7 @@ LANGFUSE_BASEURL="https://cloud.langfuse.com"; # 🇪🇺 EU region
# LANGFUSE_BASEURL="https://us.cloud.langfuse.com"; # 🇺🇸 US region
```
-```typescript
+```ts
import { Langfuse } from "langfuse"; // or "langfuse-node"
// without additional options
@@ -83,7 +83,7 @@ const langfuse = new Langfuse({
-```typescript
+```ts
import { Langfuse } from "langfuse"; // or "langfuse-node"
const langfuse = new Langfuse({
@@ -142,7 +142,7 @@ import { FileCode } from "lucide-react";
Traces are the top-level entity in the Langfuse API. They represent an execution flow in an LLM application, usually triggered by an external event.
-```typescript
+```ts
// Example trace creation
const trace = langfuse.trace({
name: "chat-app-session",
@@ -196,7 +196,7 @@ trace.score({});
Events are used to track discrete events in a trace.
-```typescript
+```ts
// Example event
const event = trace.event({
name: "get-user-profile",
@@ -246,7 +246,7 @@ event.score({});
Spans represent durations of units of work in a trace. We provide convenient SDK functions for generic spans as well as LLM spans.
-```typescript
+```ts
// Example span creation
const span = trace.span({
name: "embedding-retrieval",
@@ -306,7 +306,7 @@ span.score({});
Generations are used to log generations of AI models. They contain additional attributes about the model and the prompt/completion and are specifically rendered in the Langfuse UI.
-```typescript
+```ts
// Example generation creation
const generation = trace.generation({
name: "chat-completion",
@@ -382,7 +382,7 @@ There are two options to nest observations:
-```typescript
+```ts
const trace = langfuse.trace({ name: "chat-app-session" });
const span = trace.span({ name: "chat-interaction" });
@@ -396,7 +396,7 @@ span.generation({ name: "chat-completion" });
Especially in distributed applications, it is often not possible to nest observations via the SDK. In this case, you can use the `traceId` and `parentObservationId` properties to manually nest observations.
-```typescript
+```ts
const trace = langfuse.trace({
name: "chat-app-session",
});
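// Sketch (assumed continuation): in a distributed setup, pass trace.id and the
// parent observation's id across services and set them explicitly
const span = langfuse.span({
  traceId: trace.id,
  name: "chat-interaction",
});
const generation = langfuse.generation({
  traceId: trace.id,
  parentObservationId: span.id,
  name: "chat-completion",
});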
@@ -432,7 +432,7 @@ Links
- Learn more about [Scores in Langfuse](/docs/scores)
- Report scores from the browser (e.g. user feedback) using the [Web SDK](/docs/sdk/typescript/guide-web)
-```typescript
+```ts
await langfuse.score({
traceId: message.traceId,
observationId: message.generationId,
@@ -460,7 +460,7 @@ generation.score({});
The Langfuse SDKs send events asynchronously to the Langfuse server. You should call shutdown to exit cleanly before your application exits.
-```typescript
+```ts
langfuse.shutdown();
// or
await langfuse.shutdownAsync();
@@ -472,7 +472,7 @@ Issues with the SDKs can be caused by various reasons ranging from incorrectly c
The SDK does not throw errors to protect your application process. Instead, you can optionally listen to errors:
-```typescript
+```ts
langfuse.on("error", (error) => {
// Whatever you want to do with the error
console.error(error);
@@ -481,7 +481,7 @@ langfuse.on("error", (error) => {
Alternatively, you can enable debugging to get detailed logs of what's happening in the SDK.
-```typescript
+```ts
langfuse.debug();
```
@@ -502,7 +502,7 @@ Note: Most of these execution environments have a timeout after which the proces
npm i @vercel/functions
```
- ```typescript
+ ```ts
import { waitUntil } from "@vercel/functions";
// within the api handler
waitUntil(langfuse.flushAsync());
@@ -514,7 +514,7 @@ When the process exits use `await langfuse.shutdownAsync()` to make sure all req
Example:
-```typescript
+```ts
const langfuse = new Langfuse({
secretKey: "sk-lf-...",
publicKey: "pk-lf-...",
@@ -564,7 +564,7 @@ We improved the flexibility of the SDK by allowing you to ingest any type of usa
**v1.x.x**
-```typescript
+```ts
langfuse.generation({
name: "chat-completion",
usage: {
@@ -579,7 +579,7 @@ langfuse.generation({
The usage object supports the OpenAI structure with `{'promptTokens', 'completionTokens', 'totalTokens'}` and a more generic version `{'input', 'output', 'total', 'unit'}` where unit can be of value `"TOKENS"`, `"CHARACTERS"`, `"MILLISECONDS"`, `"SECONDS"`, `"IMAGES"`. For some models the token counts are [automatically calculated](https://langfuse.com/docs/model-usage-and-cost) by Langfuse. Create an issue to request support for other units and models.
-```typescript
+```ts
// Generic style
langfuse.generation({
name = "my-claude-generation",
@@ -610,7 +610,7 @@ langfuse.generation({
We deprecated the external trace id to simplify the API. Instead, you can now (optionally) directly set the trace id when creating the trace. Traces are still upserted in case a trace with this id already exists in your project.
-```typescript
+```ts
// v0.x
const trace = langfuse.trace({ externalId: "123" });
// When manually linking observations and scores to the trace
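
// v1.x sketch (assumed continuation, per the paragraph above): set the trace
// id directly at creation; observations and scores then reference it via
// trace.id. traceV1 and the score name "quality" are hypothetical.
const traceV1 = langfuse.trace({ id: "123" });
await langfuse.score({ traceId: traceV1.id, name: "quality", value: 1 });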
@@ -637,7 +637,7 @@ With v1.0.0 we introduced the `shutdownAsync` method to make sure all requests a
This is especially important for short-lived execution environments such as [lambdas and serverless functions](#lambda).
-```typescript
+```ts
export async function handler() {
// Lambda / serverless function
diff --git a/pages/docs/security/_meta.json b/pages/docs/security/_meta.json
deleted file mode 100644
index 3347cacaa..000000000
--- a/pages/docs/security/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "overview": "Overview",
- "getting-started": "Getting Started",
- "example-python": "Example Python"
-}
diff --git a/pages/docs/security/_meta.tsx b/pages/docs/security/_meta.tsx
new file mode 100644
index 000000000..46dbc111f
--- /dev/null
+++ b/pages/docs/security/_meta.tsx
@@ -0,0 +1,5 @@
+export default {
+ overview: "Overview",
+ "getting-started": "Getting Started",
+ "example-python": "Example Python",
+};
diff --git a/pages/docs/security/example-python.md b/pages/docs/security/example-python.md
index d422716a9..121fca5d7 100644
--- a/pages/docs/security/example-python.md
+++ b/pages/docs/security/example-python.md
@@ -19,12 +19,12 @@ Want to learn more? Check out our [documentation on LLM Security](https://langfu
## Installation and Setup
-```typescript
+```python
%pip install llm-guard langfuse openai
```
-```typescript
+```python
import os
# Get keys for your project from the project settings page
@@ -51,7 +51,7 @@ The following example walks through an example of kid-friendly storytelling appl
Without security measures, it is possible to generate stories for inappropriate topics, such as those that include violence.
-```typescript
+```python
from langfuse.decorators import observe
from langfuse.openai import openai # OpenAI integration
@@ -84,7 +84,7 @@ LLM Guard uses the following [models](https://huggingface.co/collections/MoritzL
The example below adds the detected "violence" score to the trace in Langfuse. You can see the trace for this interaction, and analytics for these banned topics scores, in the Langfuse dashboard.
-```typescript
+```python
from langfuse.decorators import observe, langfuse_context
from langfuse.openai import openai # OpenAI integration
from llm_guard.input_scanners import BanTopics
@@ -123,7 +123,7 @@ main()
> This is not child safe, please request another topic
-```typescript
+```python
sanitized_prompt, is_valid, risk_score = violence_scanner.scan("war crimes")
print(sanitized_prompt)
print(is_valid)
@@ -147,14 +147,14 @@ Use LLM Guard's [Anonymize scanner](https://llm-guard.com/input_scanners/anonymi
In the example below, Langfuse is used to track each of these steps separately to measure accuracy and latency.
-```typescript
+```python
from llm_guard.vault import Vault
vault = Vault()
```
-```typescript
+```python
from llm_guard.input_scanners import Anonymize
from llm_guard.input_scanners.anonymize_helpers import BERT_LARGE_NER_CONF
from langfuse.openai import openai # OpenAI integration
@@ -208,7 +208,7 @@ main()
You can stack multiple scanners if you want to filter for multiple security risks.
-```typescript
+```python
from langfuse.decorators import observe, langfuse_context
from langfuse.openai import openai # OpenAI integration
@@ -261,7 +261,7 @@ main()
And you can also use the same method to scan the model's output to ensure the quality of the response:
-```typescript
+```python
from llm_guard import scan_output
from llm_guard.output_scanners import NoRefusal, Relevance, Sensitive
@@ -312,7 +312,7 @@ Below is an example of the infamous "Grandma trick", which allows users to trick
We use the LLM Guard [Prompt Injection scanner](https://llm-guard.com/input_scanners/prompt_injection/) to try to detect and block these types of prompts.
-```typescript
+```python
from llm_guard.input_scanners import PromptInjection
from llm_guard.input_scanners.prompt_injection import MatchType
from langfuse.decorators import observe, langfuse_context
@@ -351,12 +351,12 @@ main()
As you can see, LLM Guard fails to catch the injected Grandma Trick prompt. Let's see how another security library, Lakera, performs:
-```typescript
+```python
os.environ["LAKERA_GUARD_API_KEY"] = ""
```
-```typescript
+```python
import os
# requests library must be available in current Python environment
import requests
@@ -409,7 +409,7 @@ Luckily, Lakera Guard is able to catch and block the prompt injection. Langfuse
Here is another example which directly injects a malicious link into the prompt.
-```typescript
+```python
@observe()
def answer_question(question: str, context: str):
scanner = PromptInjection(threshold=0.5, match_type=MatchType.FULL)
diff --git a/pages/docs/tracing-features/_meta.json b/pages/docs/tracing-features/_meta.json
deleted file mode 100644
index 753963f39..000000000
--- a/pages/docs/tracing-features/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "sessions": "Sessions",
- "users": "Users",
- "metadata": "Metadata",
- "tags": "Tags",
- "url": "Trace URL"
-}
diff --git a/pages/docs/tracing-features/_meta.tsx b/pages/docs/tracing-features/_meta.tsx
new file mode 100644
index 000000000..244bbb50e
--- /dev/null
+++ b/pages/docs/tracing-features/_meta.tsx
@@ -0,0 +1,7 @@
+export default {
+ sessions: "Sessions",
+ users: "Users",
+ metadata: "Metadata",
+ tags: "Tags",
+ url: "Trace URL",
+};
diff --git a/pages/docs/tracing-features/log-levels.mdx b/pages/docs/tracing-features/log-levels.mdx
index 458b84a58..eb41d7487 100644
--- a/pages/docs/tracing-features/log-levels.mdx
+++ b/pages/docs/tracing-features/log-levels.mdx
@@ -53,7 +53,7 @@ trace.span(
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -86,3 +86,9 @@ When using the [LlamaIndex Integration](/docs/integrations/llama-index), `level`
+
+## GitHub Discussions
+
+import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
+
+<GhDiscussionsPreview />
diff --git a/pages/docs/tracing-features/metadata.mdx b/pages/docs/tracing-features/metadata.mdx
index 67aefe70f..7bc07d830 100644
--- a/pages/docs/tracing-features/metadata.mdx
+++ b/pages/docs/tracing-features/metadata.mdx
@@ -37,7 +37,7 @@ trace = langfuse.trace(
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -98,7 +98,7 @@ fn()
When using the [OpenAI SDK Integration (JS)](/docs/integrations/openai/js), pass `metadata` as an additional argument:
-```typescript
+```ts
import OpenAI from "openai";
import { observeOpenAI } from "langfuse";
@@ -146,7 +146,7 @@ fn()
When using the [CallbackHandler](/docs/integrations/langchain/tracing), you can pass `metadata` to the constructor:
-```typescript
+```ts
const handler = new CallbackHandler({
metadata: { key: "value" },
});
@@ -154,7 +154,7 @@ const handler = new CallbackHandler({
When using the integration with the JS SDK (see [interop docs](/docs/integrations/langchain/tracing#interoperability)), set `metadata` via `langfuse.trace()`:
-```typescript
+```ts
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
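// Sketch of the elided interop steps (assumed pattern): create the trace with
// metadata, then pass it as the root of the Langchain handler so the run
// nests under it
const trace = langfuse.trace({
  metadata: { key: "value" },
});
const langfuseHandler = new CallbackHandler({ root: trace });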
diff --git a/pages/docs/tracing-features/sampling.mdx b/pages/docs/tracing-features/sampling.mdx
index 19a351e34..3d3adf012 100644
--- a/pages/docs/tracing-features/sampling.mdx
+++ b/pages/docs/tracing-features/sampling.mdx
@@ -4,13 +4,16 @@ description: Configure sampling to control the volume of traces collected by the
# Sampling
-Sampling can be used to control the volume of traces collected by the Langfuse server.
+Sampling can be used to control the volume of traces collected by the Langfuse server.
+<Callout type="info">
+ Sampling is not yet supported by the JS SDK and integrations. Please upvote
+ [this feature request](https://github.com/orgs/langfuse/discussions/3529) if
+ you are interested in this.
+</Callout>
You can configure the sample rate by setting the `LANGFUSE_SAMPLE_RATE` environment variable or by using the `sample_rate` parameter in the constructors of the Python SDK. The value has to be between 0 and 1. The default value is 1, meaning that all traces are collected. A value of 0.2 means that only 20% of the traces are collected. The SDK samples on the trace level, meaning that if a trace is sampled, all observations and scores within that trace will be sampled as well.
-Support for the JS SDK is coming soon.
-
@@ -77,6 +80,7 @@ handler = CallbackHandler(
sample_rate=0.5
)
```
+
@@ -99,3 +103,9 @@ Settings.callback_manager = CallbackManager([langfuse_callback_handler])
+
+## GitHub Discussions
+
+import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
+
+<GhDiscussionsPreview />
diff --git a/pages/docs/tracing-features/sessions.mdx b/pages/docs/tracing-features/sessions.mdx
index f2d749b3b..971b3652c 100644
--- a/pages/docs/tracing-features/sessions.mdx
+++ b/pages/docs/tracing-features/sessions.mdx
@@ -44,7 +44,7 @@ trace = langfuse.trace(
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -157,7 +157,7 @@ fn()
When using the [CallbackHandler](/docs/integrations/langchain/tracing), you can pass the `sessionId` to the constructor:
-```typescript
+```ts
const handler = new CallbackHandler({
sessionId: "your-session-id",
});
@@ -165,7 +165,7 @@ const handler = new CallbackHandler({
You can also set the `session_id` dynamically via the runnable configuration in the chain invocation:
-```typescript
+```ts
import { CallbackHandler } from "langfuse-langchain";
const langfuseHandler = new CallbackHandler();
@@ -182,7 +182,7 @@ await chain.invoke(
When using the integration with the JS SDK (see [interop docs](/docs/integrations/langchain/tracing#interoperability)), set the sessionId via `langfuse.trace()`:
-```typescript
+```ts
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
diff --git a/pages/docs/tracing-features/tags.mdx b/pages/docs/tracing-features/tags.mdx
index ed6e93274..993a5bfdb 100644
--- a/pages/docs/tracing-features/tags.mdx
+++ b/pages/docs/tracing-features/tags.mdx
@@ -4,7 +4,7 @@ description: Tags help to filter and organize traces in Langfuse based on use ca
# Tagging traces
-Tags allow you to categorize and filter traces. You can tag traces only from the [Langfuse SDKs](/docs/sdk) or from the Langfuse UI. To tag a trace, add a list of tags to the tags field of the trace object. Tags are strings and a trace may have multiple tags.
+Tags allow you to categorize and filter traces. You can tag traces (1) when they are created using the Langfuse SDKs and integrations or (2) from the Langfuse UI. To tag a trace, add a list of tags to the tags field of the trace object. Tags are strings and a trace may have multiple tags.
@@ -37,7 +37,7 @@ trace = langfuse.trace(
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -145,7 +145,7 @@ fn()
When using the [CallbackHandler](/docs/integrations/langchain/tracing), you can pass `tags` to the constructor:
-```typescript
+```ts
const handler = new CallbackHandler({
tags: ["tag-1", "tag-2"],
});
@@ -153,20 +153,20 @@ const handler = new CallbackHandler({
You can also set tags dynamically via the runnable configuration in the chain invocation:
-```typescript
+```ts
const langfuseHandler = new CallbackHandler()
const tags = ["tag-1", "tag-2"];
// Your existing Langchain code to create the chain
-...
+...
-// Pass config to the chain invocation to be parsed as Langfuse trace attributes
+// Pass config to the chain invocation to be parsed as Langfuse trace attributes
await chain.invoke({ input: "" }, { callbacks: [langfuseHandler], tags: tags });
```
When using the integration with the JS SDK (see [interop docs](/docs/integrations/langchain/tracing#interoperability)), set tags via `langfuse.trace()`:
-```typescript
+```ts
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
diff --git a/pages/docs/tracing-features/url.mdx b/pages/docs/tracing-features/url.mdx
index 5f6473340..09a1f05a8 100644
--- a/pages/docs/tracing-features/url.mdx
+++ b/pages/docs/tracing-features/url.mdx
@@ -34,7 +34,7 @@ trace.get_trace_url()
-```typescript
+```ts
const trace = langfuse.trace(...)
trace.getTraceUrl()
```
@@ -72,7 +72,7 @@ handler.get_trace_url()
Use the interoperability of the Langfuse SDK with the Langchain integration to get the URL of a trace ([interop docs](/docs/integrations/langchain/tracing#interoperability)).
-```typescript
+```ts
// Initialize Langfuse client
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
@@ -89,7 +89,7 @@ langfuseHandler.getTraceUrl();
**Deprecated:** flaky in cases of concurrent requests as it depends on the state of the handler.
-```typescript
+```ts
handler.getTraceUrl();
```
diff --git a/pages/docs/tracing-features/users.mdx b/pages/docs/tracing-features/users.mdx
index f739a16fb..b7dd698ba 100644
--- a/pages/docs/tracing-features/users.mdx
+++ b/pages/docs/tracing-features/users.mdx
@@ -39,7 +39,7 @@ trace = langfuse.trace(
-```typescript
+```ts
import { Langfuse } from "langfuse";
const langfuse = new Langfuse();
@@ -152,7 +152,7 @@ fn()
When using the [CallbackHandler](/docs/integrations/langchain/tracing), you can pass `userId` to the constructor:
-```typescript
+```ts
const handler = new CallbackHandler({
userId: "user-id",
});
@@ -160,7 +160,7 @@ const handler = new CallbackHandler({
You can also set the `userId` dynamically via the runnable configuration in the chain invocation:
-```typescript
+```ts
import { CallbackHandler } from "langfuse-langchain";
const langfuseHandler = new CallbackHandler();
@@ -177,7 +177,7 @@ await chain.invoke(
When using the integration with the JS SDK (see [interop docs](/docs/integrations/langchain/tracing#interoperability)), set `userId` via `langfuse.trace()`:
-```typescript
+```ts
import { CallbackHandler, Langfuse } from "langfuse-langchain";
const langfuse = new Langfuse();
diff --git a/pages/docs/tracing.mdx b/pages/docs/tracing.mdx
index c106e0a4c..6312b1951 100644
--- a/pages/docs/tracing.mdx
+++ b/pages/docs/tracing.mdx
@@ -52,7 +52,7 @@ A trace in Langfuse consists of the following objects:
-
+
**Hierarchical structure of traces in Langfuse**
@@ -67,7 +67,7 @@ classDiagram
-
+
**Example trace in Langfuse UI**
diff --git a/pages/faq/_meta.json b/pages/faq/_meta.json
deleted file mode 100644
index ce34551c7..000000000
--- a/pages/faq/_meta.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "-- Switcher": {
- "type": "separator",
- "title": "Switcher"
- },
- "index": "Overview",
- "tag": "By Tags",
- "all": {
- "type": "children",
- "display": "hidden"
- }
-}
diff --git a/pages/faq/_meta.tsx b/pages/faq/_meta.tsx
new file mode 100644
index 000000000..a3f7ca74a
--- /dev/null
+++ b/pages/faq/_meta.tsx
@@ -0,0 +1,14 @@
+import { MenuSwitcher } from "@/components/MenuSwitcher";
+
+export default {
+ "-- Switcher": {
+ type: "separator",
+ title: <MenuSwitcher />,
+ },
+ index: "Overview",
+ tag: "By Tags",
+ all: {
+ type: "children",
+ display: "hidden",
+ },
+};
diff --git a/pages/faq/all/llm-analytics-101.mdx b/pages/faq/all/llm-analytics-101.mdx
index e025eccb8..f39ef15c9 100644
--- a/pages/faq/all/llm-analytics-101.mdx
+++ b/pages/faq/all/llm-analytics-101.mdx
@@ -24,7 +24,7 @@ The new logging stack needs to think LLM-native from the ground up. That means g
## Let's Dive in: What to Measure?
-```typescript
+```ts
// Example generation creation
const generation = trace.generation({
name: "chat-completion",
diff --git a/pages/faq/tag/[tag].mdx b/pages/faq/tag/[tag].mdx
index 865cf79f8..92175ac0b 100644
--- a/pages/faq/tag/[tag].mdx
+++ b/pages/faq/tag/[tag].mdx
@@ -47,7 +47,7 @@ export const getStaticProps = async ({ params }) => {
};
};
-import { useData } from "nextra/data";
+import { useData } from "nextra/hooks";
export const SsgFaqList = () => {
// Get the data from SSG, and render it as a component.
@@ -61,9 +61,9 @@ export const SsgTagName = () => {
return {formattedTag};
};
-
diff --git a/pages/faq/tag/_meta.json b/pages/faq/tag/_meta.json
deleted file mode 100644
index 43fd52028..000000000
--- a/pages/faq/tag/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "*": {
- "display": "normal"
- }
-}
diff --git a/pages/faq/tag/_meta.tsx b/pages/faq/tag/_meta.tsx
new file mode 100644
index 000000000..45872c0de
--- /dev/null
+++ b/pages/faq/tag/_meta.tsx
@@ -0,0 +1,5 @@
+export default {
+ "*": {
+ display: "normal",
+ },
+};
diff --git a/pages/guides/_meta.json b/pages/guides/_meta.json
deleted file mode 100644
index 3ade3571d..000000000
--- a/pages/guides/_meta.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "-- Switcher": {
- "type": "separator",
- "title": "Switcher"
- },
- "index": "Overview",
- "cookbook": "Cookbooks",
- "videos": "Videos"
-}
diff --git a/pages/guides/_meta.tsx b/pages/guides/_meta.tsx
new file mode 100644
index 000000000..15f847fa1
--- /dev/null
+++ b/pages/guides/_meta.tsx
@@ -0,0 +1,11 @@
+import { MenuSwitcher } from "@/components/MenuSwitcher";
+
+export default {
+ "-- Switcher": {
+ type: "separator",
+ title: <MenuSwitcher />,
+ },
+ index: "Overview",
+ cookbook: "Cookbooks",
+ videos: "Videos",
+};
diff --git a/pages/guides/cookbook/example_external_evaluation_pipelines.md b/pages/guides/cookbook/example_external_evaluation_pipelines.md
index 6aa79b726..51ab879de 100644
--- a/pages/guides/cookbook/example_external_evaluation_pipelines.md
+++ b/pages/guides/cookbook/example_external_evaluation_pipelines.md
@@ -26,7 +26,7 @@ If your use case meets any of these situations, let’s go ahead and implement yo
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
- allowfullscreen
+ allowFullScreen
>
---
diff --git a/pages/guides/cookbook/example_llm_security_monitoring.md b/pages/guides/cookbook/example_llm_security_monitoring.md
index d422716a9..121fca5d7 100644
--- a/pages/guides/cookbook/example_llm_security_monitoring.md
+++ b/pages/guides/cookbook/example_llm_security_monitoring.md
@@ -19,12 +19,12 @@ Want to learn more? Check out our [documentation on LLM Security](https://langfu
## Installation and Setup
-```typescript
+```python
%pip install llm-guard langfuse openai
```
-```typescript
+```python
import os
# Get keys for your project from the project settings page
@@ -51,7 +51,7 @@ The following example walks through an example of kid-friendly storytelling appl
Without security measures, it is possible to generate stories for inappropriate topics, such as those that include violence.
-```typescript
+```python
from langfuse.decorators import observe
from langfuse.openai import openai # OpenAI integration
@@ -84,7 +84,7 @@ LLM Guard uses the following [models](https://huggingface.co/collections/MoritzL
The example below adds the detected "violence" score to the trace in Langfuse. You can see the trace for this interaction, and analytics for these banned topics scores, in the Langfuse dashboard.
-```typescript
+```python
from langfuse.decorators import observe, langfuse_context
from langfuse.openai import openai # OpenAI integration
from llm_guard.input_scanners import BanTopics
@@ -123,7 +123,7 @@ main()
> This is not child safe, please request another topic
-```typescript
+```python
sanitized_prompt, is_valid, risk_score = violence_scanner.scan("war crimes")
print(sanitized_prompt)
print(is_valid)
@@ -147,14 +147,14 @@ Use LLM Guard's [Anonymize scanner](https://llm-guard.com/input_scanners/anonymi
In the example below, Langfuse is used to track each of these steps separately to measure accuracy and latency.
-```typescript
+```python
from llm_guard.vault import Vault
vault = Vault()
```
-```typescript
+```python
from llm_guard.input_scanners import Anonymize
from llm_guard.input_scanners.anonymize_helpers import BERT_LARGE_NER_CONF
from langfuse.openai import openai # OpenAI integration
@@ -208,7 +208,7 @@ main()
You can stack multiple scanners if you want to filter for multiple security risks.
-```typescript
+```python
from langfuse.decorators import observe, langfuse_context
from langfuse.openai import openai # OpenAI integration
@@ -261,7 +261,7 @@ main()
And you can also use the same method to scan the model's output to ensure the quality of the response:
-```typescript
+```python
from llm_guard import scan_output
from llm_guard.output_scanners import NoRefusal, Relevance, Sensitive
@@ -312,7 +312,7 @@ Below is an example of the infamous "Grandma trick", which allows users to trick
We use the LLM Guard [Prompt Injection scanner](https://llm-guard.com/input_scanners/prompt_injection/) to try to detect and block these types of prompts.
-```typescript
+```python
from llm_guard.input_scanners import PromptInjection
from llm_guard.input_scanners.prompt_injection import MatchType
from langfuse.decorators import observe, langfuse_context
@@ -351,12 +351,12 @@ main()
As you can see, LLM Guard fails to catch the injected Grandma Trick prompt. Let's see how another security library, Lakera, performs:
-```typescript
+```python
os.environ["LAKERA_GUARD_API_KEY"] = ""
```
-```typescript
+```python
import os
# requests library must be available in current Python environment
import requests
@@ -409,7 +409,7 @@ Luckily, Lakera Guard is able to catch and block the prompt injection. Langfuse
Here is another example which directly injects a malicious link into the prompt.
-```typescript
+```python
@observe()
def answer_question(question: str, context: str):
scanner = PromptInjection(threshold=0.5, match_type=MatchType.FULL)
diff --git a/pages/guides/cookbook/integration_langchain.md b/pages/guides/cookbook/integration_langchain.md
index b6ba031cd..ba3f3686b 100644
--- a/pages/guides/cookbook/integration_langchain.md
+++ b/pages/guides/cookbook/integration_langchain.md
@@ -13,7 +13,7 @@ Follow the [integration guide](https://langfuse.com/docs/integrations/langchain)
```python
-%pip install langfuse langchain langchain_openai --upgrade
+%pip install langfuse langchain langchain_openai langchain_community --upgrade
```
Initialize the Langfuse client with your API keys from the project settings in the Langfuse UI and add them to your environment.
@@ -22,14 +22,15 @@ Initialize the Langfuse client with your API keys from the project settings in t
```python
import os
-# get keys for your project from https://cloud.langfuse.com
-os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-***"
-os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-***"
-os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # for EU data region
-# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # for US data region
+# Get keys for your project from the project settings page
+# https://cloud.langfuse.com
+os.environ["LANGFUSE_PUBLIC_KEY"] = ""
+os.environ["LANGFUSE_SECRET_KEY"] = ""
+os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region
+# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region
-# your openai key
-os.environ["OPENAI_API_KEY"] = "***"
+# Your openai key
+os.environ["OPENAI_API_KEY"] = ""
```
@@ -198,6 +199,11 @@ chain.invoke(query, config={"callbacks":[langfuse_handler]})
### Agent
+```python
+%pip install google-search-results
+```
+
+
```python
from langchain.agents import AgentExecutor, load_tools, create_openai_functions_agent
from langchain_openai import ChatOpenAI
@@ -274,54 +280,55 @@ review = overall_chain.invoke("Tragedy at sunset on the beach", {"callbacks":[la
review = overall_chain.run("Tragedy at sunset on the beach", callbacks=[langfuse_handler])# add the handler to the run method
```
-## Adding scores to traces
+## Customize trace names via run_name
-In addition to the attributes automatically captured by the decorator, you can add others to use the full features of Langfuse.
+By default, Langfuse uses the Langchain run_name as the trace/observation name. For more complex or custom chains, it can be useful to customize the names via your own run_names.
-Two utility methods:
-* `langfuse_context.update_current_observation`: Update the trace/span of the current function scope
-* `langfuse_context.update_current_trace`: Update the trace itself, can also be called within any deeply nested span within the trace
+![Custom LangChain Run Names](https://langfuse.com/images/cookbook/integration-langchain/custom_langchain_run_names.png)
-For details on available attributes, have a look at the [reference](https://python.reference.langfuse.com/langfuse/decorators#LangfuseDecorator.update_current_observation).
+**Example without custom run names**
-Below is an example demonstrating how to enrich traces and observations with custom parameters:
+
+```python
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+chain.invoke({"person": "Grace Hopper"}, config={
+ "callbacks":[langfuse_handler]
+ })
+```
+
+### Via Runnable Config
```python
-from langfuse.decorators import langfuse_context, observe
-
-@observe(as_type="generation")
-def deeply_nested_llm_call():
- # Enrich the current observation with a custom name, input, and output
- langfuse_context.update_current_observation(
- name="Deeply nested LLM call", input="Ping?", output="Pong!"
- )
- # Set the parent trace's name from within a nested observation
- langfuse_context.update_current_trace(
- name="Trace name set from deeply_nested_llm_call",
- session_id="1234",
- user_id="5678",
- tags=["tag1", "tag2"],
- public=True
- )
-
-@observe()
-def nested_span():
- # Update the current span with a custom name and level
- langfuse_context.update_current_observation(name="Nested Span", level="WARNING")
- deeply_nested_llm_call()
-
-@observe()
-def main():
- nested_span()
-
-# Execute the main function to generate the enriched trace
-main()
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?").with_config(run_name="Famous Person Prompt")
+model = ChatOpenAI().with_config(run_name="Famous Person LLM")
+output_parser = StrOutputParser().with_config(run_name="Famous Person Output Parser")
+chain = (prompt | model | output_parser).with_config(run_name="Famous Person Locator")
+
+chain.invoke({"person": "Grace Hopper"}, config={
+ "callbacks":[langfuse_handler]
+})
```
-On the Langfuse platform the trace now shows with the updated name from the `deeply_nested_llm_call`, and the observations will be enriched with the appropriate data points.
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/ec9fcc46-ca38-4bdb-9482-eb06a5f90944
+
+### Via Run Config
+
+
+```python
+
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+chain.invoke({"person": "Grace Hopper"}, config={
+ "run_name": "Famous Person Locator",
+ "callbacks":[langfuse_handler]
+ })
+```
-**Example trace:** https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f16e0151-cca8-4d90-bccf-1d9ea0958afb
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/b48204e2-fd48-487b-8f66-015e3f10613d
## Interoperability with Langfuse Python SDK
@@ -429,3 +436,106 @@ main()
View it in Langfuse
![Trace of Nested Langchain Runs in Langfuse](https://langfuse.com/images/docs/langchain_python_trace_interoperability.png)
+
+## Adding evaluation/feedback scores to traces
+
+Evaluation results and user feedback are recorded as [scores](https://langfuse.com/docs/scores) in Langfuse.
+
+To add a score to a trace, you need to know the trace_id. There are two options to achieve this when using LangChain:
+
+1. Provide a predefined LangChain run_id
+2. Use the Langfuse Decorator to get the trace_id
+
+![Langchain Trace in Langfuse with Score](https://langfuse.com/images/cookbook/integration-langchain/langchain_trace_with_score.png)
+
+### Predefined LangChain `run_id`
+
+Langfuse uses the LangChain run_id as a trace_id. Thus you can provide a custom run_id to the runnable config in order to later add scores to the trace.
+
+
+```python
+from operator import itemgetter
+from langchain_openai import ChatOpenAI
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+import uuid
+
+predefined_run_id = str(uuid.uuid4())
+
+langfuse_handler = CallbackHandler()
+
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+
+chain.invoke({"person": "Ada Lovelace"}, config={
+ "run_id": predefined_run_id,
+ "callbacks":[langfuse_handler]
+})
+```
+
+
+```python
+from langfuse import Langfuse
+
+langfuse = Langfuse()
+
+langfuse.score(
+ trace_id=predefined_run_id,
+ name="user-feedback",
+ value=1,
+ comment="This was correct, thank you"
+)
+```
+
+Example Trace in Langfuse: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/9860fffa-02ed-4278-bcf7-c856c569cead
+
+### Via Langfuse Decorator
+
+Alternatively, you can use the LangChain integration together with the [Langfuse @observe-decorator](https://langfuse.com/docs/sdk/python/decorators) for Python.
+
+
+```python
+from langfuse.decorators import langfuse_context, observe
+from operator import itemgetter
+from langchain_openai import ChatOpenAI
+from langchain.prompts import ChatPromptTemplate
+from langchain.schema import StrOutputParser
+import uuid
+
+prompt = ChatPromptTemplate.from_template("what is the city {person} is from?")
+model = ChatOpenAI()
+chain = prompt | model | StrOutputParser()
+
+@observe()
+def main(person):
+
+ langfuse_handler = langfuse_context.get_current_langchain_handler()
+
+ response = chain.invoke({"person": person}, config={
+ "callbacks":[langfuse_handler]
+ })
+
+ trace_id = langfuse_context.get_current_trace_id()
+
+ return trace_id, response
+
+
+trace_id, response = main("Ada Lovelace")
+```
+
+
+```python
+from langfuse import Langfuse
+
+langfuse = Langfuse()
+
+langfuse.score(
+ trace_id=trace_id,
+ name="user-feedback",
+ value=1,
+ comment="This was correct, thank you"
+)
+```
+
+Example trace: https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/08bb7cf3-87c6-4a78-a3fc-72af8959a106
diff --git a/pages/guides/videos/beginners-guide-to-rag-evaluation.mdx b/pages/guides/videos/beginners-guide-to-rag-evaluation.mdx
index 47567dca9..5d070011e 100644
--- a/pages/guides/videos/beginners-guide-to-rag-evaluation.mdx
+++ b/pages/guides/videos/beginners-guide-to-rag-evaluation.mdx
@@ -26,7 +26,7 @@ particularly when combined with [Ragas](https://docs.ragas.io) metrics.
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
- allowfullscreen
+ allowFullScreen
>
## Our Notes
diff --git a/pages/guides/videos/external-evaluation-pipelines.mdx b/pages/guides/videos/external-evaluation-pipelines.mdx
index 517be3c52..e66166189 100644
--- a/pages/guides/videos/external-evaluation-pipelines.mdx
+++ b/pages/guides/videos/external-evaluation-pipelines.mdx
@@ -14,7 +14,7 @@ ogImage: /images/videos/external-evaluation-pipelines.jpg
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
- allowfullscreen
+ allowFullScreen
>
### Learn more
diff --git a/pages/guides/videos/webinar-observability-llm-systems.mdx b/pages/guides/videos/webinar-observability-llm-systems.mdx
index cb1e7f253..237f8a1d5 100644
--- a/pages/guides/videos/webinar-observability-llm-systems.mdx
+++ b/pages/guides/videos/webinar-observability-llm-systems.mdx
@@ -24,7 +24,7 @@ Topics that were covered:
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
- allowfullscreen
+ allowFullScreen
>
## Slides
@@ -34,7 +34,5 @@ Topics that were covered:
frameborder="0"
width="100%"
className="aspect-video rounded mt-10"
- allowfullscreen="true"
- mozallowfullscreen="true"
- webkitallowfullscreen="true"
+ allowFullScreen
>
diff --git a/pages/oss-friends.mdx b/pages/oss-friends.mdx
index f165df89b..59f6fe63f 100644
--- a/pages/oss-friends.mdx
+++ b/pages/oss-friends.mdx
@@ -2,7 +2,7 @@
description: "We are proud to collaborate with a diverse group of partners to promote open-source software and the values of transparency, collaboration, and community that it represents."
---
-import { useData } from "nextra/data";
+import { useData } from "nextra/hooks";
import { Button } from "@/components/ui/button";
import Link from "next/link";
@@ -25,7 +25,7 @@ export function OSSFriendsPage() {
{OSSFriends.map((friend, index) => (