From 4df70ed473223d3b6724fc4370ea39f88f4003f6 Mon Sep 17 00:00:00 2001
From: joelsprunger
Date: Tue, 6 Feb 2024 22:32:40 -0800
Subject: [PATCH] fix: repeated calls to split() will clear chunks before splitting

chore: documentation ipynb
---
 .../recursive_json_text_splitter.ipynb     | 163 ++++++++++++++++++
 libs/langchain/langchain/text_splitter.py  |   1 +
 2 files changed, 164 insertions(+)
 create mode 100644 docs/docs/modules/data_connection/document_transformers/recursive_json_text_splitter.ipynb

diff --git a/docs/docs/modules/data_connection/document_transformers/recursive_json_text_splitter.ipynb b/docs/docs/modules/data_connection/document_transformers/recursive_json_text_splitter.ipynb
new file mode 100644
index 0000000000000..e69a2ec786369
--- /dev/null
+++ b/docs/docs/modules/data_connection/document_transformers/recursive_json_text_splitter.ipynb
@@ -0,0 +1,163 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "a678d550",
+   "metadata": {},
+   "source": [
+    "# Recursively split JSON\n",
+    "\n",
+    "This text splitter traverses json data depth-first and builds smaller json chunks. It attempts to keep nested json objects whole, but will split them if needed to keep chunks between the min_chunk_size and the max_chunk_size. If a value is not a nested json object but rather a very large string, the string will not be split. There is an optional pre-processing step to split lists, by first converting them to json (dict) and then splitting them as such.\n",
+    "\n",
+    "1. How the text is split: by json value.\n",
+    "2. How the chunk size is measured: by number of characters."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a504e1e7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "import requests"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3390ae1d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This is a large nested json object and will be loaded as a python dict\n",
+    "json_data = requests.get(\"https://api.smith.langchain.com/openapi.json\").json()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7bfe2c1e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.text_splitter import RecursiveJsonTextSplitter"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2833c409",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "json_splitter = RecursiveJsonTextSplitter(max_chunk_size=300)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f941aa56",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "# Recursively split the json data\n",
+    "json_splitter.split(json_data=json_data)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0839f4f0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The splitter now has json chunks that can be accessed as a list of Documents\n",
+    "docs = json_splitter.to_documents()\n",
+    "\n",
+    "# or as strings\n",
+    "texts = json_splitter.to_string()\n",
+    "\n",
+    "print(texts[0])\n",
+    "print(texts[1])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c34b1f7f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Let's look at the size of the chunks\n",
+    "print([len(text) for text in texts][:10])\n",
+    "\n",
+    "# Reviewing one of the larger chunks, we see that it contains a list object\n",
+    "print(texts[1])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "992477c2",
+   "metadata": {},
"outputs": [], + "source": [ + "# The json splitter by default does not split lists\n", + "# the following will preprocess the json and convert list to dict with index:item key:val pairs\n", + "json_splitter.split(json_data=json_data, convert_lists=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2d23b3aa", + "metadata": {}, + "outputs": [], + "source": [ + "# let's see the strings\n", + "texts = json_splitter.to_string()\n", + "\n", + "# Let's look at the size of the chunks. Now they are all under the max\n", + "print([len(text) for text in texts][:10])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d2c2773e", + "metadata": {}, + "outputs": [], + "source": [ + "# The list has been converted to a dict, but retains all the needed contextual information even if split into many chunks\n", + "print(texts[1])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/libs/langchain/langchain/text_splitter.py b/libs/langchain/langchain/text_splitter.py index 64cb889b93fb4..6ae0c910a983a 100644 --- a/libs/langchain/langchain/text_splitter.py +++ b/libs/langchain/langchain/text_splitter.py @@ -1616,6 +1616,7 @@ def split( with index:item as the key:value pairs """ + self._chunks.clear() # Clear any existing chunks self._chunks.append({}) # Start with an empty chunk if convert_lists: self._json_split(self._list_to_dict_preprocessing(json_data))