diff --git a/libs/community/langchain_community/agent_toolkits/nla/tool.py b/libs/community/langchain_community/agent_toolkits/nla/tool.py
index 47f25d13b687b..f097edaa1184d 100644
--- a/libs/community/langchain_community/agent_toolkits/nla/tool.py
+++ b/libs/community/langchain_community/agent_toolkits/nla/tool.py
@@ -30,7 +30,7 @@ def from_open_api_endpoint_chain(
The API endpoint tool.
"""
expanded_name = (
- f'{api_title.replace(" ", "_")}.{chain.api_operation.operation_id}'
+ f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}"
)
description = (
f"I'm an AI from {api_title}. Instruct what you want,"
diff --git a/libs/community/langchain_community/callbacks/fiddler_callback.py b/libs/community/langchain_community/callbacks/fiddler_callback.py
index 95df0851d55d4..0ff6ed894d0b6 100644
--- a/libs/community/langchain_community/callbacks/fiddler_callback.py
+++ b/libs/community/langchain_community/callbacks/fiddler_callback.py
@@ -100,7 +100,7 @@ def __init__(
if self.project not in self.fiddler_client.get_project_names():
print( # noqa: T201
- f"adding project {self.project}." "This only has to be done once."
+                f"adding project {self.project}. This only has to be done once."
)
try:
self.fiddler_client.add_project(self.project)
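
Reviewer note: Python joins adjacent string literals with no separator, which is how the original pair silently lost the space restored in the hunk above. A minimal sketch, with an illustrative project name:

```python
# Adjacent literals concatenate with nothing in between:
joined = "adding project demo." "This only has to be done once."
assert joined == "adding project demo.This only has to be done once."  # no space

# Collapsing to one literal makes the intended spacing explicit:
fixed = "adding project demo. This only has to be done once."
```
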
diff --git a/libs/community/langchain_community/callbacks/manager.py b/libs/community/langchain_community/callbacks/manager.py
index ba942084953f6..8e8d052560602 100644
--- a/libs/community/langchain_community/callbacks/manager.py
+++ b/libs/community/langchain_community/callbacks/manager.py
@@ -61,9 +61,9 @@ def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
@contextmanager
-def get_bedrock_anthropic_callback() -> (
- Generator[BedrockAnthropicTokenUsageCallbackHandler, None, None]
-):
+def get_bedrock_anthropic_callback() -> Generator[
+ BedrockAnthropicTokenUsageCallbackHandler, None, None
+]:
"""Get the Bedrock anthropic callback handler in a context manager.
which conveniently exposes token and cost information.
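
Reviewer note: both annotation layouts are equivalent at runtime and to type checkers; the new style breaks inside the `Generator[...]` subscript instead of parenthesizing the whole return annotation. A minimal self-contained sketch, with an illustrative handler class standing in for `BedrockAnthropicTokenUsageCallbackHandler`:

```python
from contextlib import contextmanager
from typing import Generator


class TokenUsageHandler:
    """Illustrative stand-in for a callback handler that tracks token counts."""

    total_tokens: int = 0


@contextmanager
def get_token_usage_callback() -> Generator[
    TokenUsageHandler, None, None
]:
    # Generator[yield_type, send_type, return_type]; the manager yields one handler.
    yield TokenUsageHandler()


with get_token_usage_callback() as cb:
    cb.total_tokens += 10
```
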
diff --git a/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py b/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
index 5d5985c039cb7..4747bfc2f690d 100644
--- a/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
+++ b/libs/community/langchain_community/callbacks/streamlit/streamlit_callback_handler.py
@@ -211,9 +211,9 @@ def on_agent_action(
def complete(self, final_label: Optional[str] = None) -> None:
"""Finish the thought."""
if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
- assert (
- self._last_tool is not None
- ), "_last_tool should never be null when _state == RUNNING_TOOL"
+ assert self._last_tool is not None, (
+ "_last_tool should never be null when _state == RUNNING_TOOL"
+ )
final_label = self._labeler.get_tool_label(
self._last_tool, is_complete=True
)
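
Reviewer note: this is the formatter's newer assert layout, applied throughout the PR: the condition stays on the `assert` line and only the message is parenthesized. Both forms behave identically; a minimal sketch with an illustrative variable:

```python
last_tool = "search"  # illustrative value

# Old layout: parenthesized condition, message hanging off the closing paren.
assert (
    last_tool is not None
), "last_tool should never be null when running a tool"

# New layout: inline condition, parenthesized message.
assert last_tool is not None, (
    "last_tool should never be null when running a tool"
)
```
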
diff --git a/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py b/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
index e6e36a505a947..0ad9ff3656036 100644
--- a/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
+++ b/libs/community/langchain_community/chains/pebblo_retrieval/utilities.py
@@ -467,7 +467,7 @@ async def amake_request(
logger.warning(f"Pebblo Server: Error {response.status}")
elif response.status >= HTTPStatus.BAD_REQUEST:
logger.warning(
- f"Pebblo received an invalid payload: " f"{response.text}"
+ f"Pebblo received an invalid payload: {response.text}"
)
elif response.status != HTTPStatus.OK:
logger.warning(
diff --git a/libs/community/langchain_community/chat_loaders/facebook_messenger.py b/libs/community/langchain_community/chat_loaders/facebook_messenger.py
index 2bf883b0f0fe9..44900e0f84eef 100644
--- a/libs/community/langchain_community/chat_loaders/facebook_messenger.py
+++ b/libs/community/langchain_community/chat_loaders/facebook_messenger.py
@@ -37,7 +37,7 @@ def lazy_load(self) -> Iterator[ChatSession]:
if "content" not in m:
logger.info(
f"""Skipping Message No.
- {index+1} as no content is present in the message"""
+ {index + 1} as no content is present in the message"""
)
continue
messages.append(
diff --git a/libs/community/langchain_community/chat_message_histories/neo4j.py b/libs/community/langchain_community/chat_message_histories/neo4j.py
index 5a054c706de25..9d2cb317874cc 100644
--- a/libs/community/langchain_community/chat_message_histories/neo4j.py
+++ b/libs/community/langchain_community/chat_message_histories/neo4j.py
@@ -87,7 +87,7 @@ def messages(self) -> List[BaseMessage]:
query = (
f"MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) "
"WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0.."
- f"{self._window*2}]-() WITH p, length(p) AS length "
+ f"{self._window * 2}]-() WITH p, length(p) AS length "
"ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node "
"RETURN {data:{content: node.content}, type:node.type} AS result"
)
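
Reviewer note: code inside f-string braces is ordinary Python, so the formatter applies standard operator spacing there as well; the rendered string is untouched. A quick check with an illustrative window size:

```python
window = 3  # illustrative value standing in for self._window

old = f"[:NEXT*0..{window*2}]-()"
new = f"[:NEXT*0..{window * 2}]-()"
assert old == new == "[:NEXT*0..6]-()"  # spacing inside braces never reaches the output
```
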
diff --git a/libs/community/langchain_community/chat_message_histories/sql.py b/libs/community/langchain_community/chat_message_histories/sql.py
index 2c3b2351c471d..8c0706b7ee0d9 100644
--- a/libs/community/langchain_community/chat_message_histories/sql.py
+++ b/libs/community/langchain_community/chat_message_histories/sql.py
@@ -177,9 +177,9 @@ def __init__(
engine_args: Additional configuration for creating database engines.
async_mode: Whether it is an asynchronous connection.
"""
- assert not (
- connection_string and connection
- ), "connection_string and connection are mutually exclusive"
+ assert not (connection_string and connection), (
+ "connection_string and connection are mutually exclusive"
+ )
if connection_string:
global _warned_once_already
if not _warned_once_already:
diff --git a/libs/community/langchain_community/chat_models/bedrock.py b/libs/community/langchain_community/chat_models/bedrock.py
index 6b36208390379..086a4d461301f 100644
--- a/libs/community/langchain_community/chat_models/bedrock.py
+++ b/libs/community/langchain_community/chat_models/bedrock.py
@@ -110,9 +110,9 @@ def _format_anthropic_messages(
if not isinstance(message.content, str):
# parse as dict
- assert isinstance(
- message.content, list
- ), "Anthropic message content must be str or list of dicts"
+ assert isinstance(message.content, list), (
+ "Anthropic message content must be str or list of dicts"
+ )
# populate content
content = []
diff --git a/libs/community/langchain_community/chat_models/deepinfra.py b/libs/community/langchain_community/chat_models/deepinfra.py
index b23402cdc06f0..546171f876249 100644
--- a/libs/community/langchain_community/chat_models/deepinfra.py
+++ b/libs/community/langchain_community/chat_models/deepinfra.py
@@ -468,8 +468,7 @@ def _handle_status(self, code: int, text: Any) -> None:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
- f"DeepInfra returned an unexpected response with status "
- f"{code}: {text}"
+ f"DeepInfra returned an unexpected response with status {code}: {text}"
)
def _url(self) -> str:
diff --git a/libs/community/langchain_community/chat_models/konko.py b/libs/community/langchain_community/chat_models/konko.py
index 3aec395cf5304..a1164d6d17804 100644
--- a/libs/community/langchain_community/chat_models/konko.py
+++ b/libs/community/langchain_community/chat_models/konko.py
@@ -179,8 +179,7 @@ def get_available_models(
if models_response.status_code != 200:
raise ValueError(
- f"Error getting models from {models_url}: "
- f"{models_response.status_code}"
+ f"Error getting models from {models_url}: {models_response.status_code}"
)
return {model["id"] for model in models_response.json()["data"]}
diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index 5be59560ac74e..15a765148ca74 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -196,7 +196,10 @@ def _messages_to_prompt_dict(
elif isinstance(input_msg, HumanMessage):
if template_id is None:
examples_and_messages.append(
- {"role": "user", "content": str(input_msg.content)}
+ {
+ "role": "user",
+ "content": str(input_msg.content),
+ }
)
else:
params: Dict[str, str] = {}
@@ -206,12 +209,19 @@ def _messages_to_prompt_dict(
)
params[str(input_msg.id)] = str(input_msg.content)
examples_and_messages.append(
- {"role": "user", "template_id": template_id, "params": params}
+ {
+ "role": "user",
+ "template_id": template_id,
+ "params": params,
+ }
)
elif isinstance(input_msg, AIMessage):
if input_msg.tool_calls is None or len(input_msg.tool_calls) == 0:
examples_and_messages.append(
- {"role": "assistant", "content": str(input_msg.content)}
+ {
+ "role": "assistant",
+ "content": str(input_msg.content),
+ }
)
else:
ai_msg_to_json = {
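
Reviewer note: the dict hunks in this file (and in the rerank, nemo, and bookend files below) only explode literals to one key per line; the runtime value is unchanged, and the trailing comma after the last pair keeps the formatter from collapsing the literal again. A minimal sketch:

```python
# Collapsed literal: no trailing comma, stays on one line.
compact = {"role": "user", "content": "hello"}

# Exploded literal: the trailing "magic" comma pins the one-key-per-line layout.
exploded = {
    "role": "user",
    "content": "hello",
}

assert compact == exploded  # same dict either way
```
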
diff --git a/libs/community/langchain_community/chat_models/writer.py b/libs/community/langchain_community/chat_models/writer.py
index 4101b6e23eb35..23ec851210bca 100644
--- a/libs/community/langchain_community/chat_models/writer.py
+++ b/libs/community/langchain_community/chat_models/writer.py
@@ -277,7 +277,10 @@ def _stream(
if not delta or not delta.content:
continue
chunk = self._convert_writer_to_langchain(
- {"role": "assistant", "content": delta.content}
+ {
+ "role": "assistant",
+ "content": delta.content,
+ }
)
chunk = ChatGenerationChunk(message=chunk)
@@ -303,7 +306,10 @@ async def _astream(
if not delta or not delta.content:
continue
chunk = self._convert_writer_to_langchain(
- {"role": "assistant", "content": delta.content}
+ {
+ "role": "assistant",
+ "content": delta.content,
+ }
)
chunk = ChatGenerationChunk(message=chunk)
diff --git a/libs/community/langchain_community/chat_models/zhipuai.py b/libs/community/langchain_community/chat_models/zhipuai.py
index 99b58697f1a01..18dc9dd196046 100644
--- a/libs/community/langchain_community/chat_models/zhipuai.py
+++ b/libs/community/langchain_community/chat_models/zhipuai.py
@@ -121,7 +121,7 @@ def _get_jwt_token(api_key: str) -> str:
import jwt
except ImportError:
raise ImportError(
- "jwt package not found, please install it with" "`pip install pyjwt`"
+            "jwt package not found, please install it with `pip install pyjwt`"
)
try:
diff --git a/libs/community/langchain_community/document_compressors/dashscope_rerank.py b/libs/community/langchain_community/document_compressors/dashscope_rerank.py
index 6e298c2c30989..cc77ec9545712 100644
--- a/libs/community/langchain_community/document_compressors/dashscope_rerank.py
+++ b/libs/community/langchain_community/document_compressors/dashscope_rerank.py
@@ -89,7 +89,10 @@ def rerank(
result_dicts = []
for res in results.output.results:
result_dicts.append(
- {"index": res.index, "relevance_score": res.relevance_score}
+ {
+ "index": res.index,
+ "relevance_score": res.relevance_score,
+ }
)
return result_dicts
diff --git a/libs/community/langchain_community/document_compressors/infinity_rerank.py b/libs/community/langchain_community/document_compressors/infinity_rerank.py
index 91d07889fce4e..d8d89ae1d22e7 100644
--- a/libs/community/langchain_community/document_compressors/infinity_rerank.py
+++ b/libs/community/langchain_community/document_compressors/infinity_rerank.py
@@ -101,7 +101,10 @@ def rerank(
result_dicts = []
for res in results:
result_dicts.append(
- {"index": res.index, "relevance_score": res.relevance_score}
+ {
+ "index": res.index,
+ "relevance_score": res.relevance_score,
+ }
)
result_dicts.sort(key=lambda x: x["relevance_score"], reverse=True)
diff --git a/libs/community/langchain_community/document_compressors/jina_rerank.py b/libs/community/langchain_community/document_compressors/jina_rerank.py
index 0a769b311e54e..91dc7f30951c5 100644
--- a/libs/community/langchain_community/document_compressors/jina_rerank.py
+++ b/libs/community/langchain_community/document_compressors/jina_rerank.py
@@ -95,7 +95,10 @@ def rerank(
result_dicts = []
for res in results:
result_dicts.append(
- {"index": res["index"], "relevance_score": res["relevance_score"]}
+ {
+ "index": res["index"],
+ "relevance_score": res["relevance_score"],
+ }
)
return result_dicts
diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py b/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py
index 8397f32fab89b..b0b2dc6daa8f4 100644
--- a/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py
+++ b/libs/community/langchain_community/document_loaders/blob_loaders/youtube_audio.py
@@ -21,8 +21,7 @@ def yield_blobs(self) -> Iterable[Blob]:
import yt_dlp
except ImportError:
raise ImportError(
- "yt_dlp package not found, please install it with "
- "`pip install yt_dlp`"
+ "yt_dlp package not found, please install it with `pip install yt_dlp`"
)
# Use yt_dlp to download audio given a YouTube url
diff --git a/libs/community/langchain_community/document_loaders/chm.py b/libs/community/langchain_community/document_loaders/chm.py
index 42ef6457bb8b7..6ac2c209ddebc 100644
--- a/libs/community/langchain_community/document_loaders/chm.py
+++ b/libs/community/langchain_community/document_loaders/chm.py
@@ -117,6 +117,10 @@ def load_all(self) -> List[Dict[str, str]]:
for item in index:
content = self.load(item["local"])
res.append(
- {"name": item["name"], "local": item["local"], "content": content}
+ {
+ "name": item["name"],
+ "local": item["local"],
+ "content": content,
+ }
)
return res
diff --git a/libs/community/langchain_community/document_loaders/confluence.py b/libs/community/langchain_community/document_loaders/confluence.py
index d8c5efa87482d..8db53974b60f6 100644
--- a/libs/community/langchain_community/document_loaders/confluence.py
+++ b/libs/community/langchain_community/document_loaders/confluence.py
@@ -652,7 +652,7 @@ def process_attachment(
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
- "`Pillow` package not found, " "please run `pip install Pillow`"
+ "`Pillow` package not found, please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
diff --git a/libs/community/langchain_community/document_loaders/csv_loader.py b/libs/community/langchain_community/document_loaders/csv_loader.py
index 1e126709d4add..59927920569b0 100644
--- a/libs/community/langchain_community/document_loaders/csv_loader.py
+++ b/libs/community/langchain_community/document_loaders/csv_loader.py
@@ -164,9 +164,13 @@ def __read_file(self, csvfile: TextIOWrapper) -> Iterator[Document]:
f"Source column '{self.source_column}' not found in CSV file."
)
content = "\n".join(
- f"""{k.strip() if k is not None else k}: {v.strip()
- if isinstance(v, str) else ','.join(map(str.strip, v))
- if isinstance(v, list) else v}"""
+ f"""{k.strip() if k is not None else k}: {
+ v.strip()
+ if isinstance(v, str)
+ else ",".join(map(str.strip, v))
+ if isinstance(v, list)
+ else v
+ }"""
for k, v in row.items()
if (
k in self.content_columns
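
Reviewer note: the rewrite above reflows a chained conditional inside a triple-quoted f-string to one clause per line; triple-quoted f-strings may contain newlines inside the braces, so this is purely cosmetic. A minimal sketch with an illustrative row value:

```python
v = ["a ", " b"]  # illustrative cell value

rendered = f"""key: {
    v.strip()
    if isinstance(v, str)
    else ",".join(map(str.strip, v))
    if isinstance(v, list)
    else v
}"""
assert rendered == "key: a,b"  # list branch taken; v.strip() is never evaluated
```
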
diff --git a/libs/community/langchain_community/document_loaders/dropbox.py b/libs/community/langchain_community/document_loaders/dropbox.py
index c689e3a455633..a0350914536f5 100644
--- a/libs/community/langchain_community/document_loaders/dropbox.py
+++ b/libs/community/langchain_community/document_loaders/dropbox.py
@@ -54,7 +54,7 @@ def _create_dropbox_client(self) -> Any:
try:
from dropbox import Dropbox, exceptions
except ImportError:
- raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")
try:
dbx = Dropbox(self.dropbox_access_token)
@@ -73,7 +73,7 @@ def _load_documents_from_folder(self, folder_path: str) -> List[Document]:
from dropbox import exceptions
from dropbox.files import FileMetadata
except ImportError:
- raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")
try:
results = dbx.files_list_folder(folder_path, recursive=self.recursive)
@@ -98,7 +98,7 @@ def _load_file_from_path(self, file_path: str) -> Optional[Document]:
try:
from dropbox import exceptions
except ImportError:
- raise ImportError("You must run " "`pip install dropbox")
+            raise ImportError("You must run `pip install dropbox`")
try:
file_metadata = dbx.files_get_metadata(file_path)
diff --git a/libs/community/langchain_community/document_loaders/mediawikidump.py b/libs/community/langchain_community/document_loaders/mediawikidump.py
index 288312d6c9c61..a778335646b8e 100644
--- a/libs/community/langchain_community/document_loaders/mediawikidump.py
+++ b/libs/community/langchain_community/document_loaders/mediawikidump.py
@@ -65,7 +65,7 @@ def _load_dump_file(self): # type: ignore[no-untyped-def]
import mwxml
except ImportError as e:
raise ImportError(
- "Unable to import 'mwxml'. Please install with" " `pip install mwxml`."
+ "Unable to import 'mwxml'. Please install with `pip install mwxml`."
) from e
return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
diff --git a/libs/community/langchain_community/document_loaders/mongodb.py b/libs/community/langchain_community/document_loaders/mongodb.py
index 7eddb2f7eb1a1..5f456ab8801e2 100644
--- a/libs/community/langchain_community/document_loaders/mongodb.py
+++ b/libs/community/langchain_community/document_loaders/mongodb.py
@@ -98,7 +98,10 @@ async def aload(self) -> List[Document]:
# Optionally add database and collection names to metadata
if self.include_db_collection_in_metadata:
metadata.update(
- {"database": self.db_name, "collection": self.collection_name}
+ {
+ "database": self.db_name,
+ "collection": self.collection_name,
+ }
)
# Extract text content from filtered fields or use the entire document
diff --git a/libs/community/langchain_community/document_loaders/notiondb.py b/libs/community/langchain_community/document_loaders/notiondb.py
index 3e43a3fd811a1..37c367dcb486d 100644
--- a/libs/community/langchain_community/document_loaders/notiondb.py
+++ b/libs/community/langchain_community/document_loaders/notiondb.py
@@ -126,7 +126,7 @@ def load_page(self, page_summary: Dict[str, Any]) -> Document:
value = prop_data["url"]
elif prop_type == "unique_id":
value = (
- f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
+ f"{prop_data['unique_id']['prefix']}-{prop_data['unique_id']['number']}"
if prop_data["unique_id"]
else None
)
diff --git a/libs/community/langchain_community/document_loaders/nuclia.py b/libs/community/langchain_community/document_loaders/nuclia.py
index 0744b778b4e8f..97e9337b0b9e5 100644
--- a/libs/community/langchain_community/document_loaders/nuclia.py
+++ b/libs/community/langchain_community/document_loaders/nuclia.py
@@ -19,7 +19,12 @@ def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
def load(self) -> List[Document]:
"""Load documents."""
data = self.nua.run(
- {"action": "pull", "id": self.id, "path": None, "text": None}
+ {
+ "action": "pull",
+ "id": self.id,
+ "path": None,
+ "text": None,
+ }
)
if not data:
return []
diff --git a/libs/community/langchain_community/document_loaders/oracleadb_loader.py b/libs/community/langchain_community/document_loaders/oracleadb_loader.py
index 35da49c9affe9..ebf0c446c4485 100644
--- a/libs/community/langchain_community/document_loaders/oracleadb_loader.py
+++ b/libs/community/langchain_community/document_loaders/oracleadb_loader.py
@@ -82,8 +82,7 @@ def _run_query(self) -> List[Dict[str, Any]]:
import oracledb
except ImportError as e:
raise ImportError(
- "Could not import oracledb, "
- "please install with 'pip install oracledb'"
+ "Could not import oracledb, please install with 'pip install oracledb'"
) from e
connect_param = {"user": self.user, "password": self.password, "dsn": self.dsn}
if self.dsn == self.tns_name:
diff --git a/libs/community/langchain_community/document_loaders/parsers/audio.py b/libs/community/langchain_community/document_loaders/parsers/audio.py
index fc4a9704deb2f..6d35336300ecc 100644
--- a/libs/community/langchain_community/document_loaders/parsers/audio.py
+++ b/libs/community/langchain_community/document_loaders/parsers/audio.py
@@ -148,8 +148,7 @@ def __init__(
import openai
except ImportError:
raise ImportError(
- "openai package not found, please install it with "
- "`pip install openai`"
+ "openai package not found, please install it with `pip install openai`"
)
if is_openai_v1():
@@ -278,14 +277,13 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
import openai
except ImportError:
raise ImportError(
- "openai package not found, please install it with "
- "`pip install openai`"
+ "openai package not found, please install it with `pip install openai`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
- "pydub package not found, please install it with " "`pip install pydub`"
+ "pydub package not found, please install it with `pip install pydub`"
)
if is_openai_v1():
@@ -402,7 +400,7 @@ def __init__(
import torch
except ImportError:
raise ImportError(
- "torch package not found, please install it with " "`pip install torch`"
+ "torch package not found, please install it with `pip install torch`"
)
# Determine the device to use
@@ -533,7 +531,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]:
from pydub import AudioSegment
except ImportError:
raise ImportError(
- "pydub package not found, please install it with " "`pip install pydub`"
+ "pydub package not found, please install it with `pip install pydub`"
)
if self.api_key:
diff --git a/libs/community/langchain_community/document_loaders/parsers/docai.py b/libs/community/langchain_community/document_loaders/parsers/docai.py
index 517ea0140aacf..74b80f6af35cc 100644
--- a/libs/community/langchain_community/document_loaders/parsers/docai.py
+++ b/libs/community/langchain_community/document_loaders/parsers/docai.py
@@ -230,7 +230,7 @@ def batch_parse(
time_elapsed += check_in_interval_sec
if time_elapsed > timeout_sec:
raise TimeoutError(
- "Timeout exceeded! Check operations " f"{operation_names} later!"
+ f"Timeout exceeded! Check operations {operation_names} later!"
)
logger.debug(".")
diff --git a/libs/community/langchain_community/document_loaders/parsers/grobid.py b/libs/community/langchain_community/document_loaders/parsers/grobid.py
index 2ffe2998fa37f..ee287cade6e71 100644
--- a/libs/community/langchain_community/document_loaders/parsers/grobid.py
+++ b/libs/community/langchain_community/document_loaders/parsers/grobid.py
@@ -44,7 +44,7 @@ def process_xml(
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
- "`bs4` package not found, please install it with " "`pip install bs4`"
+ "`bs4` package not found, please install it with `pip install bs4`"
)
soup = BeautifulSoup(xml_data, "xml")
sections = soup.find_all("div")
diff --git a/libs/community/langchain_community/document_loaders/parsers/pdf.py b/libs/community/langchain_community/document_loaders/parsers/pdf.py
index 254849df80273..64ec599793feb 100644
--- a/libs/community/langchain_community/document_loaders/parsers/pdf.py
+++ b/libs/community/langchain_community/document_loaders/parsers/pdf.py
@@ -117,9 +117,7 @@ def _format_inner_image(blob: Blob, content: str, format: str) -> str:
content = content.replace("]", r"\\]")
content = f"![{content}]({source})"
elif format == "html-img":
-        content = (
-            f'<img alt="{html.escape(content, quote=True)}" src="{source}" />'
-        )
+        content = f'<img alt="{html.escape(content, quote=True)}" src="{source}" />'
return content
@@ -261,8 +259,7 @@ def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-ty
import pypdf
except ImportError:
raise ImportError(
- "`pypdf` package not found, please install it with "
- "`pip install pypdf`"
+ "`pypdf` package not found, please install it with `pip install pypdf`"
)
def _extract_text_from_page(page: pypdf.PageObject) -> str:
@@ -859,8 +856,7 @@ def __init__(
import PIL # noqa:F401
except ImportError:
raise ImportError(
- "pillow package not found, please install it with"
- " `pip install pillow`"
+ "pillow package not found, please install it with `pip install pillow`"
)
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
diff --git a/libs/community/langchain_community/document_loaders/pyspark_dataframe.py b/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
index 410b7d07afc20..ff1c7fc7ee855 100644
--- a/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
+++ b/libs/community/langchain_community/document_loaders/pyspark_dataframe.py
@@ -36,8 +36,7 @@ def __init__(
from pyspark.sql import DataFrame, SparkSession
except ImportError:
raise ImportError(
- "pyspark is not installed. "
- "Please install it with `pip install pyspark`"
+ "pyspark is not installed. Please install it with `pip install pyspark`"
)
self.spark = (
diff --git a/libs/community/langchain_community/document_loaders/quip.py b/libs/community/langchain_community/document_loaders/quip.py
index 540ef8f945c79..bb3a002912bd6 100644
--- a/libs/community/langchain_community/document_loaders/quip.py
+++ b/libs/community/langchain_community/document_loaders/quip.py
@@ -40,7 +40,7 @@ def __init__(
from quip_api.quip import QuipClient
except ImportError:
raise ImportError(
- "`quip_api` package not found, please run " "`pip install quip_api`"
+ "`quip_api` package not found, please run `pip install quip_api`"
)
self.quip_client = QuipClient(
diff --git a/libs/community/langchain_community/document_loaders/rspace.py b/libs/community/langchain_community/document_loaders/rspace.py
index cb12c6d6c848b..244b92bb4f159 100644
--- a/libs/community/langchain_community/document_loaders/rspace.py
+++ b/libs/community/langchain_community/document_loaders/rspace.py
@@ -58,7 +58,7 @@ def _create_rspace_client(self) -> Any:
from rspace_client.eln import eln, field_content
except ImportError:
- raise ImportError("You must run " "`pip install rspace_client`")
+ raise ImportError("You must run `pip install rspace_client`")
try:
eln = eln.ELNClient(self.url, self.api_key)
@@ -66,8 +66,7 @@ def _create_rspace_client(self) -> Any:
except Exception:
raise Exception(
- f"Unable to initialize client - is url {self.url} or "
- f"api key correct?"
+ f"Unable to initialize client - is url {self.url} or api key correct?"
)
return eln, field_content.FieldContent
diff --git a/libs/community/langchain_community/embeddings/ascend.py b/libs/community/langchain_community/embeddings/ascend.py
index d01f7967491d8..c8cb059177b19 100644
--- a/libs/community/langchain_community/embeddings/ascend.py
+++ b/libs/community/langchain_community/embeddings/ascend.py
@@ -89,7 +89,7 @@ def encode(self, sentences: Any) -> Any:
import torch
except ImportError as e:
raise ImportError(
- "Unable to import torch, please install with " "`pip install -U torch`."
+ "Unable to import torch, please install with `pip install -U torch`."
) from e
last_hidden_state = self.model(
inputs.input_ids.npu(), inputs.attention_mask.npu(), return_dict=True
@@ -103,7 +103,7 @@ def pooling(self, last_hidden_state: Any, attention_mask: Any = None) -> Any:
import torch
except ImportError as e:
raise ImportError(
- "Unable to import torch, please install with " "`pip install -U torch`."
+ "Unable to import torch, please install with `pip install -U torch`."
) from e
if self.pooling_method == "cls":
return last_hidden_state[:, 0]
diff --git a/libs/community/langchain_community/embeddings/bookend.py b/libs/community/langchain_community/embeddings/bookend.py
index 8e977834a47bf..76aac46fd8ff9 100644
--- a/libs/community/langchain_community/embeddings/bookend.py
+++ b/libs/community/langchain_community/embeddings/bookend.py
@@ -67,7 +67,12 @@ def embed_documents(self, texts: List[str]) -> List[List[float]]:
for text in texts:
data = json.dumps(
- {"text": text, "question": None, "context": None, "instruction": None}
+ {
+ "text": text,
+ "question": None,
+ "context": None,
+ "instruction": None,
+ }
)
r = requests.request(
"POST",
diff --git a/libs/community/langchain_community/embeddings/naver.py b/libs/community/langchain_community/embeddings/naver.py
index 0bdc7b9c79015..ce20130e52c30 100644
--- a/libs/community/langchain_community/embeddings/naver.py
+++ b/libs/community/langchain_community/embeddings/naver.py
@@ -108,7 +108,7 @@ def _api_url(self) -> str:
app_type = "serviceapp" if self.service_app else "testapp"
model_name = self.model_name if self.model_name != "bge-m3" else "v2"
if self._is_new_api_key():
- return f"{self.base_url}/{app_type}" f"/v1/api-tools/embedding/{model_name}"
+ return f"{self.base_url}/{app_type}/v1/api-tools/embedding/{model_name}"
else:
return (
f"{self.base_url}/{app_type}"
diff --git a/libs/community/langchain_community/embeddings/nemo.py b/libs/community/langchain_community/embeddings/nemo.py
index 478928e047fd5..fb71bd5e3c62e 100644
--- a/libs/community/langchain_community/embeddings/nemo.py
+++ b/libs/community/langchain_community/embeddings/nemo.py
@@ -69,7 +69,11 @@ def validate_environment(cls, values: Dict) -> Dict:
# Optional: A minimal test payload and headers required by the endpoint
headers = {"Content-Type": "application/json"}
payload = json.dumps(
- {"input": "Hello World", "model": model, "input_type": "query"}
+ {
+ "input": "Hello World",
+ "model": model,
+ "input_type": "query",
+ }
)
is_endpoint_live(url, headers, payload)
@@ -111,7 +115,11 @@ def _embedding_func(self, text: str, input_type: str) -> List[float]:
"""
payload = json.dumps(
- {"input": text, "model": self.model, "input_type": input_type}
+ {
+ "input": text,
+ "model": self.model,
+ "input_type": input_type,
+ }
)
headers = {"Content-Type": "application/json"}
diff --git a/libs/community/langchain_community/embeddings/openvino.py b/libs/community/langchain_community/embeddings/openvino.py
index 3dbbdef45f1a6..930453247c349 100644
--- a/libs/community/langchain_community/embeddings/openvino.py
+++ b/libs/community/langchain_community/embeddings/openvino.py
@@ -166,19 +166,19 @@ def encode(
import numpy as np
except ImportError as e:
raise ImportError(
- "Unable to import numpy, please install with " "`pip install -U numpy`."
+ "Unable to import numpy, please install with `pip install -U numpy`."
) from e
try:
from tqdm import trange
except ImportError as e:
raise ImportError(
- "Unable to import tqdm, please install with " "`pip install -U tqdm`."
+ "Unable to import tqdm, please install with `pip install -U tqdm`."
) from e
try:
import torch
except ImportError as e:
raise ImportError(
- "Unable to import torch, please install with " "`pip install -U torch`."
+ "Unable to import torch, please install with `pip install -U torch`."
) from e
def run_mean_pooling(model_output: Any, attention_mask: Any) -> Any:
diff --git a/libs/community/langchain_community/embeddings/spacy_embeddings.py b/libs/community/langchain_community/embeddings/spacy_embeddings.py
index 1d7c6ed4e95ad..cd862d3ba901f 100644
--- a/libs/community/langchain_community/embeddings/spacy_embeddings.py
+++ b/libs/community/langchain_community/embeddings/spacy_embeddings.py
@@ -48,8 +48,7 @@ def validate_environment(cls, values: Dict) -> Any:
# Check if the spaCy package is installed
if importlib.util.find_spec("spacy") is None:
raise ValueError(
- "SpaCy package not found. "
- "Please install it with `pip install spacy`."
+ "SpaCy package not found. Please install it with `pip install spacy`."
)
try:
# Try to load the spaCy model
diff --git a/libs/community/langchain_community/graphs/falkordb_graph.py b/libs/community/langchain_community/graphs/falkordb_graph.py
index 99e3e4592b3e9..56ce03c1f9a09 100644
--- a/libs/community/langchain_community/graphs/falkordb_graph.py
+++ b/libs/community/langchain_community/graphs/falkordb_graph.py
@@ -167,7 +167,7 @@ def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
data = self._graph.query(query, params)
return data.result_set
except Exception as e:
- raise ValueError("Generated Cypher Statement is not valid\n" f"{e}")
+ raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
diff --git a/libs/community/langchain_community/graphs/gremlin_graph.py b/libs/community/langchain_community/graphs/gremlin_graph.py
index 934ccb3f53c99..d2c1a1725dce2 100644
--- a/libs/community/langchain_community/graphs/gremlin_graph.py
+++ b/libs/community/langchain_community/graphs/gremlin_graph.py
@@ -57,7 +57,7 @@ def __init__(
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
except ImportError:
raise ImportError(
- "Please install gremlin-python first: " "`pip3 install gremlinpython"
+                "Please install gremlin-python first: `pip3 install gremlinpython`"
)
self.client = client.Client(
diff --git a/libs/community/langchain_community/graphs/kuzu_graph.py b/libs/community/langchain_community/graphs/kuzu_graph.py
index 3fe3d60c283c2..b658d9510df60 100644
--- a/libs/community/langchain_community/graphs/kuzu_graph.py
+++ b/libs/community/langchain_community/graphs/kuzu_graph.py
@@ -78,7 +78,10 @@ def refresh_schema(self) -> None:
list_type_flag += "[]"
property_type += list_type_flag
current_table_schema["properties"].append(
- (property_name, property_type)
+ (
+ property_name,
+ property_type,
+ )
)
node_properties.append(current_table_schema)
diff --git a/libs/community/langchain_community/graphs/memgraph_graph.py b/libs/community/langchain_community/graphs/memgraph_graph.py
index fa829a0e5db81..4180b49ce3d49 100644
--- a/libs/community/langchain_community/graphs/memgraph_graph.py
+++ b/libs/community/langchain_community/graphs/memgraph_graph.py
@@ -488,8 +488,7 @@ def add_graph_documents(
if baseEntityLabel:
self.query(
- f"CREATE CONSTRAINT ON (b:{BASE_ENTITY_LABEL}) "
- "ASSERT b.id IS UNIQUE;"
+ f"CREATE CONSTRAINT ON (b:{BASE_ENTITY_LABEL}) ASSERT b.id IS UNIQUE;"
)
self.query(f"CREATE INDEX ON :{BASE_ENTITY_LABEL}(id);")
self.query(f"CREATE INDEX ON :{BASE_ENTITY_LABEL};")
diff --git a/libs/community/langchain_community/graphs/nebula_graph.py b/libs/community/langchain_community/graphs/nebula_graph.py
index 31b69fffafc61..81634fd6ead20 100644
--- a/libs/community/langchain_community/graphs/nebula_graph.py
+++ b/libs/community/langchain_community/graphs/nebula_graph.py
@@ -69,7 +69,13 @@ def __init__(
def _get_session_pool(self) -> Any:
assert all(
- [self.username, self.password, self.address, self.port, self.space]
+ [
+ self.username,
+ self.password,
+ self.address,
+ self.port,
+ self.space,
+ ]
), (
"Please provide all of the following parameters: "
"username, password, address, port, space"
diff --git a/libs/community/langchain_community/graphs/neo4j_graph.py b/libs/community/langchain_community/graphs/neo4j_graph.py
index dd2a7937f7f81..f96cd276967d1 100644
--- a/libs/community/langchain_community/graphs/neo4j_graph.py
+++ b/libs/community/langchain_community/graphs/neo4j_graph.py
@@ -204,7 +204,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
example = (
(
"Available options: "
- f'{[clean_string_values(el) for el in prop["values"]]}'
+ f"{[clean_string_values(el) for el in prop['values']]}"
)
if prop["values"]
else ""
@@ -218,7 +218,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
"LOCAL_DATE_TIME",
]:
if prop.get("min") is not None:
- example = f'Min: {prop["min"]}, Max: {prop["max"]}'
+ example = f"Min: {prop['min']}, Max: {prop['max']}"
else:
example = (
f'Example: "{prop["values"][0]}"'
@@ -230,7 +230,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
if not prop.get("min_size") or prop["min_size"] > LIST_LIMIT:
continue
example = (
- f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}'
+ f"Min Size: {prop['min_size']}, Max Size: {prop['max_size']}"
)
formatted_node_props.append(
f" - `{prop['property']}`: {prop['type']} {example}"
@@ -252,7 +252,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
example = (
(
"Available options: "
- f'{[clean_string_values(el) for el in prop["values"]]}'
+ f"{[clean_string_values(el) for el in prop['values']]}"
)
if prop["values"]
else ""
@@ -265,7 +265,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
"LOCAL_DATE_TIME",
]:
if prop.get("min"): # If we have min/max
- example = f'Min: {prop["min"]}, Max: {prop["max"]}'
+ example = f"Min: {prop['min']}, Max: {prop['max']}"
else: # return a single value
example = (
f'Example: "{prop["values"][0]}"' if prop["values"] else ""
@@ -275,7 +275,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str:
if not prop.get("min_size") or prop["min_size"] > LIST_LIMIT:
continue
example = (
- f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}'
+ f"Min Size: {prop['min_size']}, Max Size: {prop['max_size']}"
)
formatted_rel_props.append(
f" - `{prop['property']}: {prop['type']}` {example}"
diff --git a/libs/community/langchain_community/graphs/neptune_rdf_graph.py b/libs/community/langchain_community/graphs/neptune_rdf_graph.py
index f560768a15ccc..7f2aefac96ed2 100644
--- a/libs/community/langchain_community/graphs/neptune_rdf_graph.py
+++ b/libs/community/langchain_community/graphs/neptune_rdf_graph.py
@@ -249,7 +249,7 @@ def _get_local_name(self, iri: str) -> Sequence[str]:
return [f"{tokens[0]}#", tokens[-1]]
elif "/" in iri:
tokens = iri.split("/")
- return [f"{'/'.join(tokens[0:len(tokens)-1])}/", tokens[-1]]
+ return [f"{'/'.join(tokens[0 : len(tokens) - 1])}/", tokens[-1]]
else:
raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")
diff --git a/libs/community/langchain_community/graphs/rdf_graph.py b/libs/community/langchain_community/graphs/rdf_graph.py
index edc46d5d18d6d..ca8595c1620ff 100644
--- a/libs/community/langchain_community/graphs/rdf_graph.py
+++ b/libs/community/langchain_community/graphs/rdf_graph.py
@@ -217,7 +217,7 @@ def query(
try:
res = self.graph.query(query)
except ParserError as e:
- raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
+ raise ValueError(f"Generated SPARQL statement is invalid\n{e}")
return [r for r in res if isinstance(r, ResultRow)]
def update(
@@ -232,7 +232,7 @@ def update(
try:
self.graph.update(query)
except ParserError as e:
- raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
+ raise ValueError(f"Generated SPARQL statement is invalid\n{e}")
if self.local_copy:
self.graph.serialize(
destination=self.local_copy, format=self.local_copy.split(".")[-1]
@@ -274,9 +274,9 @@ def _rdf_s_schema(
f"In the following, each IRI is followed by the local name and "
f"optionally its description in parentheses. \n"
f"The RDF graph supports the following node types:\n"
- f'{", ".join([self._res_to_str(r, "cls") for r in classes])}\n'
+ f"{', '.join([self._res_to_str(r, 'cls') for r in classes])}\n"
f"The RDF graph supports the following relationships:\n"
- f'{", ".join([self._res_to_str(r, "rel") for r in relationships])}\n'
+ f"{', '.join([self._res_to_str(r, 'rel') for r in relationships])}\n"
)
if self.standard == "rdf":
@@ -295,13 +295,13 @@ def _rdf_s_schema(
f"In the following, each IRI is followed by the local name and "
f"optionally its description in parentheses. \n"
f"The OWL graph supports the following node types:\n"
- f'{", ".join([self._res_to_str(r, "cls") for r in clss])}\n'
+ f"{', '.join([self._res_to_str(r, 'cls') for r in clss])}\n"
f"The OWL graph supports the following object properties, "
f"i.e., relationships between objects:\n"
- f'{", ".join([self._res_to_str(r, "op") for r in ops])}\n'
+ f"{', '.join([self._res_to_str(r, 'op') for r in ops])}\n"
f"The OWL graph supports the following data properties, "
f"i.e., relationships between objects and literals:\n"
- f'{", ".join([self._res_to_str(r, "dp") for r in dps])}\n'
+ f"{', '.join([self._res_to_str(r, 'dp') for r in dps])}\n"
)
else:
raise ValueError(f"Mode '{self.standard}' is currently not supported.")
diff --git a/libs/community/langchain_community/indexes/_document_manager.py b/libs/community/langchain_community/indexes/_document_manager.py
index 64fbe1b94616e..45dc2476ff080 100644
--- a/libs/community/langchain_community/indexes/_document_manager.py
+++ b/libs/community/langchain_community/indexes/_document_manager.py
@@ -27,7 +27,7 @@ def _get_pymongo_client(mongodb_url: str, **kwargs: Any) -> Any:
client = pymongo(mongodb_url, **kwargs)
except ValueError as e:
raise ImportError(
- f"MongoClient string provided is not in proper format. " f"Got error: {e} "
+ f"MongoClient string provided is not in proper format. Got error: {e} "
)
return client
@@ -221,11 +221,17 @@ async def alist_keys(
def delete_keys(self, keys: Sequence[str]) -> None:
"""Delete documents from the MongoDB collection."""
self.sync_collection.delete_many(
- {"namespace": self.namespace, "key": {"$in": keys}}
+ {
+ "namespace": self.namespace,
+ "key": {"$in": keys},
+ }
)
async def adelete_keys(self, keys: Sequence[str]) -> None:
"""Asynchronously delete documents from the MongoDB collection."""
await self.async_collection.delete_many(
- {"namespace": self.namespace, "key": {"$in": keys}}
+ {
+ "namespace": self.namespace,
+ "key": {"$in": keys},
+ }
)
diff --git a/libs/community/langchain_community/llms/azureml_endpoint.py b/libs/community/langchain_community/llms/azureml_endpoint.py
index c09ea2edcf482..5b4d39bbc4dff 100644
--- a/libs/community/langchain_community/llms/azureml_endpoint.py
+++ b/libs/community/langchain_community/llms/azureml_endpoint.py
@@ -181,7 +181,10 @@ def format_request_payload( # type: ignore[override]
) -> bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
- {"inputs": {"input_string": [f'"{prompt}"']}, "parameters": model_kwargs}
+ {
+ "inputs": {"input_string": [f'"{prompt}"']},
+ "parameters": model_kwargs,
+ }
)
return str.encode(request_payload)
@@ -223,7 +226,10 @@ def format_request_payload( # type: ignore[override]
) -> bytes:
ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps(
- {"inputs": [f'"{prompt}"'], "parameters": model_kwargs}
+ {
+ "inputs": [f'"{prompt}"'],
+ "parameters": model_kwargs,
+ }
)
return str.encode(request_payload)
diff --git a/libs/community/langchain_community/llms/bedrock.py b/libs/community/langchain_community/llms/bedrock.py
index 4c5fbdc476d66..e1b6570cf3083 100644
--- a/libs/community/langchain_community/llms/bedrock.py
+++ b/libs/community/langchain_community/llms/bedrock.py
@@ -454,8 +454,7 @@ def _get_provider(self) -> str:
return self.provider
if self.model_id.startswith("arn"):
raise ValueError(
- "Model provider should be supplied when passing a model ARN as "
- "model_id"
+ "Model provider should be supplied when passing a model ARN as model_id"
)
return self.model_id.split(".")[0]
diff --git a/libs/community/langchain_community/llms/databricks.py b/libs/community/langchain_community/llms/databricks.py
index 3f7aa42f2fda6..4b656192e2b51 100644
--- a/libs/community/langchain_community/llms/databricks.py
+++ b/libs/community/langchain_community/llms/databricks.py
@@ -457,12 +457,12 @@ def set_cluster_id(cls, values: Dict[str, Any]) -> dict:
pass
if model_kwargs := values.get("model_kwargs"):
- assert (
- "prompt" not in model_kwargs
- ), "model_kwargs must not contain key 'prompt'"
- assert (
- "stop" not in model_kwargs
- ), "model_kwargs must not contain key 'stop'"
+ assert "prompt" not in model_kwargs, (
+ "model_kwargs must not contain key 'prompt'"
+ )
+ assert "stop" not in model_kwargs, (
+ "model_kwargs must not contain key 'stop'"
+ )
return values
def __init__(self, **data: Any):
diff --git a/libs/community/langchain_community/llms/deepinfra.py b/libs/community/langchain_community/llms/deepinfra.py
index 47551dd4e6520..bd0e21df5260b 100644
--- a/libs/community/langchain_community/llms/deepinfra.py
+++ b/libs/community/langchain_community/llms/deepinfra.py
@@ -97,8 +97,7 @@ def _handle_status(self, code: int, text: Any) -> None:
raise ValueError(f"DeepInfra received an invalid payload: {text}")
elif code != 200:
raise Exception(
- f"DeepInfra returned an unexpected response with status "
- f"{code}: {text}"
+ f"DeepInfra returned an unexpected response with status {code}: {text}"
)
def _call(
diff --git a/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py b/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py
index e18a1a0847d0f..83001e6e6ef1d 100644
--- a/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py
+++ b/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py
@@ -135,7 +135,10 @@ def _headers(
if self.streaming:
headers.update(
- {"enable-streaming": "true", "Accept": "text/event-stream"}
+ {
+ "enable-streaming": "true",
+ "Accept": "text/event-stream",
+ }
)
return headers
diff --git a/libs/community/langchain_community/llms/oci_generative_ai.py b/libs/community/langchain_community/llms/oci_generative_ai.py
index a9f48a97528ba..ee355867e9631 100644
--- a/libs/community/langchain_community/llms/oci_generative_ai.py
+++ b/libs/community/langchain_community/llms/oci_generative_ai.py
@@ -274,8 +274,7 @@ def _prepare_invocation_object(
if self.model_id is None:
raise ValueError(
- "model_id is required to call the model, "
- "please provide the model_id."
+ "model_id is required to call the model, please provide the model_id."
)
if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
diff --git a/libs/community/langchain_community/llms/outlines.py b/libs/community/langchain_community/llms/outlines.py
index 97ea7a1175b99..7636084a6eb80 100644
--- a/libs/community/langchain_community/llms/outlines.py
+++ b/libs/community/langchain_community/llms/outlines.py
@@ -195,7 +195,13 @@ def check_packages_installed(
self.client = models.transformers(self.model, **self.model_kwargs)
elif self.backend == "transformers_vision":
check_packages_installed(
- ["transformers", "datasets", "torchvision", "PIL", "flash_attn"]
+ [
+ "transformers",
+ "datasets",
+ "torchvision",
+ "PIL",
+ "flash_attn",
+ ]
)
from transformers import LlavaNextForConditionalGeneration
diff --git a/libs/community/langchain_community/llms/sambanova.py b/libs/community/langchain_community/llms/sambanova.py
index 994fb7888c875..18f2810262a21 100644
--- a/libs/community/langchain_community/llms/sambanova.py
+++ b/libs/community/langchain_community/llms/sambanova.py
@@ -711,7 +711,7 @@ def _handle_request(
}
data = {key: value for key, value in data.items() if value is not None}
headers = {
- "Authorization": f"Bearer " f"{self.sambanova_api_key.get_secret_value()}",
+ "Authorization": f"Bearer {self.sambanova_api_key.get_secret_value()}",
"Content-Type": "application/json",
}
diff --git a/libs/community/langchain_community/llms/self_hosted_hugging_face.py b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
index 016da2e48f9be..e43ca9e312454 100644
--- a/libs/community/langchain_community/llms/self_hosted_hugging_face.py
+++ b/libs/community/langchain_community/llms/self_hosted_hugging_face.py
@@ -69,8 +69,7 @@ def _load_transformer(
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
- f"Got invalid task {task}, "
- f"currently only {VALID_TASKS} are supported"
+ f"Got invalid task {task}, currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ImportError(
diff --git a/libs/community/langchain_community/memory/kg.py b/libs/community/langchain_community/memory/kg.py
index 2a4828d81ff8d..f60e4f5b75842 100644
--- a/libs/community/langchain_community/memory/kg.py
+++ b/libs/community/langchain_community/memory/kg.py
@@ -134,6 +134,7 @@ def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
+
except ImportError:
# Placeholder object
class ConversationKGMemory: # type: ignore[no-redef]
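
Reviewer note: the memory modules define the real class inside a `try:` and a placeholder in the `except ImportError:` branch; the hunks here and in the next three files only add a blank line between the guarded class body and the `except`. A minimal sketch of that import-guard pattern, with hypothetical module and class names:

```python
try:
    import some_optional_dependency  # hypothetical import; raises if not installed

    class OptionalMemory:
        """Real implementation, available when the dependency imports."""

        def clear(self) -> None: ...

except ImportError:
    # Placeholder so `from module import OptionalMemory` still succeeds.
    class OptionalMemory:  # type: ignore[no-redef]
        def __init__(self) -> None:
            raise ImportError("Install some_optional_dependency to use OptionalMemory.")
```
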
diff --git a/libs/community/langchain_community/memory/motorhead_memory.py b/libs/community/langchain_community/memory/motorhead_memory.py
index 88c96bff59423..3b41be424e931 100644
--- a/libs/community/langchain_community/memory/motorhead_memory.py
+++ b/libs/community/langchain_community/memory/motorhead_memory.py
@@ -94,6 +94,7 @@ def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
def delete_session(self) -> None:
"""Delete a session"""
requests.delete(f"{self.url}/sessions/{self.session_id}/memory")
+
except ImportError:
# Placeholder object
class MotorheadMemory: # type: ignore[no-redef]
diff --git a/libs/community/langchain_community/memory/zep_cloud_memory.py b/libs/community/langchain_community/memory/zep_cloud_memory.py
index cc65c11c8a79d..538fcb3b2c7ac 100644
--- a/libs/community/langchain_community/memory/zep_cloud_memory.py
+++ b/libs/community/langchain_community/memory/zep_cloud_memory.py
@@ -118,6 +118,7 @@ def save_context(
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
+
except ImportError:
# Placeholder object
class ZepCloudMemory: # type: ignore[no-redef]
diff --git a/libs/community/langchain_community/memory/zep_memory.py b/libs/community/langchain_community/memory/zep_memory.py
index 3f05febc3ae6c..e4cae4488382e 100644
--- a/libs/community/langchain_community/memory/zep_memory.py
+++ b/libs/community/langchain_community/memory/zep_memory.py
@@ -123,6 +123,7 @@ def save_context(
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str, metadata=metadata)
self.chat_memory.add_ai_message(output_str, metadata=metadata)
+
except ImportError:
# Placeholder object
class ZepMemory: # type: ignore[no-redef]
diff --git a/libs/community/langchain_community/retrievers/nanopq.py b/libs/community/langchain_community/retrievers/nanopq.py
index ca4162240d098..274ad4b42e15f 100644
--- a/libs/community/langchain_community/retrievers/nanopq.py
+++ b/libs/community/langchain_community/retrievers/nanopq.py
@@ -86,7 +86,7 @@ def _get_relevant_documents(
from nanopq import PQ
except ImportError:
raise ImportError(
- "Could not import nanopq, please install with `pip install " "nanopq`."
+ "Could not import nanopq, please install with `pip install nanopq`."
)
query_embeds = np.array(self.embeddings.embed_query(query))
diff --git a/libs/community/langchain_community/storage/astradb.py b/libs/community/langchain_community/storage/astradb.py
index 8ed1246a0743d..be6a6a32d03a8 100644
--- a/libs/community/langchain_community/storage/astradb.py
+++ b/libs/community/langchain_community/storage/astradb.py
@@ -70,7 +70,10 @@ async def amset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None:
await self.astra_env.aensure_db_setup()
for k, v in key_value_pairs:
await self.async_collection.upsert(
- {"_id": k, "value": self.encode_value(v)}
+ {
+ "_id": k,
+ "value": self.encode_value(v),
+ }
)
def mdelete(self, keys: Sequence[str]) -> None:
diff --git a/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py b/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
index b47b29210fde6..b7fcd310e9cdf 100644
--- a/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
+++ b/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py
@@ -83,7 +83,7 @@ def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
- ', '.join(text_analysis_result['entities'])
+ ", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py b/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
index 0827cb9b93c18..53463f4d592ab 100644
--- a/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
+++ b/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py
@@ -83,7 +83,7 @@ def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
if "entities" in text_analysis_result:
formatted_result.append(
f"""The text contains the following healthcare entities: {
- ', '.join(text_analysis_result['entities'])
+ ", ".join(text_analysis_result["entities"])
}""".replace("\n", " ")
)
diff --git a/libs/community/langchain_community/tools/databricks/_execution.py b/libs/community/langchain_community/tools/databricks/_execution.py
index 62e8414fe49a3..09693e5510754 100644
--- a/libs/community/langchain_community/tools/databricks/_execution.py
+++ b/libs/community/langchain_community/tools/databricks/_execution.py
@@ -66,9 +66,9 @@ def get_execute_function_sql_stmt(
else:
parts.append(f"SELECT * FROM {function.full_name}(")
if function.input_params is None or function.input_params.parameters is None:
- assert (
- not json_params
- ), "Function has no parameters but parameters were provided."
+ assert not json_params, (
+ "Function has no parameters but parameters were provided."
+ )
else:
args = []
use_named_args = False
@@ -213,17 +213,17 @@ def execute_function(
assert response.status is not None, f"Statement execution failed: {response}"
if response.status.state != StatementState.SUCCEEDED:
error = response.status.error
- assert (
- error is not None
- ), f"Statement execution failed but no error message was provided: {response}"
+ assert error is not None, (
+ f"Statement execution failed but no error message was provided: {response}"
+ )
return FunctionExecutionResult(error=f"{error.error_code}: {error.message}")
manifest = response.manifest
assert manifest is not None
truncated = manifest.truncated
result = response.result
- assert (
- result is not None
- ), "Statement execution succeeded but no result was provided."
+ assert result is not None, (
+ "Statement execution succeeded but no result was provided."
+ )
data_array = result.data_array
if is_scalar(function):
value = None
@@ -234,9 +234,9 @@ def execute_function(
)
else:
schema = manifest.schema
- assert (
- schema is not None and schema.columns is not None
- ), "Statement execution succeeded but no schema was provided."
+ assert schema is not None and schema.columns is not None, (
+ "Statement execution succeeded but no schema was provided."
+ )
columns = [c.name for c in schema.columns]
if data_array is None:
data_array = []
diff --git a/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py b/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
index b4a218dc3daa6..b3a70d4e6424a 100644
--- a/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
+++ b/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py
@@ -70,7 +70,7 @@ def _wait_processing(self, url: str) -> requests.Response:
if temp["results"][self.providers[0]]["error"] is not None:
raise Exception(
f"""EdenAI returned an unexpected response
- {temp['results'][self.providers[0]]['error']}"""
+ {temp["results"][self.providers[0]]["error"]}"""
)
else:
return audio_analysis_result
diff --git a/libs/community/langchain_community/tools/edenai/image_objectdetection.py b/libs/community/langchain_community/tools/edenai/image_objectdetection.py
index cb8209f577cbc..ed4daac712dae 100644
--- a/libs/community/langchain_community/tools/edenai/image_objectdetection.py
+++ b/libs/community/langchain_community/tools/edenai/image_objectdetection.py
@@ -53,7 +53,12 @@ def _parse_json(self, json_data: dict) -> str:
y_min = found_obj.get("y_min")
y_max = found_obj.get("y_max")
if self.show_positions and all(
- [x_min, x_max, y_min, y_max]
+ [
+ x_min,
+ x_max,
+ y_min,
+ y_max,
+ ]
): # some providers don't return positions
label_str += f""",at the position x_min: {x_min}, x_max: {x_max},
y_min: {y_min}, y_max: {y_max}"""
diff --git a/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py b/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
index 0b02b549ad6c2..4be8125cb5a44 100644
--- a/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
+++ b/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py
@@ -20,7 +20,7 @@ class BalanceSheetsSchema(BaseModel):
"Default is 'annual'.",
)
limit: int = Field(
- description="The number of balance sheets to return. " "Default is 10.",
+ description="The number of balance sheets to return. Default is 10.",
)
diff --git a/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py b/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
index a627300a3a433..5f621085a1838 100644
--- a/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
+++ b/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py
@@ -20,7 +20,7 @@ class CashFlowStatementsSchema(BaseModel):
"Default is 'annual'.",
)
limit: int = Field(
- description="The number of cash flow statements to return. " "Default is 10.",
+ description="The number of cash flow statements to return. Default is 10.",
)
diff --git a/libs/community/langchain_community/tools/financial_datasets/income_statements.py b/libs/community/langchain_community/tools/financial_datasets/income_statements.py
index dc06d358df117..d181ef7a1c1f6 100644
--- a/libs/community/langchain_community/tools/financial_datasets/income_statements.py
+++ b/libs/community/langchain_community/tools/financial_datasets/income_statements.py
@@ -20,7 +20,7 @@ class IncomeStatementsSchema(BaseModel):
"Default is 'annual'.",
)
limit: int = Field(
- description="The number of income statements to return. " "Default is 10.",
+ description="The number of income statements to return. Default is 10.",
)
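
The three financial_datasets hunks collapse implicit string-literal concatenation into a single literal. The standalone sketch below shows why the collapse is behavior-preserving, and the separator-space pitfall that single literals make easy to spot:

    # Adjacent string literals are concatenated at compile time, so the old
    # and new forms are identical at runtime:
    old = "The number of balance sheets to return. " "Default is 10."
    new = "The number of balance sheets to return. Default is 10."
    assert old == new

    # The pitfall the single-literal form makes visible: a first literal with
    # no trailing space runs the words together silently.
    broken = "you must specify an embedding function on" "creation."
    assert broken.endswith("function oncreation.")
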
diff --git a/libs/community/langchain_community/tools/gmail/create_draft.py b/libs/community/langchain_community/tools/gmail/create_draft.py
index b35b0f8758025..a1cb44cd3cf9a 100644
--- a/libs/community/langchain_community/tools/gmail/create_draft.py
+++ b/libs/community/langchain_community/tools/gmail/create_draft.py
@@ -81,7 +81,7 @@ def _run(
.create(userId="me", body=create_message)
.execute()
)
- output = f'Draft created. Draft Id: {draft["id"]}'
+ output = f"Draft created. Draft Id: {draft['id']}"
return output
except Exception as e:
raise Exception(f"An error occurred: {e}")
diff --git a/libs/community/langchain_community/tools/gmail/send_message.py b/libs/community/langchain_community/tools/gmail/send_message.py
index 0d9ab4634993b..fadab49014434 100644
--- a/libs/community/langchain_community/tools/gmail/send_message.py
+++ b/libs/community/langchain_community/tools/gmail/send_message.py
@@ -41,7 +41,7 @@ class GmailSendMessage(GmailBaseTool): # type: ignore[override, override]
name: str = "send_gmail_message"
description: str = (
- "Use this tool to send email messages." " The input is the message, recipients"
+ "Use this tool to send email messages. The input is the message, recipients"
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
@@ -86,6 +86,6 @@ def _run(
.send(userId="me", body=create_message)
)
sent_message = send_message.execute()
- return f'Message sent. Message Id: {sent_message["id"]}'
+ return f"Message sent. Message Id: {sent_message['id']}"
except Exception as error:
raise Exception(f"An error occurred: {error}")
diff --git a/libs/community/langchain_community/tools/nuclia/tool.py b/libs/community/langchain_community/tools/nuclia/tool.py
index 02cb335367a53..8bae0929a744c 100644
--- a/libs/community/langchain_community/tools/nuclia/tool.py
+++ b/libs/community/langchain_community/tools/nuclia/tool.py
@@ -161,7 +161,7 @@ def _pushField(self, id: str, field: Any) -> str:
)
if response.status_code != 200:
logger.info(
- f"Error pushing field {id}:" f"{response.status_code} {response.text}"
+                f"Error pushing field {id}: {response.status_code} {response.text}"
)
raise ValueError("Error pushing field")
else:
@@ -177,7 +177,7 @@ def _pull(self, id: str) -> str:
logger.info(f"{id} not in queue")
return ""
elif result["status"] == "pending":
- logger.info(f'Waiting for {result["uuid"]} to be processed')
+ logger.info(f"Waiting for {result['uuid']} to be processed")
return ""
else:
return result["data"]
diff --git a/libs/community/langchain_community/tools/office365/send_event.py b/libs/community/langchain_community/tools/office365/send_event.py
index e7513025ac1f4..052fc19c0e3d2 100644
--- a/libs/community/langchain_community/tools/office365/send_event.py
+++ b/libs/community/langchain_community/tools/office365/send_event.py
@@ -6,10 +6,10 @@
from datetime import datetime as dt
from typing import List, Optional, Type
+from zoneinfo import ZoneInfo
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
-from zoneinfo import ZoneInfo
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT
diff --git a/libs/community/langchain_community/tools/openapi/utils/api_models.py b/libs/community/langchain_community/tools/openapi/utils/api_models.py
index fef0b849ae37d..8358305464d7f 100644
--- a/libs/community/langchain_community/tools/openapi/utils/api_models.py
+++ b/libs/community/langchain_community/tools/openapi/utils/api_models.py
@@ -576,8 +576,7 @@ def _format_nested_properties(
prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"
formatted_props.append(
- f"{prop_desc}\n{' ' * indent}{prop_name}"
- f"{prop_required}: {prop_type},"
+ f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type},"
)
return "\n".join(formatted_props)
diff --git a/libs/community/langchain_community/utilities/bibtex.py b/libs/community/langchain_community/utilities/bibtex.py
index a3bf82ab73889..050b3b6102509 100644
--- a/libs/community/langchain_community/utilities/bibtex.py
+++ b/libs/community/langchain_community/utilities/bibtex.py
@@ -70,7 +70,7 @@ def get_metadata(
if "url" in entry:
url = entry["url"]
elif "doi" in entry:
- url = f'https://doi.org/{entry["doi"]}'
+ url = f"https://doi.org/{entry['doi']}"
else:
url = None
meta = {
diff --git a/libs/community/langchain_community/utilities/cassandra_database.py b/libs/community/langchain_community/utilities/cassandra_database.py
index 4ec1973b1668e..de34ce786b356 100644
--- a/libs/community/langchain_community/utilities/cassandra_database.py
+++ b/libs/community/langchain_community/utilities/cassandra_database.py
@@ -433,7 +433,7 @@ def _resolve_session(
import cassio.config
except ImportError:
raise ValueError(
- "cassio package not found, please install with" " `pip install cassio`"
+ "cassio package not found, please install with `pip install cassio`"
)
# Use pre-existing session on cassio
@@ -610,7 +610,11 @@ def _resolve_columns(
partition_info.append((row["column_name"], row["position"]))
elif row["kind"] == "clustering":
cluster_info.append(
- (row["column_name"], row["clustering_order"], row["position"])
+ (
+ row["column_name"],
+ row["clustering_order"],
+ row["position"],
+ )
)
partition = [
diff --git a/libs/community/langchain_community/utilities/clickup.py b/libs/community/langchain_community/utilities/clickup.py
index f5f00b22a0884..ef4d6140998ba 100644
--- a/libs/community/langchain_community/utilities/clickup.py
+++ b/libs/community/langchain_community/utilities/clickup.py
@@ -460,7 +460,7 @@ def get_task_attribute(self, query: str) -> Dict:
if params["attribute_name"] not in task:
return {
- "Error": f"""attribute_name = {params['attribute_name']} was not
+ "Error": f"""attribute_name = {params["attribute_name"]} was not
found in task keys {task.keys()}. Please call again with one of the key names."""
}
diff --git a/libs/community/langchain_community/utilities/github.py b/libs/community/langchain_community/utilities/github.py
index c50a1a77c43ed..89cd366bfc3b5 100644
--- a/libs/community/langchain_community/utilities/github.py
+++ b/libs/community/langchain_community/utilities/github.py
@@ -20,8 +20,7 @@ def _import_tiktoken() -> Any:
import tiktoken
except ImportError:
raise ImportError(
- "tiktoken is not installed. "
- "Please install it with `pip install tiktoken`"
+ "tiktoken is not installed. Please install it with `pip install tiktoken`"
)
return tiktoken
@@ -90,8 +89,7 @@ def validate_environment(cls, values: Dict) -> Any:
installation = installation[0]
except ValueError as e:
raise ValueError(
- "Please make sure to give correct github parameters "
- f"Error message: {e}"
+                f"Please make sure to give correct github parameters. Error message: {e}"
)
# create a GitHub instance:
g = installation.get_github_for_installation()
@@ -257,8 +255,7 @@ def list_branches_in_repo(self) -> str:
if branches:
branches_str = "\n".join(branches)
return (
- f"Found {len(branches)} branches in the repository:"
- f"\n{branches_str}"
+ f"Found {len(branches)} branches in the repository:\n{branches_str}"
)
else:
return "No branches found in the repository"
@@ -774,8 +771,7 @@ def search_code(self, query: str) -> str:
code.path, ref=self.active_branch
).decoded_content.decode()
results.append(
- f"Filepath: `{code.path}`\nFile contents: "
- f"{file_content}\n"
+ f"Filepath: `{code.path}`\nFile contents: {file_content}\n"
)
count += 1
return "\n".join(results)
@@ -839,9 +835,7 @@ def get_releases(self) -> str:
results = [f"Top {max_results} results:"]
for release in releases[:max_results]:
results.append(
- f"Title: {release.title}, "
- f"Tag: {release.tag_name}, "
- f"Body: {release.body}"
+ f"Title: {release.title}, Tag: {release.tag_name}, Body: {release.body}"
)
return "\n".join(results)
@@ -857,11 +851,7 @@ def get_release(self, tag_name: str) -> str:
str: The release
"""
release = self.github_repo_instance.get_release(tag_name)
- return (
- f"Release: {release.title} "
- f"tag: {release.tag_name} "
- f"body: {release.body}"
- )
+ return f"Release: {release.title} tag: {release.tag_name} body: {release.body}"
def run(self, mode: str, query: str) -> str:
if mode == "get_issue":
diff --git a/libs/community/langchain_community/utilities/gitlab.py b/libs/community/langchain_community/utilities/gitlab.py
index b33ddc1180306..9fa20370d4cf7 100644
--- a/libs/community/langchain_community/utilities/gitlab.py
+++ b/libs/community/langchain_community/utilities/gitlab.py
@@ -135,7 +135,10 @@ def get_issue(self, issue_number: int) -> Dict[str, Any]:
for comment in comments_page:
comment = issue.notes.get(comment.id)
comments.append(
- {"body": comment.body, "user": comment.author["username"]}
+ {
+ "body": comment.body,
+ "user": comment.author["username"],
+ }
)
page += 1
diff --git a/libs/community/langchain_community/utilities/google_places_api.py b/libs/community/langchain_community/utilities/google_places_api.py
index b0c6f152bd26b..423aeee6ec02f 100644
--- a/libs/community/langchain_community/utilities/google_places_api.py
+++ b/libs/community/langchain_community/utilities/google_places_api.py
@@ -84,7 +84,7 @@ def run(self, query: str) -> str:
if details is not None:
places.append(details)
- return "\n".join([f"{i+1}. {item}" for i, item in enumerate(places)])
+ return "\n".join([f"{i + 1}. {item}" for i, item in enumerate(places)])
def fetch_place_details(self, place_id: str) -> Optional[str]:
try:
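
The one-character change above applies the same normalization to arithmetic: expressions inside replacement fields are now formatted like ordinary expressions, so binary operators gain surrounding spaces. A runnable sketch:

    places = ["Cafe A", "Cafe B"]

    # Old: f"{i+1}. {item}" packed the operator; the new form spaces it like
    # any ordinary expression.
    listing = "\n".join([f"{i + 1}. {item}" for i, item in enumerate(places)])
    print(listing)
    # 1. Cafe A
    # 2. Cafe B
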
diff --git a/libs/community/langchain_community/utilities/google_scholar.py b/libs/community/langchain_community/utilities/google_scholar.py
index ffc94848a61a5..2285f086891c9 100644
--- a/libs/community/langchain_community/utilities/google_scholar.py
+++ b/libs/community/langchain_community/utilities/google_scholar.py
@@ -121,10 +121,10 @@ def run(self, query: str) -> str:
if not total_results:
return "No good Google Scholar Result was found"
docs = [
- f"Title: {result.get('title','')}\n"
- f"Authors: {','.join([author.get('name') for author in result.get('publication_info',{}).get('authors',[])])}\n" # noqa: E501
- f"Summary: {result.get('publication_info',{}).get('summary','')}\n"
- f"Total-Citations: {result.get('inline_links',{}).get('cited_by',{}).get('total','')}" # noqa: E501
+ f"Title: {result.get('title', '')}\n"
+ f"Authors: {','.join([author.get('name') for author in result.get('publication_info', {}).get('authors', [])])}\n" # noqa: E501
+ f"Summary: {result.get('publication_info', {}).get('summary', '')}\n"
+ f"Total-Citations: {result.get('inline_links', {}).get('cited_by', {}).get('total', '')}" # noqa: E501
for result in total_results
]
return "\n\n".join(docs)
diff --git a/libs/community/langchain_community/utilities/jira.py b/libs/community/langchain_community/utilities/jira.py
index d96b72efb1815..1609a3c40bc4f 100644
--- a/libs/community/langchain_community/utilities/jira.py
+++ b/libs/community/langchain_community/utilities/jira.py
@@ -123,7 +123,13 @@ def parse_projects(self, projects: List[dict]) -> List[dict]:
type = project["projectTypeKey"]
style = project["style"]
parsed.append(
- {"id": id, "key": key, "name": name, "type": type, "style": style}
+ {
+ "id": id,
+ "key": key,
+ "name": name,
+ "type": type,
+ "style": style,
+ }
)
return parsed
diff --git a/libs/community/langchain_community/utilities/merriam_webster.py b/libs/community/langchain_community/utilities/merriam_webster.py
index 8cf9e18a107e8..94268556d02b5 100644
--- a/libs/community/langchain_community/utilities/merriam_webster.py
+++ b/libs/community/langchain_community/utilities/merriam_webster.py
@@ -84,7 +84,7 @@ def _format_definitions(self, query: str, definitions: List[Dict]) -> str:
formatted_definitions.extend(self._format_definition(definition))
if len(formatted_definitions) == 1:
- return f"Definition of '{query}':\n" f"{formatted_definitions[0]}"
+ return f"Definition of '{query}':\n{formatted_definitions[0]}"
result = f"Definitions of '{query}':\n\n"
for i, formatted_definition in enumerate(formatted_definitions, 1):
diff --git a/libs/community/langchain_community/utilities/openapi.py b/libs/community/langchain_community/utilities/openapi.py
index 1d99f7e182301..9e6ff44cf12a2 100644
--- a/libs/community/langchain_community/utilities/openapi.py
+++ b/libs/community/langchain_community/utilities/openapi.py
@@ -211,8 +211,7 @@ def _alert_unsupported_spec(obj: dict) -> None:
)
else:
raise ValueError(
- "Attempting to load an unsupported spec:"
- f"\n\n{obj}\n{warning_message}"
+ f"Attempting to load an unsupported spec:\n\n{obj}\n{warning_message}"
)
@classmethod
diff --git a/libs/community/langchain_community/utilities/portkey.py b/libs/community/langchain_community/utilities/portkey.py
index dbaf41840f072..5eb16f7af518b 100644
--- a/libs/community/langchain_community/utilities/portkey.py
+++ b/libs/community/langchain_community/utilities/portkey.py
@@ -26,9 +26,9 @@ def Config(
cache_force_refresh: Optional[str] = None,
cache_age: Optional[int] = None,
) -> Dict[str, str]:
- assert retry_count is None or retry_count in range(
- 1, 6
- ), "retry_count must be an integer and in range [1, 2, 3, 4, 5]"
+ assert retry_count is None or retry_count in range(1, 6), (
+ "retry_count must be an integer and in range [1, 2, 3, 4, 5]"
+ )
assert cache is None or cache in [
"simple",
"semantic",
@@ -37,9 +37,9 @@ def Config(
isinstance(cache_force_refresh, str)
and cache_force_refresh in ["True", "False"]
), "cache_force_refresh must be 'True' or 'False'"
- assert cache_age is None or isinstance(
- cache_age, int
- ), "cache_age must be an integer"
+ assert cache_age is None or isinstance(cache_age, int), (
+ "cache_age must be an integer"
+ )
os.environ["OPENAI_API_BASE"] = Portkey.base
diff --git a/libs/community/langchain_community/utilities/pubmed.py b/libs/community/langchain_community/utilities/pubmed.py
index 20185d0c2858e..7de01da501a64 100644
--- a/libs/community/langchain_community/utilities/pubmed.py
+++ b/libs/community/langchain_community/utilities/pubmed.py
@@ -193,7 +193,11 @@ def _parse_article(self, uid: str, text_dict: dict) -> dict:
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
- [a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
+ [
+ a_d.get("Year", ""),
+ a_d.get("Month", ""),
+ a_d.get("Day", ""),
+ ]
)
return {
diff --git a/libs/community/langchain_community/utilities/sql_database.py b/libs/community/langchain_community/utilities/sql_database.py
index 2a091fd2cf3cd..d6ecc4ca008b3 100644
--- a/libs/community/langchain_community/utilities/sql_database.py
+++ b/libs/community/langchain_community/utilities/sql_database.py
@@ -24,8 +24,8 @@
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) -> str:
return (
- f'Name: {index["name"]}, Unique: {index["unique"]},'
- f' Columns: {str(index["column_names"])}'
+ f"Name: {index['name']}, Unique: {index['unique']},"
+ f" Columns: {str(index['column_names'])}"
)
diff --git a/libs/community/langchain_community/vectorstores/aerospike.py b/libs/community/langchain_community/vectorstores/aerospike.py
index 997646a00f98a..96ef3c659eb68 100644
--- a/libs/community/langchain_community/vectorstores/aerospike.py
+++ b/libs/community/langchain_community/vectorstores/aerospike.py
@@ -149,7 +149,7 @@ def convert_distance_strategy(
return DistanceStrategy.EUCLIDEAN_DISTANCE
raise ValueError(
- "Unknown distance strategy, must be cosine, dot_product" ", or euclidean"
+ "Unknown distance strategy, must be cosine, dot_product, or euclidean"
)
def add_texts(
@@ -437,8 +437,7 @@ def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._euclidean_relevance_score_fn
else:
raise ValueError(
- "Unknown distance strategy, must be cosine, dot_product"
- ", or euclidean"
+ "Unknown distance strategy, must be cosine, dot_product, or euclidean"
)
@staticmethod
diff --git a/libs/community/langchain_community/vectorstores/apache_doris.py b/libs/community/langchain_community/vectorstores/apache_doris.py
index 4d25f0a0aeb76..56ee6c0f64ffd 100644
--- a/libs/community/langchain_community/vectorstores/apache_doris.py
+++ b/libs/community/langchain_community/vectorstores/apache_doris.py
@@ -123,10 +123,10 @@ def __init__(
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
- {self.config.column_map['id']} varchar(50),
- {self.config.column_map['document']} string,
- {self.config.column_map['embedding']} array,
- {self.config.column_map['metadata']} string
+ {self.config.column_map["id"]} varchar(50),
+ {self.config.column_map["document"]} string,
+ {self.config.column_map["embedding"]} array,
+ {self.config.column_map["metadata"]} string
) ENGINE = OLAP UNIQUE KEY(id) DISTRIBUTED BY HASH(id) \
PROPERTIES ("replication_allocation" = "tag.location.default: 1")\
"""
@@ -179,7 +179,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s
INSERT INTO
{self.config.database}.{self.config.table}({ks})
VALUES
- {','.join(_data)}
+ {",".join(_data)}
"""
return i_str
@@ -310,10 +310,10 @@ def _build_query_sql(
where_str = ""
q_str = f"""
- SELECT {self.config.column_map['document']},
- {self.config.column_map['metadata']},
+ SELECT {self.config.column_map["document"]},
+ {self.config.column_map["metadata"]},
cosine_distance(array[{q_emb_str}],
- {self.config.column_map['embedding']}) as dist
+ {self.config.column_map["embedding"]}) as dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY dist {self.dist_order}
diff --git a/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py b/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
index 8d0d90dd92e9c..2adba6eb40bcc 100644
--- a/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
+++ b/libs/community/langchain_community/vectorstores/azure_cosmos_db_no_sql.py
@@ -617,7 +617,7 @@ def _construct_query(
):
query = f"SELECT {'TOP ' + str(k) + ' ' if not offset_limit else ''}"
else:
- query = f"""SELECT {'TOP @limit ' if not offset_limit else ''}"""
+ query = f"""SELECT {"TOP @limit " if not offset_limit else ""}"""
query += self._generate_projection_fields(
projection_mapping, query_type, embeddings
)
@@ -790,7 +790,7 @@ def _build_where_clause(self, pre_filter: PreFilter) -> str:
# e.g., for IN clauses
value = f"({', '.join(map(str, condition.value))})"
clauses.append(f"c.{condition.property} {sql_operator} {value}")
- return f""" WHERE {' {} '.format(sql_logical_operator).join(clauses)}""".strip()
+ return f""" WHERE {" {} ".format(sql_logical_operator).join(clauses)}""".strip()
def _execute_query(
self,
@@ -827,7 +827,10 @@ def _execute_query(
if with_embedding:
metadata[self._embedding_key] = item[self._embedding_key]
docs_and_scores.append(
- (Document(page_content=text, metadata=metadata), score)
+ (
+ Document(page_content=text, metadata=metadata),
+ score,
+ )
)
return docs_and_scores
diff --git a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
index 07114924dca6e..051b26a534324 100644
--- a/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
+++ b/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
@@ -122,9 +122,7 @@ def __init__(
self.text_embedding_field = text_embedding_field
self.doc_id_field = doc_id_field
self.distance_strategy = distance_strategy
- self._full_table_id = (
- f"{self.project_id}." f"{self.dataset_name}." f"{self.table_name}"
- )
+ self._full_table_id = f"{self.project_id}.{self.dataset_name}.{self.table_name}"
self._logger.debug("Using table `%s`", self.full_table_id)
with _vector_table_lock:
self.vectors_table = self._initialize_table()
@@ -149,7 +147,7 @@ def _initialize_table(self) -> Any:
columns[self.doc_id_field].field_type != "STRING"
or columns[self.doc_id_field].mode == "REPEATED"
):
- raise ValueError(f"Column {self.doc_id_field} must be of " "STRING type")
+ raise ValueError(f"Column {self.doc_id_field} must be of STRING type")
if self.metadata_field not in columns:
changed_schema = True
schema.append(
@@ -171,7 +169,7 @@ def _initialize_table(self) -> Any:
columns[self.content_field].field_type != "STRING"
or columns[self.content_field].mode == "REPEATED"
):
- raise ValueError(f"Column {self.content_field} must be of " "STRING type")
+ raise ValueError(f"Column {self.content_field} must be of STRING type")
if self.text_embedding_field not in columns:
changed_schema = True
schema.append(
@@ -186,7 +184,7 @@ def _initialize_table(self) -> Any:
or columns[self.text_embedding_field].mode != "REPEATED"
):
raise ValueError(
- f"Column {self.text_embedding_field} must be of " "ARRAY type"
+ f"Column {self.text_embedding_field} must be of ARRAY type"
)
if changed_schema:
self._logger.debug("Updated table `%s` schema.", self.full_table_id)
@@ -389,9 +387,7 @@ def get_documents(
)
else:
val = str(i[1]).replace('"', '\\"')
- expr = (
- f"JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}')" f' = "{val}"'
- )
+ expr = f"JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}') = \"{val}\""
filter_expressions.append(expr)
filter_expression_str = " AND ".join(filter_expressions)
where_filter_expr = f" AND ({filter_expression_str})"
@@ -520,7 +516,7 @@ def _search_with_score_and_embeddings_by_vector(
elif fraction_lists_to_search:
if fraction_lists_to_search == 0 or fraction_lists_to_search >= 1.0:
raise ValueError(
- "`fraction_lists_to_search` must be between " "0.0 and 1.0"
+ "`fraction_lists_to_search` must be between 0.0 and 1.0"
)
options_string = (
',options => \'{"fraction_lists_to_search":'
@@ -560,7 +556,11 @@ def _search_with_score_and_embeddings_by_vector(
metadata["__job_id"] = job.job_id
doc = Document(page_content=row[self.content_field], metadata=metadata)
document_tuples.append(
- (doc, row[self.text_embedding_field], row["_vector_search_distance"])
+ (
+ doc,
+ row[self.text_embedding_field],
+ row["_vector_search_distance"],
+ )
)
return document_tuples
diff --git a/libs/community/langchain_community/vectorstores/chroma.py b/libs/community/langchain_community/vectorstores/chroma.py
index 1ab7ddad140d6..ebf91f718af3a 100644
--- a/libs/community/langchain_community/vectorstores/chroma.py
+++ b/libs/community/langchain_community/vectorstores/chroma.py
@@ -643,7 +643,7 @@ def max_marginal_relevance_search(
"""
if self._embedding_function is None:
raise ValueError(
- "For MMR search, you must specify an embedding function on" "creation."
+                "For MMR search, you must specify an embedding function on creation."
)
embedding = self._embedding_function.embed_query(query)
diff --git a/libs/community/langchain_community/vectorstores/clarifai.py b/libs/community/langchain_community/vectorstores/clarifai.py
index b37b99f02a706..4cae4bf7cf976 100644
--- a/libs/community/langchain_community/vectorstores/clarifai.py
+++ b/libs/community/langchain_community/vectorstores/clarifai.py
@@ -115,14 +115,14 @@ def add_texts(
assert length > 0, "No texts provided to add to the vectorstore."
if metadatas is not None:
- assert length == len(
- metadatas
- ), "Number of texts and metadatas should be the same."
+ assert length == len(metadatas), (
+ "Number of texts and metadatas should be the same."
+ )
if ids is not None:
- assert len(ltexts) == len(
- ids
- ), "Number of text inputs and input ids should be the same."
+ assert len(ltexts) == len(ids), (
+ "Number of text inputs and input ids should be the same."
+ )
input_obj = Inputs.from_auth_helper(auth=self._auth)
batch_size = 32
diff --git a/libs/community/langchain_community/vectorstores/clickhouse.py b/libs/community/langchain_community/vectorstores/clickhouse.py
index 4246abfc0ddde..b05898d55ccef 100644
--- a/libs/community/langchain_community/vectorstores/clickhouse.py
+++ b/libs/community/langchain_community/vectorstores/clickhouse.py
@@ -341,27 +341,28 @@ def _schema(self, dim: int, index_params: Optional[str] = "") -> str:
if self.config.index_type:
return f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
- {self.config.column_map['id']} Nullable(String),
- {self.config.column_map['document']} Nullable(String),
- {self.config.column_map['embedding']} Array(Float32),
- {self.config.column_map['metadata']} JSON,
- {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(),
+ {self.config.column_map["id"]} Nullable(String),
+ {self.config.column_map["document"]} Nullable(String),
+ {self.config.column_map["embedding"]} Array(Float32),
+ {self.config.column_map["metadata"]} JSON,
+ {self.config.column_map["uuid"]} UUID DEFAULT generateUUIDv4(),
CONSTRAINT cons_vec_len CHECK length(
- {self.config.column_map['embedding']}) = {dim},
- INDEX vec_idx {self.config.column_map['embedding']} TYPE \
+ {self.config.column_map["embedding"]}) = {dim},
+ INDEX vec_idx {self.config.column_map["embedding"]} TYPE \
{self.config.index_type}({index_params}) GRANULARITY 1000
) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\
"""
else:
return f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
- {self.config.column_map['id']} Nullable(String),
- {self.config.column_map['document']} Nullable(String),
- {self.config.column_map['embedding']} Array(Float32),
- {self.config.column_map['metadata']} JSON,
- {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(),
+ {self.config.column_map["id"]} Nullable(String),
+ {self.config.column_map["document"]} Nullable(String),
+ {self.config.column_map["embedding"]} Array(Float32),
+ {self.config.column_map["metadata"]} JSON,
+ {self.config.column_map["uuid"]} UUID DEFAULT generateUUIDv4(),
CONSTRAINT cons_vec_len CHECK length({
- self.config.column_map['embedding']}) = {dim}
+ self.config.column_map["embedding"]
+ }) = {dim}
) ENGINE = MergeTree ORDER BY uuid
"""
@@ -418,7 +419,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
- {','.join(_data)}
+ {",".join(_data)}
"""
return i_str
@@ -574,13 +575,13 @@ def _build_query_sql(
for k in self.config.index_query_params:
settings_strs.append(f"SETTING {k}={self.config.index_query_params[k]}")
q_str = f"""
- SELECT {self.config.column_map['document']},
- {self.config.column_map['metadata']}, dist
+ SELECT {self.config.column_map["document"]},
+ {self.config.column_map["metadata"]}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
- ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}])
+ ORDER BY L2Distance({self.config.column_map["embedding"]}, [{q_emb_str}])
AS dist {self.dist_order}
- LIMIT {topk} {' '.join(settings_strs)}
+ LIMIT {topk} {" ".join(settings_strs)}
"""
return q_str
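
The ClickHouse and Apache Doris schema templates above show the quote rotation inside triple-quoted SQL builders. A reduced sketch; column_map, database, and table below are stand-ins, not the real config objects:

    column_map = {"document": "doc", "metadata": "meta", "embedding": "vec"}
    database, table, topk = "db", "docs", 4
    where_str, q_emb_str, dist_order = "", "0.1, 0.2", "ASC"

    # Double quotes are safe inside the braces here because the surrounding
    # literal is triple-quoted.
    q_str = f"""
        SELECT {column_map["document"]},
               {column_map["metadata"]}, dist
        FROM {database}.{table}
        {where_str}
        ORDER BY L2Distance({column_map["embedding"]}, [{q_emb_str}])
            AS dist {dist_order}
        LIMIT {topk}
    """
    print(q_str)
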
diff --git a/libs/community/langchain_community/vectorstores/dashvector.py b/libs/community/langchain_community/vectorstores/dashvector.py
index 64f73630de693..639856fcbce99 100644
--- a/libs/community/langchain_community/vectorstores/dashvector.py
+++ b/libs/community/langchain_community/vectorstores/dashvector.py
@@ -392,9 +392,7 @@ def from_texts(
if resp:
collection = dashvector_client.get(collection_name)
else:
- raise ValueError(
- "Fail to create collection. " f"Error: {resp.message}."
- )
+ raise ValueError(f"Fail to create collection. Error: {resp.message}.")
dashvector_vector_db = cls(collection, embedding, text_field)
dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size)
diff --git a/libs/community/langchain_community/vectorstores/deeplake.py b/libs/community/langchain_community/vectorstores/deeplake.py
index 0de4c6032a708..c0163560e3170 100644
--- a/libs/community/langchain_community/vectorstores/deeplake.py
+++ b/libs/community/langchain_community/vectorstores/deeplake.py
@@ -425,8 +425,7 @@ def _search(
if embedding is None:
if _embedding_function is None:
raise ValueError(
- "Either `embedding` or `embedding_function` needs to be"
- " specified."
+ "Either `embedding` or `embedding_function` needs to be specified."
)
embedding = _embedding_function(query) if query else None
diff --git a/libs/community/langchain_community/vectorstores/hanavector.py b/libs/community/langchain_community/vectorstores/hanavector.py
index 6c0a8040b255c..9212c53f5a4ca 100644
--- a/libs/community/langchain_community/vectorstores/hanavector.py
+++ b/libs/community/langchain_community/vectorstores/hanavector.py
@@ -669,7 +669,7 @@ def _process_filter_object(self, filter): # type: ignore[no-untyped-def]
if key in self.specific_metadata_columns
else f"JSON_VALUE({self.metadata_column}, '$.{key}')"
)
- where_str += f"{selector} " f"{operator} {sql_param}"
+ where_str += f"{selector} {operator} {sql_param}"
return where_str, query_tuple
diff --git a/libs/community/langchain_community/vectorstores/hippo.py b/libs/community/langchain_community/vectorstores/hippo.py
index 328a2f1e99226..373f6a0e06b15 100644
--- a/libs/community/langchain_community/vectorstores/hippo.py
+++ b/libs/community/langchain_community/vectorstores/hippo.py
@@ -118,7 +118,7 @@ def __init__(
self.hc.delete_table(self.table_name, self.database_name)
except Exception as e:
logging.error(
- f"An error occurred while deleting the table " f"{self.table_name}: {e}"
+ f"An error occurred while deleting the table {self.table_name}: {e}"
)
raise
@@ -127,7 +127,7 @@ def __init__(
self.col = self.hc.get_table(self.table_name, self.database_name)
except Exception as e:
logging.error(
- f"An error occurred while getting the table " f"{self.table_name}: {e}"
+ f"An error occurred while getting the table {self.table_name}: {e}"
)
raise
diff --git a/libs/community/langchain_community/vectorstores/infinispanvs.py b/libs/community/langchain_community/vectorstores/infinispanvs.py
index 3f7c34158df15..c9012e17fbef1 100644
--- a/libs/community/langchain_community/vectorstores/infinispanvs.py
+++ b/libs/community/langchain_community/vectorstores/infinispanvs.py
@@ -90,7 +90,7 @@ def __init__(
self._textfield = self._configuration.get("text_field", "text")
else:
warnings.warn(
- "`textfield` is deprecated. Please use `text_field` " "param.",
+ "`textfield` is deprecated. Please use `text_field` param.",
DeprecationWarning,
)
self._vectorfield = self._configuration.get("vectorfield", "")
@@ -98,7 +98,7 @@ def __init__(
self._vectorfield = self._configuration.get("vector_field", "vector")
else:
warnings.warn(
- "`vectorfield` is deprecated. Please use `vector_field` " "param.",
+ "`vectorfield` is deprecated. Please use `vector_field` param.",
DeprecationWarning,
)
self._to_content = self._configuration.get(
@@ -361,16 +361,16 @@ def _query_result_to_docs(
def configure(self, metadata: dict, dimension: int) -> None:
schema = self.schema_builder(metadata, dimension)
output = self.schema_create(schema)
-        assert (
-            output.status_code == self.ispn.Codes.OK
-        ), "Unable to create schema. Already exists? "
-        "Consider using clear_old=True"
+        assert output.status_code == self.ispn.Codes.OK, (
+            "Unable to create schema. Already exists? "
+            "Consider using clear_old=True"
+        )
assert json.loads(output.text)["error"] is None
if not self.cache_exists():
output = self.cache_create()
-        assert (
-            output.status_code == self.ispn.Codes.OK
-        ), "Unable to create cache. Already exists? "
-        "Consider using clear_old=True"
+        assert output.status_code == self.ispn.Codes.OK, (
+            "Unable to create cache. Already exists? "
+            "Consider using clear_old=True"
+        )
# Ensure index is clean
self.cache_index_clear()
diff --git a/libs/community/langchain_community/vectorstores/lancedb.py b/libs/community/langchain_community/vectorstores/lancedb.py
index f08e4380481b1..11cc955cac40a 100644
--- a/libs/community/langchain_community/vectorstores/lancedb.py
+++ b/libs/community/langchain_community/vectorstores/lancedb.py
@@ -562,7 +562,7 @@ def max_marginal_relevance_search(
if self._embedding is None:
raise ValueError(
- "For MMR search, you must specify an embedding function on" "creation."
+                "For MMR search, you must specify an embedding function on creation."
)
embedding = self._embedding.embed_query(query)
diff --git a/libs/community/langchain_community/vectorstores/manticore_search.py b/libs/community/langchain_community/vectorstores/manticore_search.py
index 3743e603da797..027d4f6adc8c1 100644
--- a/libs/community/langchain_community/vectorstores/manticore_search.py
+++ b/libs/community/langchain_community/vectorstores/manticore_search.py
@@ -150,16 +150,16 @@ def __init__(
# Initialize the schema
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.table}(
- {self.config.column_map['id']} bigint,
- {self.config.column_map['document']} text indexed stored,
- {self.config.column_map['embedding']} \
+ {self.config.column_map["id"]} bigint,
+ {self.config.column_map["document"]} text indexed stored,
+ {self.config.column_map["embedding"]} \
float_vector knn_type='{self.config.knn_type}' \
knn_dims='{self.dim}' \
hnsw_similarity='{self.config.hnsw_similarity}' \
hnsw_m='{self.config.hnsw_m}' \
hnsw_ef_construction='{self.config.hnsw_ef_construction}',
- {self.config.column_map['metadata']} json,
- {self.config.column_map['uuid']} text indexed stored
+ {self.config.column_map["metadata"]} json,
+ {self.config.column_map["uuid"]} text indexed stored
)\
"""
diff --git a/libs/community/langchain_community/vectorstores/marqo.py b/libs/community/langchain_community/vectorstores/marqo.py
index 96b762662d7d8..0213f86db5ae6 100644
--- a/libs/community/langchain_community/vectorstores/marqo.py
+++ b/libs/community/langchain_community/vectorstores/marqo.py
@@ -269,7 +269,10 @@ def _construct_documents_from_results_with_score(
metadata = json.loads(res.get("metadata", "{}"))
documents.append(
- (Document(page_content=text, metadata=metadata), res["_score"])
+ (
+ Document(page_content=text, metadata=metadata),
+ res["_score"],
+ )
)
return documents
diff --git a/libs/community/langchain_community/vectorstores/meilisearch.py b/libs/community/langchain_community/vectorstores/meilisearch.py
index 885d4d5cff441..add664a64fe3d 100644
--- a/libs/community/langchain_community/vectorstores/meilisearch.py
+++ b/libs/community/langchain_community/vectorstores/meilisearch.py
@@ -33,8 +33,7 @@ def _create_client(
client = meilisearch.Client(url=url, api_key=api_key)
elif not isinstance(client, meilisearch.Client):
raise ValueError(
- f"client should be an instance of meilisearch.Client, "
- f"got {type(client)}"
+ f"client should be an instance of meilisearch.Client, got {type(client)}"
)
try:
client.version()
@@ -253,7 +252,10 @@ def similarity_search_by_vector_with_scores(
text = metadata.pop(self._text_key)
semantic_score = result["_rankingScore"]
docs.append(
- (Document(page_content=text, metadata=metadata), semantic_score)
+ (
+ Document(page_content=text, metadata=metadata),
+ semantic_score,
+ )
)
return docs
diff --git a/libs/community/langchain_community/vectorstores/milvus.py b/libs/community/langchain_community/vectorstores/milvus.py
index 04bff999913db..7dd404999683c 100644
--- a/libs/community/langchain_community/vectorstores/milvus.py
+++ b/libs/community/langchain_community/vectorstores/milvus.py
@@ -550,15 +550,15 @@ def add_texts(
texts = list(texts)
if not self.auto_id:
- assert isinstance(
- ids, list
- ), "A list of valid ids are required when auto_id is False."
- assert len(set(ids)) == len(
- texts
- ), "Different lengths of texts and unique ids are provided."
- assert all(
- len(x.encode()) <= 65_535 for x in ids
- ), "Each id should be a string less than 65535 bytes."
+ assert isinstance(ids, list), (
+                "A list of valid ids is required when auto_id is False."
+ )
+ assert len(set(ids)) == len(texts), (
+ "Different lengths of texts and unique ids are provided."
+ )
+ assert all(len(x.encode()) <= 65_535 for x in ids), (
+ "Each id should be a string less than 65535 bytes."
+ )
try:
embeddings = self.embedding_func.embed_documents(texts)
@@ -953,13 +953,13 @@ def delete( # type: ignore[no-untyped-def]
if isinstance(ids, list) and len(ids) > 0:
if expr is not None:
logger.warning(
- "Both ids and expr are provided. " "Ignore expr and delete by ids."
+ "Both ids and expr are provided. Ignore expr and delete by ids."
)
expr = f"{self._primary_field} in {ids}"
else:
- assert isinstance(
- expr, str
- ), "Either ids list or expr string must be provided."
+ assert isinstance(expr, str), (
+ "Either ids list or expr string must be provided."
+ )
return self.col.delete(expr=expr, **kwargs) # type: ignore[union-attr]
@classmethod
diff --git a/libs/community/langchain_community/vectorstores/myscale.py b/libs/community/langchain_community/vectorstores/myscale.py
index d3dec65c8b71e..711b525e40606 100644
--- a/libs/community/langchain_community/vectorstores/myscale.py
+++ b/libs/community/langchain_community/vectorstores/myscale.py
@@ -166,16 +166,16 @@ def __init__(
)
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
- {self.config.column_map['id']} String,
- {self.config.column_map['text']} String,
- {self.config.column_map['vector']} Array(Float32),
- {self.config.column_map['metadata']} JSON,
+ {self.config.column_map["id"]} String,
+ {self.config.column_map["text"]} String,
+ {self.config.column_map["vector"]} Array(Float32),
+ {self.config.column_map["metadata"]} JSON,
CONSTRAINT cons_vec_len CHECK length(\
- {self.config.column_map['vector']}) = {dim},
- VECTOR INDEX vidx {self.config.column_map['vector']} \
+ {self.config.column_map["vector"]}) = {dim},
+ VECTOR INDEX vidx {self.config.column_map["vector"]} \
TYPE {self.config.index_type}(\
'metric_type={self.config.metric}'{index_params})
- ) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
+ ) ENGINE = MergeTree ORDER BY {self.config.column_map["id"]}
"""
self.dim = dim
self.BS = "\\"
@@ -220,7 +220,7 @@ def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str:
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
- {','.join(_data)}
+ {",".join(_data)}
"""
return i_str
@@ -345,11 +345,11 @@ def _build_qstr(
where_str = ""
q_str = f"""
- SELECT {self.config.column_map['text']},
- {self.config.column_map['metadata']}, dist
+ SELECT {self.config.column_map["text"]},
+ {self.config.column_map["metadata"]}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
- ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
+ ORDER BY distance({self.config.column_map["vector"]}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
@@ -475,9 +475,9 @@ def delete(
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
- assert not (
- ids is None and where_str is None
- ), "You need to specify where to be deleted! Either with `ids` or `where_str`"
+ assert not (ids is None and where_str is None), (
+            "You need to specify what to delete! Either with `ids` or `where_str`"
+ )
conds = []
if ids and len(ids) > 0:
id_list = ", ".join([f"'{id}'" for id in ids])
@@ -536,11 +536,11 @@ def _build_qstr(
where_str = ""
q_str = f"""
- SELECT {self.config.column_map['text']}, dist,
- {','.join(self.must_have_cols)}
+ SELECT {self.config.column_map["text"]}, dist,
+ {",".join(self.must_have_cols)}
FROM {self.config.database}.{self.config.table}
{where_str}
- ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
+ ORDER BY distance({self.config.column_map["vector"]}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
diff --git a/libs/community/langchain_community/vectorstores/neo4j_vector.py b/libs/community/langchain_community/vectorstores/neo4j_vector.py
index 03d97a5a9d034..b65f165abad75 100644
--- a/libs/community/langchain_community/vectorstores/neo4j_vector.py
+++ b/libs/community/langchain_community/vectorstores/neo4j_vector.py
@@ -323,8 +323,7 @@ def _handle_field_filter(
if field.startswith("$"):
raise ValueError(
- f"Invalid filter condition. Expected a field but got an operator: "
- f"{field}"
+ f"Invalid filter condition. Expected a field but got an operator: {field}"
)
# Allow [a-zA-Z0-9_], disallow $ for now until we support escape characters
@@ -344,8 +343,7 @@ def _handle_field_filter(
# Verify that that operator is an operator
if operator not in SUPPORTED_OPERATORS:
raise ValueError(
- f"Invalid operator: {operator}. "
- f"Expected one of {SUPPORTED_OPERATORS}"
+ f"Invalid operator: {operator}. Expected one of {SUPPORTED_OPERATORS}"
)
else: # Then we assume an equality operator
operator = "$eq"
@@ -423,8 +421,7 @@ def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]:
# Then it's an operator
if key.lower() not in ["$and", "$or"]:
raise ValueError(
- f"Invalid filter condition. Expected $and or $or "
- f"but got: {key}"
+ f"Invalid filter condition. Expected $and or $or but got: {key}"
)
else:
# Then it's a field
@@ -459,7 +456,7 @@ def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]:
)
else:
raise ValueError(
- f"Invalid filter condition. Expected $and or $or " f"but got: {key}"
+ f"Invalid filter condition. Expected $and or $or but got: {key}"
)
elif len(filter) > 1:
# Then all keys have to be fields (they cannot be operators)
@@ -1336,8 +1333,7 @@ def from_existing_index(
if search_type == SearchType.HYBRID and not keyword_index_name:
raise ValueError(
- "keyword_index name has to be specified "
- "when using hybrid search option"
+ "keyword_index name has to be specified when using hybrid search option"
)
store = cls(
diff --git a/libs/community/langchain_community/vectorstores/oraclevs.py b/libs/community/langchain_community/vectorstores/oraclevs.py
index 2ca39e9f6f1b5..6aa767927f5e6 100644
--- a/libs/community/langchain_community/vectorstores/oraclevs.py
+++ b/libs/community/langchain_community/vectorstores/oraclevs.py
@@ -75,8 +75,7 @@ def _table_exists(client: Connection, table_name: str) -> bool:
import oracledb
except ImportError as e:
raise ImportError(
- "Unable to import oracledb, please install with "
- "`pip install -U oracledb`."
+ "Unable to import oracledb, please install with `pip install -U oracledb`."
) from e
try:
@@ -775,8 +774,9 @@ def similarity_search_by_vector_returning_embeddings(
SELECT id,
text,
metadata,
- vector_distance(embedding, :embedding, {_get_distance_function(
- self.distance_strategy)}) as distance,
+ vector_distance(embedding, :embedding, {
+ _get_distance_function(self.distance_strategy)
+ }) as distance,
embedding
FROM {self.table_name}
ORDER BY distance
@@ -1010,7 +1010,7 @@ def from_texts(
)
if not isinstance(distance_strategy, DistanceStrategy):
raise TypeError(
- f"Expected DistanceStrategy got " f"{type(distance_strategy).__name__} "
+                f"Expected DistanceStrategy, got {type(distance_strategy).__name__}"
)
query = kwargs.get("query", "What is a Oracle database")
diff --git a/libs/community/langchain_community/vectorstores/pgvector.py b/libs/community/langchain_community/vectorstores/pgvector.py
index 85814bef99488..1f4fef445dc22 100644
--- a/libs/community/langchain_community/vectorstores/pgvector.py
+++ b/libs/community/langchain_community/vectorstores/pgvector.py
@@ -181,6 +181,7 @@ class EmbeddingStore(BaseModel):
postgresql_ops={"cmetadata": "jsonb_path_ops"},
),
)
+
else:
# For backwards comaptibilty with older versions of pgvector
# This should be removed in the future (remove during migration)
@@ -900,8 +901,7 @@ def _create_filter_clause(self, filters: Any) -> Any:
)
else:
raise ValueError(
- f"Invalid filter condition. Expected $and or $or "
- f"but got: {key}"
+ f"Invalid filter condition. Expected $and or $or but got: {key}"
)
elif len(filters) > 1:
# Then all keys have to be fields (they cannot be operators)
diff --git a/libs/community/langchain_community/vectorstores/pinecone.py b/libs/community/langchain_community/vectorstores/pinecone.py
index 7d960409e6b0f..7ad40b2c0c889 100644
--- a/libs/community/langchain_community/vectorstores/pinecone.py
+++ b/libs/community/langchain_community/vectorstores/pinecone.py
@@ -71,7 +71,7 @@ def __init__(
)
if not isinstance(index, pinecone.Index):
raise ValueError(
- f"client should be an instance of pinecone.Index, " f"got {type(index)}"
+ f"client should be an instance of pinecone.Index, got {type(index)}"
)
self._index = index
self._embedding = embedding
diff --git a/libs/community/langchain_community/vectorstores/rocksetdb.py b/libs/community/langchain_community/vectorstores/rocksetdb.py
index 263a211fba324..29afaee177593 100644
--- a/libs/community/langchain_community/vectorstores/rocksetdb.py
+++ b/libs/community/langchain_community/vectorstores/rocksetdb.py
@@ -271,10 +271,10 @@ def similarity_search_by_vector_with_relevance_scores(
finalResult: list[Tuple[Document, float]] = []
for document in query_response.results:
metadata = {}
- assert isinstance(
- document, dict
- ), "document should be of type `dict[str,Any]`. But found: `{}`".format(
- type(document)
+ assert isinstance(document, dict), (
+ "document should be of type `dict[str,Any]`. But found: `{}`".format(
+ type(document)
+ )
)
for k, v in document.items():
if k == self._text_key:
@@ -294,7 +294,10 @@ def similarity_search_by_vector_with_relevance_scores(
# inserted. No need to return them in metadata dict.
metadata[k] = v
finalResult.append(
- (Document(page_content=page_content, metadata=metadata), score)
+ (
+ Document(page_content=page_content, metadata=metadata),
+ score,
+ )
)
return finalResult
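
The append hunks here (and in marqo, meilisearch, and vdms) explode a tuple argument one element per line once the call no longer fits on a single line. A sketch using a stand-in Doc type in place of the real Document class:

    from typing import List, NamedTuple, Tuple


    class Doc(NamedTuple):
        """Stand-in for the real Document class."""

        page_content: str
        metadata: dict


    final_result: List[Tuple[Doc, float]] = []
    final_result.append(
        (
            Doc(page_content="some text", metadata={"source": "local"}),
            0.87,  # the trailing comma keeps future one-element diffs small
        )
    )
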
diff --git a/libs/community/langchain_community/vectorstores/sklearn.py b/libs/community/langchain_community/vectorstores/sklearn.py
index 96953f69ff452..4c83543276c27 100644
--- a/libs/community/langchain_community/vectorstores/sklearn.py
+++ b/libs/community/langchain_community/vectorstores/sklearn.py
@@ -171,8 +171,7 @@ def embeddings(self) -> Embeddings:
def persist(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
- "You must specify a persist_path on creation to persist the "
- "collection."
+ "You must specify a persist_path on creation to persist the collection."
)
data = {
"ids": self._ids,
@@ -185,7 +184,7 @@ def persist(self) -> None:
def _load(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
- "You must specify a persist_path on creation to load the " "collection."
+ "You must specify a persist_path on creation to load the collection."
)
data = self._serializer.load()
self._embeddings = data["embeddings"]
diff --git a/libs/community/langchain_community/vectorstores/sqlitevec.py b/libs/community/langchain_community/vectorstores/sqlitevec.py
index 13a2d5ee9208c..52da1942f5adb 100644
--- a/libs/community/langchain_community/vectorstores/sqlitevec.py
+++ b/libs/community/langchain_community/vectorstores/sqlitevec.py
@@ -132,8 +132,7 @@ def add_texts(
for text, metadata, embed in zip(texts, metadatas, embeds)
]
self._connection.executemany(
- f"INSERT INTO {self._table}(text, metadata, text_embedding) "
- f"VALUES (?,?,?)",
+ f"INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)",
data_input,
)
self._connection.commit()
diff --git a/libs/community/langchain_community/vectorstores/sqlitevss.py b/libs/community/langchain_community/vectorstores/sqlitevss.py
index 3ea9f427700b1..7bc394fddefcc 100644
--- a/libs/community/langchain_community/vectorstores/sqlitevss.py
+++ b/libs/community/langchain_community/vectorstores/sqlitevss.py
@@ -121,8 +121,7 @@ def add_texts(
for text, metadata, embed in zip(texts, metadatas, embeds)
]
self._connection.executemany(
- f"INSERT INTO {self._table}(text, metadata, text_embedding) "
- f"VALUES (?,?,?)",
+ f"INSERT INTO {self._table}(text, metadata, text_embedding) VALUES (?,?,?)",
data_input,
)
self._connection.commit()
diff --git a/libs/community/langchain_community/vectorstores/starrocks.py b/libs/community/langchain_community/vectorstores/starrocks.py
index 80debc09f92cd..9298f12a78ff3 100644
--- a/libs/community/langchain_community/vectorstores/starrocks.py
+++ b/libs/community/langchain_community/vectorstores/starrocks.py
@@ -176,10 +176,10 @@ def __init__(
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
- {self.config.column_map['id']} string,
- {self.config.column_map['document']} string,
- {self.config.column_map['embedding']} array,
- {self.config.column_map['metadata']} string
+ {self.config.column_map["id"]} string,
+ {self.config.column_map["document"]} string,
+ {self.config.column_map["embedding"]} array,
+ {self.config.column_map["metadata"]} string
) ENGINE = OLAP PRIMARY KEY(id) DISTRIBUTED BY HASH(id) \
PROPERTIES ("replication_num" = "1")\
"""
@@ -232,7 +232,7 @@ def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> s
INSERT INTO
{self.config.database}.{self.config.table}({ks})
VALUES
- {','.join(_data)}
+ {",".join(_data)}
"""
return i_str
@@ -363,10 +363,10 @@ def _build_query_sql(
where_str = ""
q_str = f"""
- SELECT {self.config.column_map['document']},
- {self.config.column_map['metadata']},
+ SELECT {self.config.column_map["document"]},
+ {self.config.column_map["metadata"]},
cosine_similarity_norm(array[{q_emb_str}],
- {self.config.column_map['embedding']}) as dist
+ {self.config.column_map["embedding"]}) as dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY dist {self.dist_order}
diff --git a/libs/community/langchain_community/vectorstores/surrealdb.py b/libs/community/langchain_community/vectorstores/surrealdb.py
index ce05abdc930ce..3157f48e33b27 100644
--- a/libs/community/langchain_community/vectorstores/surrealdb.py
+++ b/libs/community/langchain_community/vectorstores/surrealdb.py
@@ -323,9 +323,9 @@ def similarity_search_with_relevance_scores(
List of Documents most similar along with relevance scores
"""
- async def _similarity_search_with_relevance_scores() -> (
- List[Tuple[Document, float]]
- ):
+ async def _similarity_search_with_relevance_scores() -> List[
+ Tuple[Document, float]
+ ]:
await self.initialize()
return await self.asimilarity_search_with_relevance_scores(
query, k, filter=filter, **kwargs
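
The SurrealDB hunk above shows the last recurring shape: an overlong return annotation is now split inside its own subscript brackets rather than wrapped in parentheses. A minimal sketch:

    from typing import List, Tuple

    # Old shape:
    # async def _similarity_search() -> (
    #     List[Tuple[str, float]]
    # ):
    #     ...

    # New shape: the subscript's brackets are split, so the extra
    # parentheses around the annotation disappear.
    async def _similarity_search() -> List[
        Tuple[str, float]
    ]:
        return [("doc", 0.42)]
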
diff --git a/libs/community/langchain_community/vectorstores/typesense.py b/libs/community/langchain_community/vectorstores/typesense.py
index 88d73992d0eea..a0c8022c620f8 100644
--- a/libs/community/langchain_community/vectorstores/typesense.py
+++ b/libs/community/langchain_community/vectorstores/typesense.py
@@ -106,7 +106,10 @@ def _create_collection(self, num_dim: int) -> None:
{"name": ".*", "type": "auto"},
]
self._typesense_client.collections.create(
- {"name": self._typesense_collection_name, "fields": fields}
+ {
+ "name": self._typesense_collection_name,
+ "fields": fields,
+ }
)
def add_texts(
@@ -158,7 +161,7 @@ def similarity_search_with_score(
embedded_query = [str(x) for x in self._embedding.embed_query(query)]
query_obj = {
"q": "*",
- "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
+ "vector_query": f"vec:([{','.join(embedded_query)}], k:{k})",
"filter_by": filter,
"collection": self._typesense_collection_name,
}
diff --git a/libs/community/langchain_community/vectorstores/vdms.py b/libs/community/langchain_community/vectorstores/vdms.py
index ed50d01462623..b77c7ff297911 100644
--- a/libs/community/langchain_community/vectorstores/vdms.py
+++ b/libs/community/langchain_community/vectorstores/vdms.py
@@ -105,7 +105,10 @@ def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
}
final_res.append(
- (Document(page_content=txt_contents, metadata=props), distance)
+ (
+ Document(page_content=txt_contents, metadata=props),
+ distance,
+ )
)
except Exception as e:
logger.warning(f"No results returned. Error while parsing results: {e}")
@@ -232,8 +235,7 @@ def _embed_query(self, text: str) -> List[float]:
return self.embedding.embed_query(text)
else:
raise ValueError(
- "Must provide `embedding` which is expected"
- " to be an Embeddings object"
+ "Must provide `embedding` which is expected to be an Embeddings object"
)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
@@ -284,7 +286,10 @@ def _similarity_search_with_relevance_scores(
docs_and_rel_scores.append((doc, score))
else:
docs_and_rel_scores.append(
- (doc, self.override_relevance_score_fn(score))
+ (
+ doc,
+ self.override_relevance_score_fn(score),
+ )
)
return docs_and_rel_scores
@@ -1099,7 +1104,7 @@ def max_marginal_relevance_search(
"""
if self.embedding is None:
raise ValueError(
- "For MMR search, you must specify an embedding function on" "creation."
+                "For MMR search, you must specify an embedding function on creation."
)
# embedding_vector: List[float] = self._embed_query(query)
@@ -1208,7 +1213,7 @@ def max_marginal_relevance_search_with_score(
"""
if self.embedding is None:
raise ValueError(
- "For MMR search, you must specify an embedding function on" "creation."
+                "For MMR search, you must specify an embedding function on creation."
)
if not os.path.isfile(query) and hasattr(self.embedding, "embed_query"):
diff --git a/libs/community/langchain_community/vectorstores/vespa.py b/libs/community/langchain_community/vectorstores/vespa.py
index 6fe6585cd86be..bc9f3736406c7 100644
--- a/libs/community/langchain_community/vectorstores/vespa.py
+++ b/libs/community/langchain_community/vectorstores/vespa.py
@@ -96,7 +96,7 @@ def add_texts(
embeddings = self._embedding_function.embed_documents(list(texts))
if ids is None:
- ids = [str(f"{i+1}") for i, _ in enumerate(texts)]
+ ids = [str(f"{i + 1}") for i, _ in enumerate(texts)]
batch = []
for i, text in enumerate(texts):
diff --git a/libs/community/langchain_community/vectorstores/vikingdb.py b/libs/community/langchain_community/vectorstores/vikingdb.py
index 002db6485be2b..2e0a9b0c57fc2 100644
--- a/libs/community/langchain_community/vectorstores/vikingdb.py
+++ b/libs/community/langchain_community/vectorstores/vikingdb.py
@@ -131,8 +131,7 @@ def _create_collection(
fields.append(Field(key, FieldType.Text))
else:
raise ValueError(
- "metadatas value is invalid"
- "please change the type of metadatas."
+                    "metadatas value is invalid, please change the type of metadatas."
)
# fields.append(Field("text", FieldType.String))
fields.append(Field("text", FieldType.Text))
diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock
index d30362095be02..b20ef3121b942 100644
--- a/libs/community/poetry.lock
+++ b/libs/community/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -3797,29 +3797,29 @@ files = [
[[package]]
name = "ruff"
-version = "0.5.7"
+version = "0.9.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
- {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
- {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
- {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
- {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
- {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
- {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
+ {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"},
+ {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"},
+ {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"},
+ {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"},
+ {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"},
+ {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"},
+ {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"},
+ {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"},
+ {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"},
+ {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"},
+ {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"},
+ {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"},
]
[[package]]
@@ -4731,4 +4731,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
-content-hash = "81411c2177eec007fce10d2c1773247155a09ace08e361f72da23b9027e5afba"
+content-hash = "afd6330fcbdb646a92db0c88d04f75426083dd28a6548f53371e1fc6e5433304"
diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml
index 6b3f75d6ae95a..0c1840e8dedeb 100644
--- a/libs/community/pyproject.toml
+++ b/libs/community/pyproject.toml
@@ -12,6 +12,7 @@ readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
[tool.ruff]
+target-version = "py39"
exclude = [
"tests/examples/non-utf8-encoding.py",
"tests/integration_tests/examples/non-utf8-encoding.py",
@@ -119,7 +120,7 @@ pytest-vcr = "^1.0.2"
vcrpy = "^6"
[tool.poetry.group.lint.dependencies]
-ruff = "^0.5"
+ruff = "^0.9.2"
[[tool.poetry.group.lint.dependencies.cffi]]
version = "<1.17.1"
python = "<3.10"
diff --git a/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py b/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py
index 51b2552139be4..a9e128a7dde0a 100644
--- a/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py
+++ b/libs/community/tests/integration_tests/callbacks/test_langchain_tracer.py
@@ -28,7 +28,7 @@
"Who won the US Open women's final in 2019? "
"What is her age raised to the 0.34 power?"
),
- ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"),
+ ("Who is Beyonce's husband? What is his age raised to the 0.19 power?"),
]
diff --git a/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py b/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py
index 45c5287b8ec5b..15152bf075b14 100644
--- a/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py
+++ b/libs/community/tests/integration_tests/callbacks/test_wandb_tracer.py
@@ -25,7 +25,7 @@
"Who won the US Open women's final in 2019? "
"What is her age raised to the 0.34 power?"
),
- ("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"),
+ ("Who is Beyonce's husband? What is his age raised to the 0.19 power?"),
]
diff --git a/libs/community/tests/integration_tests/chat_models/test_friendli.py b/libs/community/tests/integration_tests/chat_models/test_friendli.py
index c9e5bd0d381f9..83d829e728670 100644
--- a/libs/community/tests/integration_tests/chat_models/test_friendli.py
+++ b/libs/community/tests/integration_tests/chat_models/test_friendli.py
@@ -40,7 +40,10 @@ def test_friendli_batch(friendli_chat: ChatFriendli) -> None:
async def test_friendli_abatch(friendli_chat: ChatFriendli) -> None:
"""Test async batch."""
outputs = await friendli_chat.abatch(
- ["What is generative AI?", "What is generative AI?"]
+ [
+ "What is generative AI?",
+ "What is generative AI?",
+ ]
)
for output in outputs:
assert isinstance(output, AIMessage)
diff --git a/libs/community/tests/integration_tests/chat_models/test_outlines.py b/libs/community/tests/integration_tests/chat_models/test_outlines.py
index 8f293e6532c51..b0c22981ff53b 100644
--- a/libs/community/tests/integration_tests/chat_models/test_outlines.py
+++ b/libs/community/tests/integration_tests/chat_models/test_outlines.py
@@ -80,9 +80,9 @@ def test_chat_outlines_regex(chat_model: ChatOutlines) -> None:
output = chat_model.invoke(messages)
assert isinstance(output, AIMessage)
- assert re.match(
- ip_regex, str(output.content)
- ), f"Generated output '{output.content}' is not a valid IP address"
+ assert re.match(ip_regex, str(output.content)), (
+ f"Generated output '{output.content}' is not a valid IP address"
+ )
def test_chat_outlines_type_constraints(chat_model: ChatOutlines) -> None:
@@ -129,14 +129,14 @@ def test_chat_outlines_grammar(chat_model: ChatOutlines) -> None:
output = chat_model.invoke(messages)
# Validate the output is a non-empty string
- assert (
- isinstance(output.content, str) and output.content.strip()
- ), "Output should be a non-empty string"
+ assert isinstance(output.content, str) and output.content.strip(), (
+ "Output should be a non-empty string"
+ )
# Use a simple regex to check if the output contains basic arithmetic operations and numbers
- assert re.search(
- r"[\d\+\-\*/\(\)]+", output.content
- ), f"Generated output '{output.content}' does not appear to be a valid arithmetic expression"
+ assert re.search(r"[\d\+\-\*/\(\)]+", output.content), (
+ f"Generated output '{output.content}' does not appear to be a valid arithmetic expression"
+ )
def test_chat_outlines_with_structured_output(chat_model: ChatOutlines) -> None:
diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
index 5a10353bc2f3b..37c1a8ecfde4a 100644
--- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py
+++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
@@ -112,8 +112,7 @@ def test_vertexai_single_call_with_context() -> None:
def test_multimodal() -> None:
llm = ChatVertexAI(model_name="gemini-ultra-vision")
gcs_url = (
- "gs://cloud-samples-data/generative-ai/image/"
- "320px-Felis_catus-cat_on_snow.jpg"
+ "gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg"
)
image_message = {
"type": "image_url",
@@ -131,8 +130,7 @@ def test_multimodal() -> None:
def test_multimodal_history() -> None:
llm = ChatVertexAI(model_name="gemini-ultra-vision")
gcs_url = (
- "gs://cloud-samples-data/generative-ai/image/"
- "320px-Felis_catus-cat_on_snow.jpg"
+ "gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg"
)
image_message = {
"type": "image_url",
diff --git a/libs/community/tests/integration_tests/document_loaders/test_arxiv.py b/libs/community/tests/integration_tests/document_loaders/test_arxiv.py
index ceeb48d7507f7..765a0b3ed52d3 100644
--- a/libs/community/tests/integration_tests/document_loaders/test_arxiv.py
+++ b/libs/community/tests/integration_tests/document_loaders/test_arxiv.py
@@ -58,7 +58,12 @@ def test_load_returns_full_set_of_metadata() -> None:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset(
- {"Published", "Title", "Authors", "Summary"}
+ {
+ "Published",
+ "Title",
+ "Authors",
+ "Summary",
+ }
)
print(doc.metadata) # noqa: T201
assert len(set(doc.metadata)) > 4
diff --git a/libs/community/tests/integration_tests/graphs/test_neo4j.py b/libs/community/tests/integration_tests/graphs/test_neo4j.py
index 761cbc95e4b8e..578875a81139b 100644
--- a/libs/community/tests/integration_tests/graphs/test_neo4j.py
+++ b/libs/community/tests/integration_tests/graphs/test_neo4j.py
@@ -367,7 +367,7 @@ def test_enhanced_schema_exception() -> None:
url=url, username=username, password=password, enhanced_schema=True
)
graph.query("MATCH (n) DETACH DELETE n")
- graph.query("CREATE (:Node {foo:'bar'})," "(:Node {foo: 1}), (:Node {foo: [1,2]})")
+ graph.query("CREATE (:Node {foo:'bar'}),(:Node {foo: 1}), (:Node {foo: [1,2]})")
graph.refresh_schema()
expected_output = {
"node_props": {"Node": [{"property": "foo", "type": "STRING"}]},
diff --git a/libs/community/tests/integration_tests/indexes/test_document_manager.py b/libs/community/tests/integration_tests/indexes/test_document_manager.py
index b2b99b449c685..ba297efc35a8f 100644
--- a/libs/community/tests/integration_tests/indexes/test_document_manager.py
+++ b/libs/community/tests/integration_tests/indexes/test_document_manager.py
@@ -93,7 +93,10 @@ async def test_aupdate_timestamp(amanager: MongoDocumentManager) -> None:
records = [
doc
async for doc in amanager.async_collection.find(
- {"namespace": amanager.namespace, "key": "key1"}
+ {
+ "namespace": amanager.namespace,
+ "key": "key1",
+ }
)
]
@@ -165,7 +168,10 @@ def test_list_keys(manager: MongoDocumentManager) -> None:
manager.update(["key4"], group_ids=["group1"])
assert sorted(manager.list_keys()) == sorted(["key1", "key2", "key3", "key4"])
assert sorted(manager.list_keys(after=datetime(2022, 2, 1).timestamp())) == sorted(
- ["key3", "key4"]
+ [
+ "key3",
+ "key4",
+ ]
)
assert sorted(manager.list_keys(group_ids=["group1", "group2"])) == sorted(["key4"])
@@ -191,7 +197,12 @@ async def test_alist_keys(amanager: MongoDocumentManager) -> None:
):
await amanager.aupdate(["key4"], group_ids=["group1"])
assert sorted(await amanager.alist_keys()) == sorted(
- ["key1", "key2", "key3", "key4"]
+ [
+ "key1",
+ "key2",
+ "key3",
+ "key4",
+ ]
)
assert sorted(
await amanager.alist_keys(after=datetime(2022, 2, 1).timestamp())
@@ -244,7 +255,10 @@ async def test_anamespace_is_used(amanager: MongoDocumentManager) -> None:
await amanager.aupdate(["key3"], group_ids=["group3"])
assert (
await amanager.async_collection.find_one(
- {"key": "key3", "namespace": "kittens"}
+ {
+ "key": "key3",
+ "namespace": "kittens",
+ }
)
)["group_id"] == "group3"
diff --git a/libs/community/tests/integration_tests/llms/test_fireworks.py b/libs/community/tests/integration_tests/llms/test_fireworks.py
index 28b1df2870912..1b8305a678d3b 100644
--- a/libs/community/tests/integration_tests/llms/test_fireworks.py
+++ b/libs/community/tests/integration_tests/llms/test_fireworks.py
@@ -131,7 +131,10 @@ async def test_fireworks_async_agenerate(llm: Fireworks) -> None:
@pytest.mark.scheduled
async def test_fireworks_multiple_prompts_async_agenerate(llm: Fireworks) -> None:
output = await llm.agenerate(
- ["How is the weather in New York today?", "I'm pickle rick"]
+ [
+ "How is the weather in New York today?",
+ "I'm pickle rick",
+ ]
)
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
diff --git a/libs/community/tests/integration_tests/llms/test_outlines.py b/libs/community/tests/integration_tests/llms/test_outlines.py
index db0e043723442..42828db7142cb 100644
--- a/libs/community/tests/integration_tests/llms/test_outlines.py
+++ b/libs/community/tests/integration_tests/llms/test_outlines.py
@@ -72,9 +72,9 @@ def test_outlines_regex(llm: Outlines) -> None:
assert isinstance(output, str)
- assert re.match(
- ip_regex, output
- ), f"Generated output '{output}' is not a valid IP address"
+ assert re.match(ip_regex, output), (
+ f"Generated output '{output}' is not a valid IP address"
+ )
def test_outlines_type_constraints(llm: Outlines) -> None:
@@ -113,11 +113,11 @@ def test_outlines_grammar(llm: Outlines) -> None:
output = llm.invoke("Here is a complex arithmetic expression: ")
# Validate the output is a non-empty string
- assert (
- isinstance(output, str) and output.strip()
- ), "Output should be a non-empty string"
+ assert isinstance(output, str) and output.strip(), (
+ "Output should be a non-empty string"
+ )
# Use a simple regex to check if the output contains basic arithmetic operations and numbers
- assert re.search(
- r"[\d\+\-\*/\(\)]+", output
- ), f"Generated output '{output}' does not appear to be a valid arithmetic expression"
+ assert re.search(r"[\d\+\-\*/\(\)]+", output), (
+ f"Generated output '{output}' does not appear to be a valid arithmetic expression"
+ )
diff --git a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
index ea3a5d4815a3c..4bfa7b7937fb2 100644
--- a/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
+++ b/libs/community/tests/integration_tests/retrievers/docarray/fixtures.py
@@ -22,13 +22,11 @@
@pytest.fixture
-def init_weaviate() -> (
- Generator[
- Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings],
- None,
- None,
- ]
-):
+def init_weaviate() -> Generator[
+ Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings],
+ None,
+ None,
+]:
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
@@ -75,9 +73,9 @@ class WeaviateDoc(BaseDoc):
@pytest.fixture
-def init_elastic() -> (
- Generator[Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None]
-):
+def init_elastic() -> Generator[
+ Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None
+]:
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
diff --git a/libs/community/tests/integration_tests/retrievers/test_dria_index.py b/libs/community/tests/integration_tests/retrievers/test_dria_index.py
index f3e3423bec522..50104b65b3f66 100644
--- a/libs/community/tests/integration_tests/retrievers/test_dria_index.py
+++ b/libs/community/tests/integration_tests/retrievers/test_dria_index.py
@@ -34,8 +34,8 @@ def test_dria_retriever(dria_retriever: DriaRetriever) -> None:
doc = docs[0]
assert isinstance(doc, Document), "Expected a Document instance"
assert isinstance(doc.page_content, str), (
- "Expected document content type " "to be string"
+ "Expected document content type to be string"
+ )
+        assert isinstance(doc.metadata, dict), (
+            "Expected document metadata content to be a dictionary"
)
- assert isinstance(
- doc.metadata, dict
- ), "Expected document metadata content to be a dictionary"
diff --git a/libs/community/tests/integration_tests/retrievers/test_kay.py b/libs/community/tests/integration_tests/retrievers/test_kay.py
index ac6202d89c77d..4dc142769e986 100644
--- a/libs/community/tests/integration_tests/retrievers/test_kay.py
+++ b/libs/community/tests/integration_tests/retrievers/test_kay.py
@@ -14,8 +14,7 @@ def test_kay_retriever() -> None:
num_contexts=3,
)
docs = retriever.invoke(
- "What were the biggest strategy changes and partnerships made by Roku "
- "in 2023?",
+ "What were the biggest strategy changes and partnerships made by Roku in 2023?",
)
assert len(docs) == 3
for doc in docs:
diff --git a/libs/community/tests/integration_tests/tools/nuclia/test_nuclia.py b/libs/community/tests/integration_tests/tools/nuclia/test_nuclia.py
index 9577102eb514d..d5d23049aa8f7 100644
--- a/libs/community/tests/integration_tests/tools/nuclia/test_nuclia.py
+++ b/libs/community/tests/integration_tests/tools/nuclia/test_nuclia.py
@@ -83,7 +83,12 @@ def test_nuclia_tool() -> None:
)
assert uuid == "fake_uuid"
data = nua.run(
- {"action": "pull", "id": "1", "path": None, "text": None}
+ {
+ "action": "pull",
+ "id": "1",
+ "path": None,
+ "text": None,
+ }
)
assert json.loads(data)["uuid"] == "fake_uuid"
diff --git a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
index 238ff93b0bb4d..c68bc34436f9b 100644
--- a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
+++ b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
@@ -32,12 +32,12 @@ def assert_detectors_response(
if resp["detector"] == detector.value
)
)
- assert (
- "err" not in common_response
- ), f"API returned an error: {common_response.get('err')}" # noqa: E501
- assert (
- common_response.get("is_detected") is False
- ), f"Prompt was detected: {common_response}" # noqa: E501
+ assert "err" not in common_response, (
+ f"API returned an error: {common_response.get('err')}"
+ ) # noqa: E501
+ assert common_response.get("is_detected") is False, (
+ f"Prompt was detected: {common_response}"
+ ) # noqa: E501
def test_prompt_injection(zenguard_tool: ZenGuardTool) -> None:
diff --git a/libs/community/tests/integration_tests/utilities/test_arxiv.py b/libs/community/tests/integration_tests/utilities/test_arxiv.py
index e4fa9e4aa567a..60c7cc413a413 100644
--- a/libs/community/tests/integration_tests/utilities/test_arxiv.py
+++ b/libs/community/tests/integration_tests/utilities/test_arxiv.py
@@ -135,7 +135,12 @@ def test_load_returns_full_set_of_metadata() -> None:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset(
- {"Published", "Title", "Authors", "Summary"}
+ {
+ "Published",
+ "Title",
+ "Authors",
+ "Summary",
+ }
)
print(doc.metadata) # noqa: T201
assert len(set(doc.metadata)) > 4
@@ -152,9 +157,9 @@ def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool:
def test_load_arxiv_from_universal_entry() -> None:
arxiv_tool = _load_arxiv_from_universal_entry()
output = arxiv_tool.invoke("Caprice Stanley")
- assert (
- "On Mixing Behavior of a Family of Random Walks" in output
- ), "failed to fetch a valid result"
+ assert "On Mixing Behavior of a Family of Random Walks" in output, (
+ "failed to fetch a valid result"
+ )
def test_load_arxiv_from_universal_entry_with_params() -> None:
@@ -168,6 +173,6 @@ def test_load_arxiv_from_universal_entry_with_params() -> None:
wp = arxiv_tool.api_wrapper
assert wp.top_k_results == 1, "failed to assert top_k_results"
assert wp.load_max_docs == 10, "failed to assert load_max_docs"
- assert (
- wp.load_all_available_meta is True
- ), "failed to assert load_all_available_meta"
+ assert wp.load_all_available_meta is True, (
+ "failed to assert load_all_available_meta"
+ )
diff --git a/libs/community/tests/integration_tests/utilities/test_clickup.py b/libs/community/tests/integration_tests/utilities/test_clickup.py
index c7b1f641db8f6..5f0c284f69ca3 100644
--- a/libs/community/tests/integration_tests/utilities/test_clickup.py
+++ b/libs/community/tests/integration_tests/utilities/test_clickup.py
@@ -79,7 +79,11 @@ def test_task_related(clickup_wrapper: ClickupAPIWrapper) -> None:
clickup_wrapper.run(
mode="update_task",
query=json.dumps(
- {"task_id": task_id, "attribute_name": "name", "value": new_name}
+ {
+ "task_id": task_id,
+ "attribute_name": "name",
+ "value": new_name,
+ }
),
)
diff --git a/libs/community/tests/integration_tests/utilities/test_pubmed.py b/libs/community/tests/integration_tests/utilities/test_pubmed.py
index e974ae78a7444..a11f324d8290d 100644
--- a/libs/community/tests/integration_tests/utilities/test_pubmed.py
+++ b/libs/community/tests/integration_tests/utilities/test_pubmed.py
@@ -130,7 +130,12 @@ def test_load_returns_full_set_of_metadata() -> None:
for doc in docs:
assert doc.metadata
assert set(doc.metadata).issuperset(
- {"Copyright Information", "Published", "Title", "uid"}
+ {
+ "Copyright Information",
+ "Published",
+ "Title",
+ "uid",
+ }
)
diff --git a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py
index bbaca0775be7c..784465d4639b3 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db_no_sql.py
@@ -418,8 +418,7 @@ def _get_texts_and_metadata(self) -> Tuple[List[str], List[Dict[str, Any]]]:
"energetic herders skilled in outdoor activities.",
"Golden Retrievers are friendly, "
"loyal companions with excellent retrieving skills.",
- "Labrador Retrievers are playful, "
- "eager learners and skilled retrievers.",
+ "Labrador Retrievers are playful, eager learners and skilled retrievers.",
"Australian Shepherds are agile, "
"energetic herders excelling in outdoor tasks.",
"German Shepherds are brave, "
diff --git a/libs/community/tests/integration_tests/vectorstores/test_duckdb.py b/libs/community/tests/integration_tests/vectorstores/test_duckdb.py
index b724dcf0542d9..25ef054812eb6 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_duckdb.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_duckdb.py
@@ -93,18 +93,18 @@ def test_duckdb_add_texts_with_metadata(
# Check if the metadata is correctly associated with the texts
assert len(result) == 2, "Should return two results"
- assert (
- result[0].metadata.get("author") == "Author 1"
- ), "Metadata for Author 1 should be correctly retrieved"
- assert (
- result[0].metadata.get("date") == "2021-01-01"
- ), "Date for Author 1 should be correctly retrieved"
- assert (
- result[1].metadata.get("author") == "Author 2"
- ), "Metadata for Author 2 should be correctly retrieved"
- assert (
- result[1].metadata.get("date") == "2021-02-01"
- ), "Date for Author 2 should be correctly retrieved"
+ assert result[0].metadata.get("author") == "Author 1", (
+ "Metadata for Author 1 should be correctly retrieved"
+ )
+ assert result[0].metadata.get("date") == "2021-01-01", (
+ "Date for Author 1 should be correctly retrieved"
+ )
+ assert result[1].metadata.get("author") == "Author 2", (
+ "Metadata for Author 2 should be correctly retrieved"
+ )
+ assert result[1].metadata.get("date") == "2021-02-01", (
+ "Date for Author 2 should be correctly retrieved"
+ )
@pytest.mark.requires("duckdb")
@@ -127,9 +127,9 @@ def test_duckdb_add_texts_with_predefined_ids(
result = store.similarity_search(text)
found_texts = [doc.page_content for doc in result]
- assert (
- text in found_texts
- ), f"Text '{text}' was not found in the search results."
+ assert text in found_texts, (
+ f"Text '{text}' was not found in the search results."
+ )
@pytest.mark.requires("duckdb")
diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
index 2842938986c3d..ecde8eb54748a 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py
@@ -896,9 +896,9 @@ def test_elasticsearch_with_user_agent(
pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
match = re.match(pattern, user_agent)
- assert (
- match is not None
- ), f"The string '{user_agent}' does not match the expected pattern."
+ assert match is not None, (
+ f"The string '{user_agent}' does not match the expected pattern."
+ )
def test_elasticsearch_with_internal_user_agent(
self, elasticsearch_connection: Dict, index_name: str
@@ -917,9 +917,9 @@ def test_elasticsearch_with_internal_user_agent(
pattern = r"^langchain-py-vs/\d+\.\d+\.\d+$"
match = re.match(pattern, user_agent)
- assert (
- match is not None
- ), f"The string '{user_agent}' does not match the expected pattern."
+ assert match is not None, (
+ f"The string '{user_agent}' does not match the expected pattern."
+ )
def test_bulk_args(self, es_client: Any, index_name: str) -> None:
"""Test to make sure the user-agent is set correctly."""
diff --git a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
index e1ffcd7af0eac..370fe9b43bec3 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py
@@ -1133,7 +1133,7 @@ def test_preexisting_specific_columns_for_metadata_fill(
c = 0
try:
- sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'"
+ sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'"
cur = test_setup.conn.cursor()
cur.execute(sql_str)
if cur.has_result_set():
@@ -1195,7 +1195,7 @@ def test_preexisting_specific_columns_for_metadata_via_array(
c = 0
try:
- sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'"
+ sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"quality\"='ugly'"
cur = test_setup.conn.cursor()
cur.execute(sql_str)
if cur.has_result_set():
@@ -1206,7 +1206,7 @@ def test_preexisting_specific_columns_for_metadata_via_array(
assert c == 3
try:
- sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "Owner"=' f"'Steve'"
+ sql_str = f"SELECT COUNT(*) FROM {table_name} WHERE \"Owner\"='Steve'"
cur = test_setup.conn.cursor()
cur.execute(sql_str)
if cur.has_result_set():
diff --git a/libs/community/tests/integration_tests/vectorstores/test_lancedb.py b/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
index 615b310629d34..a322d95556aa8 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_lancedb.py
@@ -147,8 +147,8 @@ def test_lancedb_no_metadata() -> None:
result = store.similarity_search("text 1")
# Verify that the metadata in the Document objects is an empty dictionary
for doc in result:
- assert (
- doc.metadata == {}
- ), "Expected empty metadata when 'metadata' column is missing"
+ assert doc.metadata == {}, (
+ "Expected empty metadata when 'metadata' column is missing"
+ )
# Clean up by deleting the table (optional)
db.drop_table("vectorstore_no_metadata")
diff --git a/libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py b/libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py
index d8586e089ccf9..ad0fcaacc5926 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py
@@ -471,7 +471,7 @@ def test_neo4jvector_missing_keyword() -> None:
)
except ValueError as e:
assert str(e) == (
- "keyword_index name has to be specified when " "using hybrid search option"
+ "keyword_index name has to be specified when using hybrid search option"
)
drop_vector_indexes(docsearch)
@@ -522,7 +522,7 @@ def test_neo4jvector_from_existing_graph() -> None:
graph.query("MATCH (n) DETACH DELETE n")
- graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})")
+ graph.query("CREATE (:Test {name:'Foo'}),(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
@@ -558,7 +558,7 @@ def test_neo4jvector_from_existing_graph_hybrid() -> None:
graph.query("MATCH (n) DETACH DELETE n")
- graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})")
+ graph.query("CREATE (:Test {name:'foo'}),(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
@@ -594,7 +594,7 @@ def test_neo4jvector_from_existing_graph_multiple_properties() -> None:
)
graph.query("MATCH (n) DETACH DELETE n")
- graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
+ graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'}),(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
@@ -629,7 +629,7 @@ def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None:
)
graph.query("MATCH (n) DETACH DELETE n")
- graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
+ graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'}),(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
diff --git a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
index 4f6bc17173002..2711f64343aed 100644
--- a/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
+++ b/libs/community/tests/unit_tests/chat_loaders/test_imessage.py
@@ -23,9 +23,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None:
# time parsed correctly
expected_message_time = 720845450393148160
- assert (
- first_message.additional_kwargs["message_time"] == expected_message_time
- ), "unexpected time"
+ assert first_message.additional_kwargs["message_time"] == expected_message_time, (
+ "unexpected time"
+ )
expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
assert (
@@ -34,9 +34,9 @@ def test_imessage_chat_loader_upgrade_osx11() -> None:
), "date failed to parse"
# is_from_me parsed correctly
- assert (
- first_message.additional_kwargs["is_from_me"] is False
- ), "is_from_me failed to parse"
+ assert first_message.additional_kwargs["is_from_me"] is False, (
+ "is_from_me failed to parse"
+ )
def test_imessage_chat_loader() -> None:
@@ -57,9 +57,9 @@ def test_imessage_chat_loader() -> None:
# time parsed correctly
expected_message_time = 720845450393148160
- assert (
- first_message.additional_kwargs["message_time"] == expected_message_time
- ), "unexpected time"
+ assert first_message.additional_kwargs["message_time"] == expected_message_time, (
+ "unexpected time"
+ )
expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
assert (
@@ -68,14 +68,14 @@ def test_imessage_chat_loader() -> None:
), "date failed to parse"
# is_from_me parsed correctly
- assert (
- first_message.additional_kwargs["is_from_me"] is False
- ), "is_from_me failed to parse"
+ assert first_message.additional_kwargs["is_from_me"] is False, (
+ "is_from_me failed to parse"
+ )
# short message content in attributedBody field
- assert (
- "John is the almighty" in chat_sessions[0]["messages"][16].content
- ), "Chat content mismatch"
+ assert "John is the almighty" in chat_sessions[0]["messages"][16].content, (
+ "Chat content mismatch"
+ )
# long message content in attributedBody field
long_msg = "aaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbbaaaaabbbbba"
diff --git a/libs/community/tests/unit_tests/chat_loaders/test_slack.py b/libs/community/tests/unit_tests/chat_loaders/test_slack.py
index 0ab08b679bf88..62ef8f9085a2f 100644
--- a/libs/community/tests/unit_tests/chat_loaders/test_slack.py
+++ b/libs/community/tests/unit_tests/chat_loaders/test_slack.py
@@ -14,6 +14,6 @@ def test_slack_chat_loader() -> None:
assert chat_sessions[1]["messages"], "Chat messages should not be empty"
- assert (
- "Example message" in chat_sessions[1]["messages"][0].content
- ), "Chat content mismatch"
+ assert "Example message" in chat_sessions[1]["messages"][0].content, (
+ "Chat content mismatch"
+ )
diff --git a/libs/community/tests/unit_tests/chat_message_histories/test_dynamodb_chat_message_history.py b/libs/community/tests/unit_tests/chat_message_histories/test_dynamodb_chat_message_history.py
index b88443e25fd02..032a89addd256 100644
--- a/libs/community/tests/unit_tests/chat_message_histories/test_dynamodb_chat_message_history.py
+++ b/libs/community/tests/unit_tests/chat_message_histories/test_dynamodb_chat_message_history.py
@@ -116,7 +116,10 @@ def test_add_message(
"Item"
]
assert item_after_ai_message[HISTORY_KEY] == messages_to_dict(
- [first_message, second_message]
+ [
+ first_message,
+ second_message,
+ ]
) # Second message should have appended
assert (
item_after_ai_message[TTL_KEY] == mock_time_2 + chat_history_config["ttl"]
diff --git a/libs/community/tests/unit_tests/chat_message_histories/test_sql.py b/libs/community/tests/unit_tests/chat_message_histories/test_sql.py
index 9e3eeac7b6115..c62e5159a074f 100644
--- a/libs/community/tests/unit_tests/chat_message_histories/test_sql.py
+++ b/libs/community/tests/unit_tests/chat_message_histories/test_sql.py
@@ -10,6 +10,7 @@
class Base(DeclarativeBase):
pass
+
except ImportError:
# for sqlalchemy < 2
from sqlalchemy.ext.declarative import declarative_base
@@ -81,7 +82,10 @@ def test_add_messages(
) -> None:
sql_history, other_history = sql_histories
sql_history.add_messages(
- [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
+ [
+ HumanMessage(content="Hello!"),
+ AIMessage(content="Hi there!"),
+ ]
)
messages = sql_history.messages
@@ -98,7 +102,10 @@ async def test_async_add_messages(
) -> None:
sql_history, other_history = asql_histories
await sql_history.aadd_messages(
- [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
+ [
+ HumanMessage(content="Hello!"),
+ AIMessage(content="Hi there!"),
+ ]
)
messages = await sql_history.aget_messages()
@@ -175,7 +182,10 @@ def test_clear_messages(
) -> None:
sql_history, other_history = sql_histories
sql_history.add_messages(
- [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
+ [
+ HumanMessage(content="Hello!"),
+ AIMessage(content="Hi there!"),
+ ]
)
assert len(sql_history.messages) == 2
# Now create another history with different session id
@@ -194,7 +204,10 @@ async def test_async_clear_messages(
) -> None:
sql_history, other_history = asql_histories
await sql_history.aadd_messages(
- [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
+ [
+ HumanMessage(content="Hello!"),
+ AIMessage(content="Hi there!"),
+ ]
)
assert len(await sql_history.aget_messages()) == 2
# Now create another history with different session id
diff --git a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
index c14154a2842de..d7142b1091ff5 100644
--- a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
+++ b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
@@ -25,8 +25,8 @@ def __init__(self):
hello("Hello!")"""
self.expected_extracted_code = [
- "def hello(text):\n" " print(text)",
- "class Simple:\n" " def __init__(self):\n" " self.a = 1",
+ "def hello(text):\n print(text)",
+ "class Simple:\n def __init__(self):\n self.a = 1",
]
def test_extract_functions_classes(self) -> None:
diff --git a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
index 75a3b08bf79d9..9fd8192335d72 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py
@@ -113,11 +113,11 @@ def test_csv_loader_content_columns(self) -> None:
file_path = self._get_csv_file_path("test_none_col.csv")
expected_docs = [
Document(
- page_content="column1: value1\n" "column3: value3",
+ page_content="column1: value1\ncolumn3: value3",
metadata={"source": file_path, "row": 0},
),
Document(
- page_content="column1: value6\n" "column3: value8",
+ page_content="column1: value6\ncolumn3: value8",
metadata={"source": file_path, "row": 1},
),
]
diff --git a/libs/community/tests/unit_tests/document_loaders/test_mongodb.py b/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
index 72ed08905f745..75ae18e6b773e 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
@@ -50,11 +50,12 @@ async def test_load_mocked_with_filters(expected_documents: List[Document]) -> N
mock_collection.find = mock_find
mock_collection.count_documents = mock_count_documents
- with patch(
- "motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock()
- ), patch(
- "langchain_community.document_loaders.mongodb.MongodbLoader.aload",
- new=mock_async_load,
+ with (
+ patch("motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock()),
+ patch(
+ "langchain_community.document_loaders.mongodb.MongodbLoader.aload",
+ new=mock_async_load,
+ ),
):
loader = MongodbLoader(
"mongodb://localhost:27017",
diff --git a/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py b/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
index f66fee6c0bd4c..3eea5c48e7055 100644
--- a/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
+++ b/libs/community/tests/unit_tests/document_transformers/test_html2text_transformer.py
@@ -25,10 +25,7 @@ def test_extract_paragraphs() -> None:
documents = [Document(page_content=paragraphs_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# Header\n\n"
- "First paragraph.\n\n"
- "Second paragraph.\n\n"
- "# Ignore at end\n\n"
+ "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end\n\n"
)
@@ -78,14 +75,13 @@ def test_ignore_links() -> None:
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# First heading.\n\n"
- "First paragraph with an [example](http://example.com)\n\n"
+ "# First heading.\n\nFirst paragraph with an [example](http://example.com)\n\n"
)
html2text_transformer = Html2TextTransformer(ignore_links=True)
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# First heading.\n\n" "First paragraph with an example\n\n"
+ "# First heading.\n\nFirst paragraph with an example\n\n"
)
@@ -101,12 +97,11 @@ def test_ignore_images() -> None:
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# First heading.\n\n"
- "First paragraph with an ![Example image](example.jpg)\n\n"
+ "# First heading.\n\nFirst paragraph with an ![Example image](example.jpg)\n\n"
)
html2text_transformer = Html2TextTransformer(ignore_images=True)
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# First heading.\n\n" "First paragraph with an\n\n"
+ "# First heading.\n\nFirst paragraph with an\n\n"
)
diff --git a/libs/community/tests/unit_tests/document_transformers/test_markdownify.py b/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
index 1ce407289dd4a..4b10d94371c13 100644
--- a/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
+++ b/libs/community/tests/unit_tests/document_transformers/test_markdownify.py
@@ -25,7 +25,7 @@ def test_extract_paragraphs() -> None:
documents = [Document(page_content=paragraphs_html)]
docs_transformed = markdownify.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "# Header\n\n" "First paragraph.\n\n" "Second paragraph.\n\n" "# Ignore at end"
+ "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end"
)
@@ -115,10 +115,7 @@ def test_convert_tags() -> None:
documents = [Document(page_content=paragraphs_html)]
docs_transformed = markdownify.transform_documents(documents)
assert docs_transformed[0].page_content == (
- "Header "
- "1st paragraph.\n\n "
- "2nd paragraph. Here is link\n\n "
- "Ignore at end"
+ "Header 1st paragraph.\n\n 2nd paragraph. Here is link\n\n Ignore at end"
)
@@ -161,7 +158,7 @@ async def test_extract_paragraphs_async() -> None:
documents = [Document(page_content=paragraphs_html)]
docs_transformed = await markdownify.atransform_documents(documents)
assert docs_transformed[0].page_content == (
- "# Header\n\n" "First paragraph.\n\n" "Second paragraph.\n\n" "# Ignore at end"
+ "# Header\n\nFirst paragraph.\n\nSecond paragraph.\n\n# Ignore at end"
)
@@ -251,10 +248,7 @@ async def test_convert_tags_async() -> None:
documents = [Document(page_content=paragraphs_html)]
docs_transformed = await markdownify.atransform_documents(documents)
assert docs_transformed[0].page_content == (
- "Header "
- "1st paragraph.\n\n "
- "2nd paragraph. Here is link\n\n "
- "Ignore at end"
+ "Header 1st paragraph.\n\n 2nd paragraph. Here is link\n\n Ignore at end"
)
diff --git a/libs/community/tests/unit_tests/embeddings/test_deterministic_embedding.py b/libs/community/tests/unit_tests/embeddings/test_deterministic_embedding.py
index df8bcb276589a..da0b0b30cdb3f 100644
--- a/libs/community/tests/unit_tests/embeddings/test_deterministic_embedding.py
+++ b/libs/community/tests/unit_tests/embeddings/test_deterministic_embedding.py
@@ -12,5 +12,8 @@ def test_deterministic_fake_embeddings() -> None:
assert fake.embed_query(text) != fake.embed_query("Goodbye world!")
assert fake.embed_documents([text, text]) == fake.embed_documents([text, text])
assert fake.embed_documents([text, text]) != fake.embed_documents(
- [text, "Goodbye world!"]
+ [
+ text,
+ "Goodbye world!",
+ ]
)
diff --git a/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py b/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
index e1acdd75e60a8..9b12444caccac 100644
--- a/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
+++ b/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
@@ -40,7 +40,10 @@ def mocked_response(invocation_obj): # type: ignore[no-untyped-def]
embeddings.append(v)
return MockResponseDict(
- {"status": 200, "data": MockResponseDict({"embeddings": embeddings})}
+ {
+ "status": 200,
+ "data": MockResponseDict({"embeddings": embeddings}),
+ }
)
monkeypatch.setattr(embeddings.client, "embed_text", mocked_response)
diff --git a/libs/community/tests/unit_tests/query_constructors/test_milvus.py b/libs/community/tests/unit_tests/query_constructors/test_milvus.py
index 70035628adbe8..5369e1e04f505 100644
--- a/libs/community/tests/unit_tests/query_constructors/test_milvus.py
+++ b/libs/community/tests/unit_tests/query_constructors/test_milvus.py
@@ -45,7 +45,7 @@ def test_visit_operation() -> None:
],
)
- expected = '(( foo < 2 ) and ( bar == "baz" ) ' 'and ( abc < "4" ))'
+ expected = '(( foo < 2 ) and ( bar == "baz" ) and ( abc < "4" ))'
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
@@ -122,7 +122,7 @@ def test_visit_structured_query() -> None:
expected = (
query,
- {"expr": "(( foo < 2 ) " 'and ( bar == "baz" ) ' "and ( abc < 50 ))"},
+ {"expr": '(( foo < 2 ) and ( bar == "baz" ) and ( abc < 50 ))'},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
diff --git a/libs/community/tests/unit_tests/storage/test_sql.py b/libs/community/tests/unit_tests/storage/test_sql.py
index 1f4163224b04e..4744155b17a32 100644
--- a/libs/community/tests/unit_tests/storage/test_sql.py
+++ b/libs/community/tests/unit_tests/storage/test_sql.py
@@ -86,7 +86,10 @@ async def test_async_sample_sql_docstore(async_sql_store: SQLStore) -> None:
# Get values for keys
values = await async_sql_store.amget(
- ["key1", "key2"]
+ [
+ "key1",
+ "key2",
+ ]
) # Returns [b"value1", b"value2"]
assert values == [b"value1", b"value2"]
# Delete keys
diff --git a/libs/community/tests/unit_tests/test_imports.py b/libs/community/tests/unit_tests/test_imports.py
index 59152b3b0468e..8d510f7d4928f 100644
--- a/libs/community/tests/unit_tests/test_imports.py
+++ b/libs/community/tests/unit_tests/test_imports.py
@@ -165,6 +165,6 @@ def test_init_files_properly_defined() -> None:
missing_imports = set(module.__all__) - set(names)
- assert (
- not missing_imports
- ), f"Missing imports: {missing_imports} in file path: {path}"
+ assert not missing_imports, (
+ f"Missing imports: {missing_imports} in file path: {path}"
+ )
diff --git a/libs/community/tests/unit_tests/tools/audio/test_tools.py b/libs/community/tests/unit_tests/tools/audio/test_tools.py
index 30cacb7b7b0b5..d5bb6256d4567 100644
--- a/libs/community/tests/unit_tests/tools/audio/test_tools.py
+++ b/libs/community/tests/unit_tests/tools/audio/test_tools.py
@@ -44,11 +44,12 @@ def test_huggingface_tts_constructor() -> None:
def test_huggingface_tts_run_with_requests_mock() -> None:
os.environ["HUGGINGFACE_API_KEY"] = "foo"
- with tempfile.TemporaryDirectory() as tmp_dir, patch(
- "uuid.uuid4"
- ) as mock_uuid, patch("requests.post") as mock_inference, patch(
- "builtins.open", mock_open()
- ) as mock_file:
+ with (
+ tempfile.TemporaryDirectory() as tmp_dir,
+ patch("uuid.uuid4") as mock_uuid,
+ patch("requests.post") as mock_inference,
+ patch("builtins.open", mock_open()) as mock_file,
+ ):
input_query = "Dummy input"
mock_uuid_value = uuid.UUID("00000000-0000-0000-0000-000000000000")
diff --git a/libs/community/tests/unit_tests/tools/file_management/test_copy.py b/libs/community/tests/unit_tests/tools/file_management/test_copy.py
index fd154b1535ac8..186d26b30e39d 100644
--- a/libs/community/tests/unit_tests/tools/file_management/test_copy.py
+++ b/libs/community/tests/unit_tests/tools/file_management/test_copy.py
@@ -46,7 +46,10 @@ def test_copy_file() -> None:
destination_file = Path(temp_dir) / "destination.txt"
source_file.write_text("Hello, world!")
tool.run(
- {"source_path": str(source_file), "destination_path": str(destination_file)}
+ {
+ "source_path": str(source_file),
+ "destination_path": str(destination_file),
+ }
)
assert source_file.exists()
assert destination_file.exists()
diff --git a/libs/community/tests/unit_tests/tools/file_management/test_move.py b/libs/community/tests/unit_tests/tools/file_management/test_move.py
index 10de7f734bde1..9401e733b0efd 100644
--- a/libs/community/tests/unit_tests/tools/file_management/test_move.py
+++ b/libs/community/tests/unit_tests/tools/file_management/test_move.py
@@ -45,7 +45,10 @@ def test_move_file() -> None:
destination_file = Path(temp_dir) / "destination.txt"
source_file.write_text("Hello, world!")
tool.run(
- {"source_path": str(source_file), "destination_path": str(destination_file)}
+ {
+ "source_path": str(source_file),
+ "destination_path": str(destination_file),
+ }
)
assert not source_file.exists()
assert destination_file.exists()
diff --git a/libs/community/tests/unit_tests/vectorstores/test_azure_search.py b/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
index 0f1ae7356973f..e7aa37004c461 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_azure_search.py
@@ -220,9 +220,10 @@ def mock_upload_documents(self, documents: List[object]) -> List[Response]: # t
]
ids_provided = [i.metadata.get("id") for i in documents]
- with patch.object(
- SearchClient, "upload_documents", mock_upload_documents
- ), patch.object(SearchIndexClient, "get_index", mock_default_index):
+ with (
+ patch.object(SearchClient, "upload_documents", mock_upload_documents),
+ patch.object(SearchIndexClient, "get_index", mock_default_index),
+ ):
vector_store = create_vector_store()
ids_used_at_upload = vector_store.add_documents(documents, ids=ids_provided)
assert len(ids_provided) == len(ids_used_at_upload)
diff --git a/libs/community/tests/unit_tests/vectorstores/test_faiss.py b/libs/community/tests/unit_tests/vectorstores/test_faiss.py
index 739bd243f0eb0..2e749edc49eff 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_faiss.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_faiss.py
@@ -1714,9 +1714,9 @@ def test_ip_score() -> None:
scores = db.similarity_search_with_relevance_scores("sundays", k=1)
assert len(scores) == 1, "only one vector should be in db"
_, score = scores[0]
- assert (
- score == 1
- ), f"expected inner product of equivalent vectors to be 1, not {score}"
+ assert score == 1, (
+ f"expected inner product of equivalent vectors to be 1, not {score}"
+ )
@pytest.mark.requires("faiss")
diff --git a/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py b/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
index 36a4e959346bc..5af23b3719def 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_tencentvectordb.py
@@ -20,7 +20,7 @@ def test_translate_filter() -> None:
assert False
else:
result = translate_filter(raw_filter)
- expr = '(artist = "Taylor Swift" or artist = "Katy Perry") ' "and length < 180"
+ expr = '(artist = "Taylor Swift" or artist = "Katy Perry") and length < 180'
assert expr == result