Commit

lint
angus-langchain committed Dec 6, 2024
1 parent 173b9c3 commit 1979844
Showing 2 changed files with 16 additions and 11 deletions.
22 changes: 15 additions & 7 deletions python/langsmith/_internal/_operations.py
@@ -1,16 +1,17 @@
 from __future__ import annotations
 
Check notice on line 1 in python/langsmith/_internal/_operations.py (GitHub Actions / benchmark)

Benchmark results

create_5_000_run_trees:                        Mean +- std dev: 665 ms +- 88 ms     (unstable: std dev is 13% of mean)
create_10_000_run_trees:                       Mean +- std dev: 1.45 sec +- 0.23 sec (unstable: std dev is 16% of mean)
create_20_000_run_trees:                       Mean +- std dev: 1.46 sec +- 0.18 sec (unstable: std dev is 13% of mean)
dumps_class_nested_py_branch_and_leaf_200x400: Mean +- std dev: 708 us +- 10 us
dumps_class_nested_py_leaf_50x100:             Mean +- std dev: 25.5 ms +- 0.2 ms
dumps_class_nested_py_leaf_100x200:            Mean +- std dev: 105 ms +- 2 ms
dumps_dataclass_nested_50x100:                 Mean +- std dev: 25.7 ms +- 0.2 ms
dumps_pydantic_nested_50x100:                  Mean +- std dev: 72.9 ms +- 16.7 ms  (unstable: std dev is 23% of mean)
dumps_pydanticv1_nested_50x100:                Mean +- std dev: 200 ms +- 4 ms

For the unstable results, pyperf suggests rerunning with more runs, values and/or loops, and running 'python -m pyperf system tune' to reduce system jitter.

Check notice on line 1 in python/langsmith/_internal/_operations.py (GitHub Actions / benchmark)

Comparison against main

+-----------------------------------------------+----------+------------------------+
| Benchmark                                     | main     | changes                |
+===============================================+==========+========================+
| dumps_pydanticv1_nested_50x100                | 221 ms   | 200 ms: 1.11x faster   |
+-----------------------------------------------+----------+------------------------+
| create_5_000_run_trees                        | 724 ms   | 665 ms: 1.09x faster   |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_leaf_100x200            | 105 ms   | 105 ms: 1.00x slower   |
+-----------------------------------------------+----------+------------------------+
| dumps_dataclass_nested_50x100                 | 25.6 ms  | 25.7 ms: 1.00x slower  |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_branch_and_leaf_200x400 | 705 us   | 708 us: 1.00x slower   |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_leaf_50x100             | 25.1 ms  | 25.5 ms: 1.01x slower  |
+-----------------------------------------------+----------+------------------------+
| create_10_000_run_trees                       | 1.40 sec | 1.45 sec: 1.03x slower |
+-----------------------------------------------+----------+------------------------+
| create_20_000_run_trees                       | 1.39 sec | 1.46 sec: 1.05x slower |
+-----------------------------------------------+----------+------------------------+
| dumps_pydantic_nested_50x100                  | 66.2 ms  | 72.9 ms: 1.10x slower  |
+-----------------------------------------------+----------+------------------------+
| Geometric mean                                | (ref)    | 1.00x slower           |
+-----------------------------------------------+----------+------------------------+

+import io
 import itertools
 import logging
 import uuid
-from typing import Literal, Optional, Union, cast, Iterator, Sequence
+from typing import Iterator, Literal, Optional, Sequence, Union, cast
 
+import zstandard as zstd
+
 from langsmith import schemas as ls_schemas
 from langsmith._internal import _orjson
 from langsmith._internal._multipart import MultipartPart, MultipartPartsAndContext
 from langsmith._internal._serde import dumps_json as _dumps_json
-import zstandard as zstd
-import io
 
 logger = logging.getLogger(__name__)

@@ -340,7 +341,9 @@ def compress_multipart_stream(
         compressor = self.compressor.stream_writer(self.buffer)
 
         try:
-            for part_name, (filename, data, content_type, headers) in parts_and_contexts.parts:
+            for part_name, (filename, data, content_type, headers) in (
+                parts_and_contexts.parts
+            ):
                 # Write part headers
                 part_header = (
                     f'--{self.boundary}\r\n'
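The hunk above only shows the top of the loop. As a point of reference, here is a minimal self-contained sketch of the same streaming pattern: multipart headers and bodies written straight through a zstandard stream_writer into an in-memory buffer, so no uncompressed copy of the full payload is ever assembled. The compress_parts helper, its signature, and the exact header layout are illustrative assumptions, not the SDK's API; only the zstandard calls (ZstdCompressor.stream_writer, write, flush(FLUSH_FRAME)) are the real library interface.

import io

import zstandard as zstd


def compress_parts(parts, boundary: str) -> bytes:
    # Each part is (part_name, (filename, data, content_type, headers)),
    # mirroring the tuple unpacked in the loop in the diff above.
    buffer = io.BytesIO()
    writer = zstd.ZstdCompressor().stream_writer(buffer)
    for part_name, (filename, data, content_type, headers) in parts:
        # Build the multipart part header, then feed header and body
        # straight into the compressor.
        header = (
            f'--{boundary}\r\n'
            f'Content-Disposition: form-data; name="{part_name}"'
        )
        if filename:
            header += f'; filename="{filename}"'
        header += f'\r\nContent-Type: {content_type}\r\n'
        for key, value in (headers or {}).items():
            header += f'{key}: {value}\r\n'
        writer.write(header.encode() + b'\r\n')
        writer.write(data if isinstance(data, bytes) else str(data).encode())
        writer.write(b'\r\n')
    # Closing boundary, then finalize the zstd frame.
    writer.write(f'--{boundary}--\r\n'.encode())
    writer.flush(zstd.FLUSH_FRAME)
    return buffer.getvalue()

A quick round-trip check is zstd.ZstdDecompressor().decompressobj().decompress(blob), which handles frames that omit the content size, as frames produced by stream_writer may.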
@@ -398,21 +401,26 @@ def compress_operations(
         Compressed chunks of the multipart form data
         """
         def chunk_ops(ops: Sequence[SerializedRunOperation],
-                      size: Optional[int] = None) -> Iterator[Sequence[SerializedRunOperation]]:
+                      size: Optional[int] = None,
+                      ) -> Iterator[Sequence[SerializedRunOperation]]:
             if size is None:
                 yield ops
                 return
 
             for i in range(0, len(ops), size):
                 yield ops[i:i + size]
 
-        def get_multipart_parts(batch: Sequence[SerializedRunOperation]) -> MultipartPartsAndContext:
+        def get_multipart_parts(
+            batch: Sequence[SerializedRunOperation]
+        ) -> MultipartPartsAndContext:
             parts_and_contexts = []
             for op in batch:
                 parts_and_contexts.append(
                     serialized_run_operation_to_multipart_parts_and_context(op)
                 )
-            return combine_multipart_parts_and_context_for_compression(parts_and_contexts)
+            return combine_multipart_parts_and_context_for_compression(
+                parts_and_contexts
+            )
 
         # Process operations in batches
         for batch in chunk_ops(ops, batch_size):
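The chunk_ops generator is the whole batching strategy in compress_operations. Pulled out of the diff and generalized over a type variable (the generalization is mine, not the commit's), it behaves like this self-contained sketch:

from typing import Iterator, Optional, Sequence, TypeVar

T = TypeVar("T")


def chunk_ops(ops: Sequence[T], size: Optional[int] = None) -> Iterator[Sequence[T]]:
    # With no size, everything goes out as a single batch.
    if size is None:
        yield ops
        return
    # Otherwise slice off successive windows of at most `size` items;
    # the final batch may be shorter.
    for i in range(0, len(ops), size):
        yield ops[i:i + size]


print(list(chunk_ops([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]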
5 changes: 1 addition & 4 deletions python/langsmith/client.py
@@ -58,9 +58,6 @@
 
 import requests
 from requests import adapters as requests_adapters
-from requests_toolbelt import ( # type: ignore[import-untyped]
-    multipart as rqtb_multipart,
-)
 from typing_extensions import TypeGuard, overload
 from urllib3.poolmanager import PoolKey # type: ignore[attr-defined, import-untyped]
 from urllib3.util import Retry # type: ignore[import-untyped]
@@ -88,8 +85,8 @@
 )
 from langsmith._internal._operations import (
     SerializedFeedbackOperation,
-    StreamingMultipartCompressor,
     SerializedRunOperation,
+    StreamingMultipartCompressor,
     combine_serialized_queue_operations,
     serialize_feedback_dict,
     serialize_run_dict,
