
Commit

lint
angus-langchain committed Dec 13, 2024
1 parent f59f7be commit b76e662
Showing 2 changed files with 19 additions and 17 deletions.
15 changes: 9 additions & 6 deletions python/langsmith/_internal/_background_thread.py
@@ -1,15 +1,14 @@
from __future__ import annotations

GitHub Actions / benchmark: Benchmark results (CI notice on python/langsmith/_internal/_background_thread.py)

create_5_000_run_trees: Mean +- std dev: 714 ms +- 97 ms (warning: std dev is 14% of the mean; result may be unstable)
create_10_000_run_trees: Mean +- std dev: 1.45 sec +- 0.18 sec (warning: std dev is 12% of the mean; result may be unstable)
create_20_000_run_trees: Mean +- std dev: 1.46 sec +- 0.20 sec (warning: std dev is 13% of the mean; result may be unstable)
dumps_class_nested_py_branch_and_leaf_200x400: Mean +- std dev: 681 us +- 5 us
dumps_class_nested_py_leaf_50x100: Mean +- std dev: 25.0 ms +- 0.7 ms
dumps_class_nested_py_leaf_100x200: Mean +- std dev: 103 ms +- 2 ms
dumps_dataclass_nested_50x100: Mean +- std dev: 25.3 ms +- 0.7 ms
dumps_pydantic_nested_50x100: Mean +- std dev: 71.1 ms +- 16.4 ms (warning: std dev is 23% of the mean; result may be unstable)
dumps_pydanticv1_nested_50x100: Mean +- std dev: 198 ms +- 4 ms

For the unstable results, pyperf suggests rerunning with more runs/values/loops, running 'python -m pyperf system tune' to reduce system jitter, and using pyperf stats, pyperf dump, and pyperf hist to analyze the results.
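The "standard deviation is N% of the mean" figures in those warnings are just each benchmark's relative standard deviation. A minimal sketch, not part of this commit, with the numbers copied from the annotation above:

# Relative standard deviation for the results pyperf flagged as unstable.
# Values are (mean, std dev) in seconds, copied from the annotation above.
cases = {
    "create_5_000_run_trees": (0.714, 0.097),
    "create_10_000_run_trees": (1.45, 0.177),
    "create_20_000_run_trees": (1.46, 0.196),
    "dumps_pydantic_nested_50x100": (0.0711, 0.0164),
}
for name, (mean, std) in cases.items():
    print(f"{name}: std dev is {std / mean:.0%} of the mean")
# -> 14%, 12%, 13%, 23%, matching the warnings.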

GitHub Actions / benchmark: Comparison against main (CI notice on python/langsmith/_internal/_background_thread.py)

+-----------------------------------------------+----------+------------------------+
| Benchmark                                     | main     | changes                |
+===============================================+==========+========================+
| dumps_pydanticv1_nested_50x100                | 217 ms   | 198 ms: 1.09x faster   |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_branch_and_leaf_200x400 | 695 us   | 681 us: 1.02x faster   |
+-----------------------------------------------+----------+------------------------+
| create_5_000_run_trees                        | 721 ms   | 714 ms: 1.01x faster   |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_leaf_100x200            | 103 ms   | 103 ms: 1.01x faster   |
+-----------------------------------------------+----------+------------------------+
| dumps_dataclass_nested_50x100                 | 25.2 ms  | 25.3 ms: 1.00x slower  |
+-----------------------------------------------+----------+------------------------+
| dumps_class_nested_py_leaf_50x100             | 24.8 ms  | 25.0 ms: 1.01x slower  |
+-----------------------------------------------+----------+------------------------+
| create_10_000_run_trees                       | 1.37 sec | 1.45 sec: 1.06x slower |
+-----------------------------------------------+----------+------------------------+
| create_20_000_run_trees                       | 1.36 sec | 1.46 sec: 1.07x slower |
+-----------------------------------------------+----------+------------------------+
| dumps_pydantic_nested_50x100                  | 64.9 ms  | 71.1 ms: 1.10x slower  |
+-----------------------------------------------+----------+------------------------+
| Geometric mean                                | (ref)    | 1.01x slower           |
+-----------------------------------------------+----------+------------------------+
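The per-benchmark "Nx faster/slower" ratios and the geometric-mean row can be reproduced from the means in the table. A minimal sketch, not part of this commit; the means are copied from the rounded table values, so the last digit can differ slightly from pyperf's own output:

import math

# (benchmark, main, changes) -- means in seconds, copied from the table above.
results = [
    ("dumps_pydanticv1_nested_50x100", 0.217, 0.198),
    ("dumps_class_nested_py_branch_and_leaf_200x400", 695e-6, 681e-6),
    ("create_5_000_run_trees", 0.721, 0.714),
    ("dumps_class_nested_py_leaf_100x200", 0.103, 0.103),
    ("dumps_dataclass_nested_50x100", 0.0252, 0.0253),
    ("dumps_class_nested_py_leaf_50x100", 0.0248, 0.0250),
    ("create_10_000_run_trees", 1.37, 1.45),
    ("create_20_000_run_trees", 1.36, 1.46),
    ("dumps_pydantic_nested_50x100", 0.0649, 0.0711),
]

ratios = [changes / main for _, main, changes in results]  # > 1 means the change is slower
for (name, main, changes), r in zip(results, ratios):
    label = "slower" if r > 1 else "faster"
    print(f"{name}: {max(r, 1 / r):.2f}x {label}")

# pyperf reports the geometric mean of the per-benchmark ratios.
geo = math.exp(sum(map(math.log, ratios)) / len(ratios))
print(f"Geometric mean: {geo:.2f}x {'slower' if geo > 1 else 'faster'}")  # ~1.01x slower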

+import concurrent.futures
import functools
import io
import logging
import os
import sys
import threading
import time
import weakref
from multiprocessing import cpu_count
-import concurrent.futures
from queue import Empty, Queue
from typing import (
TYPE_CHECKING,
Expand Down Expand Up @@ -38,7 +37,10 @@

logger = logging.getLogger("langsmith.client")

-HTTP_REQUEST_THREAD_POOL = concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count())
+HTTP_REQUEST_THREAD_POOL = concurrent.futures.ThreadPoolExecutor(
+    max_workers=cpu_count()
+)


@functools.total_ordering
class TracingQueueItem:

@@ -261,10 +263,12 @@ def keep_thread_active() -> bool:
if hasattr(sys, "getrefcount"):
# check if client refs count indicates we're the only remaining
# reference to the client

# Count active threads
thread_pool = HTTP_REQUEST_THREAD_POOL._threads
-active_count = sum(1 for thread in thread_pool if thread is not None and thread.is_alive())
+active_count = sum(
+    1 for thread in thread_pool if thread is not None and thread.is_alive()
+)

return sys.getrefcount(client) > num_known_refs + active_count
else:

@@ -305,7 +309,6 @@ def keep_thread_active() -> bool:
except RuntimeError:
client._send_compressed_multipart_req(final_data_stream)


except Exception:
logger.error("Error in final cleanup", exc_info=True)

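For readers skimming the diff, the keep_thread_active code touched above follows a refcount-based liveness pattern: the background thread keeps running only while something other than the known internals (plus any pool threads that may transiently hold the client) still references the client. Below is a standalone sketch of that pattern, assuming an illustrative DummyClient and an arbitrary num_known_refs baseline; it is not langsmith's API, and, like the diff, it relies on the executor's private _threads set:

import concurrent.futures
import sys
from multiprocessing import cpu_count

POOL = concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count())


class DummyClient:
    """Illustrative stand-in for the traced client object."""


def keep_thread_active(client: DummyClient, num_known_refs: int = 2) -> bool:
    if not hasattr(sys, "getrefcount"):
        # No refcounts (e.g. some non-CPython interpreters): just keep running.
        return True
    # Pool threads can each hold a transient reference to the client through
    # submitted work, so they are added to the baseline of "known" references.
    active_count = sum(
        1 for t in POOL._threads if t is not None and t.is_alive()
    )
    return sys.getrefcount(client) > num_known_refs + active_count


client = DummyClient()
print(keep_thread_active(client))  # typically True: this module still references `client`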
21 changes: 10 additions & 11 deletions python/langsmith/client.py
@@ -1824,8 +1824,7 @@ def update_run(
if attachments:
data["attachments"] = attachments
use_multipart = (
-(self.tracing_queue is not None
- or self.compressed_runs_buffer is not None)
+(self.tracing_queue is not None or self.compressed_runs_buffer is not None)
# batch ingest requires trace_id and dotted_order to be set
and data["trace_id"] is not None
and data["dotted_order"] is not None

@@ -1887,16 +1886,16 @@ def _update_run(self, run_update: dict) -> None:
},
)


def flush_compressed_runs(self, attempts: int = 3) -> None:
"""
Forcefully flush the currently buffered compressed runs.
"""
"""Force flush the currently buffered compressed runs."""
if not self.compress_traces or self.compressed_runs_buffer is None:
return

# Attempt to drain and send any remaining data
-from langsmith._internal._background_thread import _tracing_thread_drain_compressed_buffer, HTTP_REQUEST_THREAD_POOL
+from langsmith._internal._background_thread import (
+    HTTP_REQUEST_THREAD_POOL,
+    _tracing_thread_drain_compressed_buffer,
+)

final_data_stream = _tracing_thread_drain_compressed_buffer(
self, size_limit=1, size_limit_bytes=1

@@ -1909,7 +1908,9 @@ def flush_compressed_runs(self, attempts: int = 3) -> None:
future = None
try:
future = HTTP_REQUEST_THREAD_POOL.submit(
-self._send_compressed_multipart_req, final_data_stream, attempts=attempts
+self._send_compressed_multipart_req,
+final_data_stream,
+attempts=attempts,
)
except RuntimeError:
# In case the ThreadPoolExecutor is already shutdown

@@ -1920,9 +1921,7 @@ def flush_compressed_runs(self, attempts: int = 3) -> None:
cf.wait([future])

def flush(self) -> None:
"""
A convenience method to flush either queue or compressed buffer, depending on mode.
"""
"""Flush either queue or compressed buffer, depending on mode."""
if self.compress_traces and self.compressed_runs_buffer is not None:
self.flush_compressed_runs()
elif self.tracing_queue is not None:
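The flush_compressed_runs hunks above also show a reusable pattern: submit work to the shared thread pool, fall back to doing it synchronously when the executor is already shut down (ThreadPoolExecutor.submit raises RuntimeError after shutdown, e.g. during interpreter exit), and otherwise wait on the returned future. A minimal standalone sketch, assuming an illustrative send_payload and a local POOL rather than langsmith's internals:

import concurrent.futures as cf

POOL = cf.ThreadPoolExecutor(max_workers=4)


def send_payload(data: bytes) -> None:
    # Stand-in for the client's real multipart send.
    print(f"sending {len(data)} bytes")


def flush(data: bytes) -> None:
    future = None
    try:
        future = POOL.submit(send_payload, data)
    except RuntimeError:
        # Executor already shut down: fall back to sending inline.
        send_payload(data)
    if future is not None:
        cf.wait([future])


flush(b"example payload")   # goes through the pool
POOL.shutdown(wait=True)
flush(b"late payload")      # RuntimeError path: sent inline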

0 comments on commit b76e662
