From 694532c91346bdbf3cfeaa616f4e3a3c0dbae14d Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Thu, 23 Jan 2025 19:43:36 +0300 Subject: [PATCH 1/7] update: add class Config and move env into it --- protollm_tools/llm-worker/poetry.lock | 10 +-- .../llm-worker/protollm_worker/config.py | 79 ++++++++++++++++--- .../llm-worker/protollm_worker/main.py | 16 +--- .../protollm_worker/services/broker.py | 66 +++------------- protollm_tools/llm-worker/pyproject.toml | 2 +- protollm_tools/sdk/pyproject.toml | 2 +- 6 files changed, 87 insertions(+), 88 deletions(-) diff --git a/protollm_tools/llm-worker/poetry.lock b/protollm_tools/llm-worker/poetry.lock index c478355..c3341fa 100644 --- a/protollm_tools/llm-worker/poetry.lock +++ b/protollm_tools/llm-worker/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -2846,13 +2846,13 @@ files = [ [[package]] name = "protollm-sdk" -version = "1.0.0" +version = "1.1.0" description = "" optional = false python-versions = "<4.0,>=3.10" files = [ - {file = "protollm_sdk-1.0.0-py3-none-any.whl", hash = "sha256:bd53331811e788c606551a7c19d2c59496612db8ac237f2c28a5bec6cf373c89"}, - {file = "protollm_sdk-1.0.0.tar.gz", hash = "sha256:7f34ab288115a33d44047d689703af731abd66af11846b5233c7083b3df8a8e7"}, + {file = "protollm_sdk-1.1.0-py3-none-any.whl", hash = "sha256:2f49516d0229a85fa0abf7d2bead2ed96a565686eb87b9559468eeb3198db7c7"}, + {file = "protollm_sdk-1.1.0.tar.gz", hash = "sha256:165d7d1270dadc7eacbf6d973f8410cd15fe2c7452cf6fb5f784cd5abc1f4c0e"}, ] [package.dependencies] @@ -5060,4 +5060,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "bef6d37fd1049fed6ba443ba2fa5abbcc3fff1499232944f872b272410ac659c" +content-hash = "1f1fe7c6d722ba2011a2d043068fd4c2aa8cdf5c7d7808e6f1aed30d75805300" diff --git 
a/protollm_tools/llm-worker/protollm_worker/config.py b/protollm_tools/llm-worker/protollm_worker/config.py index 2b6ef2a..513399c 100644 --- a/protollm_tools/llm-worker/protollm_worker/config.py +++ b/protollm_tools/llm-worker/protollm_worker/config.py @@ -1,16 +1,71 @@ import os -REDIS_PREFIX = os.environ.get("REDIS_PREFIX", "llm-api") -REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") -REDIS_PORT = os.environ.get("REDIS_PORT", "6379") -RABBIT_MQ_HOST = os.environ.get("RABBIT_MQ_HOST", "localhost") -RABBIT_MQ_PORT = os.environ.get("RABBIT_MQ_PORT", "5672") -RABBIT_MQ_LOGIN = os.environ.get("RABBIT_MQ_LOGIN", "admin") -RABBIT_MQ_PASSWORD = os.environ.get("RABBIT_MQ_PASSWORD", "admin") +class Config: + def __init__( + self, + redis_host: str = "localhost", + redis_port: int = 6379, + redis_prefix: str = "llm-api", + rabbit_host: str = "localhost", + rabbit_port: int = 5672, + rabbit_login: str = "admin", + rabbit_password: str = "admin", + queue_name: str = "llm-api-queue", + model_path: str = None, + token_len: int = None, + tensor_parallel_size: int = None, + gpu_memory_utilisation: float = None, + ): + self.redis_host = redis_host + self.redis_port = redis_port + self.redis_prefix = redis_prefix + self.rabbit_host = rabbit_host + self.rabbit_port = rabbit_port + self.rabbit_login = rabbit_login + self.rabbit_password = rabbit_password + self.queue_name = queue_name + self.model_path = model_path, + self.token_len = token_len, + self.tensor_parallel_size = tensor_parallel_size, + self.gpu_memory_utilisation = gpu_memory_utilisation, -QUEUE_NAME = os.environ.get("QUEUE_NAME", "llm-api-queue") -MODEL_PATH = os.environ.get("MODEL_PATH") -TOKENS_LEN = int(os.environ.get("TOKENS_LEN")) -TENSOR_PARALLEL_SIZE = int(os.environ.get("TENSOR_PARALLEL_SIZE")) -GPU_MEMORY_UTILISATION = float(os.environ.get("GPU_MEMORY_UTILISATION")) + @classmethod + def read_from_env(cls) -> 'Config': + return Config( + os.environ.get("REDIS_HOST", "localhost"), + 
os.environ.get("REDIS_PORT", "6379"), + os.environ.get("REDIS_PREFIX", "llm-api"), + os.environ.get("RABBIT_MQ_HOST", "localhost"), + os.environ.get("RABBIT_MQ_PORT", "5672"), + os.environ.get("RABBIT_MQ_LOGIN", "admin"), + os.environ.get("RABBIT_MQ_PASSWORD", "admin"), + os.environ.get("QUEUE_NAME", "llm-api-queue"), + os.environ.get("MODEL_PATH"), + int(os.environ.get("TOKENS_LEN")), + int(os.environ.get("TENSOR_PARALLEL_SIZE")), + float(os.environ.get("GPU_MEMORY_UTILISATION")), + ) + + @classmethod + def read_from_env_file(cls, path: str) -> 'Config': + with open(path) as file: + lines = file.readlines() + env_vars = {} + for line in lines: + key, value = line.split("=") + env_vars[key] = value + return Config( + env_vars.get("REDIS_HOST", "localhost"), + int(env_vars.get("REDIS_PORT", "6379")), + env_vars.get("REDIS_PREFIX", "llm-api"), + env_vars.get("RABBIT_MQ_HOST", "localhost"), + int(env_vars.get("RABBIT_MQ_PORT", "5672")), + env_vars.get("RABBIT_MQ_LOGIN", "admin"), + env_vars.get("RABBIT_MQ_PASSWORD", "admin"), + env_vars.get("QUEUE_NAME", "llm-api-queue"), + env_vars.get("MODEL_PATH"), + int(env_vars.get("TOKENS_LEN")), + int(env_vars.get("TENSOR_PARALLEL_SIZE")), + float(env_vars.get("GPU_MEMORY_UTILISATION")), + ) \ No newline at end of file diff --git a/protollm_tools/llm-worker/protollm_worker/main.py b/protollm_tools/llm-worker/protollm_worker/main.py index 9c6e6bc..993ecff 100644 --- a/protollm_tools/llm-worker/protollm_worker/main.py +++ b/protollm_tools/llm-worker/protollm_worker/main.py @@ -1,21 +1,11 @@ from protollm_worker.config import MODEL_PATH, REDIS_HOST, REDIS_PORT, QUEUE_NAME from protollm_worker.models.vllm_models import VllMModel from protollm_worker.services.broker import LLMWrap -from protollm_worker.config import ( - RABBIT_MQ_HOST, RABBIT_MQ_PORT, - RABBIT_MQ_PASSWORD, RABBIT_MQ_LOGIN, - REDIS_PREFIX -) +from protollm_worker.config import Config if __name__ == "__main__": + config = Config.read_from_env() llm_model = 
VllMModel(model_path=MODEL_PATH) llm_wrap = LLMWrap(llm_model=llm_model, - redis_host= REDIS_HOST, - redis_port= REDIS_PORT, - queue_name= QUEUE_NAME, - rabbit_host= RABBIT_MQ_HOST, - rabbit_port= RABBIT_MQ_PORT, - rabbit_login= RABBIT_MQ_LOGIN, - rabbit_password= RABBIT_MQ_PASSWORD, - redis_prefix= REDIS_PREFIX) + config= config) llm_wrap.start_connection() diff --git a/protollm_tools/llm-worker/protollm_worker/services/broker.py b/protollm_tools/llm-worker/protollm_worker/services/broker.py index 20db797..b5b3535 100644 --- a/protollm_tools/llm-worker/protollm_worker/services/broker.py +++ b/protollm_tools/llm-worker/protollm_worker/services/broker.py @@ -4,8 +4,10 @@ import pika from protollm_sdk.models.job_context_models import PromptModel, ChatCompletionModel, PromptTransactionModel, \ PromptWrapper, ChatCompletionTransactionModel +from protollm_sdk.object_interface import RabbitMQWrapper from protollm_sdk.object_interface.redis_wrapper import RedisWrapper +from protollm_worker.config import Config from protollm_worker.models.base import BaseLLM logging.basicConfig(level=logging.INFO) @@ -22,41 +24,21 @@ class LLMWrap: def __init__(self, llm_model: BaseLLM, - redis_host: str, - redis_port: str, - queue_name: str, - rabbit_host: str, - rabbit_port: str, - rabbit_login: str, - rabbit_password: str, - redis_prefix: str): + config: Config): """ Initialize the LLMWrap class with the necessary configurations. :param llm_model: The language model to use for processing prompts. :type llm_model: BaseLLM - :param redis_host: Hostname for the Redis server. - :type redis_host: str - :param redis_port: Port for the Redis server. - :type redis_port: str - :param queue_name: Name of the RabbitMQ queue to consume messages from. - :type queue_name: str - :param rabbit_host: Hostname for the RabbitMQ server. - :type rabbit_host: str - :param rabbit_port: Port for the RabbitMQ server. - :type rabbit_port: str - :param rabbit_login: Login for RabbitMQ authentication. 
- :type rabbit_login: str - :param rabbit_password: Password for RabbitMQ authentication. - :type rabbit_password: str - :param redis_prefix: Prefix for Redis keys to store results. - :type redis_prefix: str + :param config: Set for setting Redis and RabbitMQ. + :type config: Config """ self.llm = llm_model logger.info('Loaded model') - self.redis_bd = RedisWrapper(redis_host, redis_port) - self.redis_prefix = redis_prefix + self.redis_bd = RedisWrapper(config.redis_host, config.redis_port) + self.rabbitMQ = RabbitMQWrapper(config.rabbit_host, config.rabbit_port, config.rabbit_login, config.rabbit_password) + self.redis_prefix = config.redis_prefix logger.info('Connected to Redis') self.models = { @@ -64,41 +46,13 @@ def __init__(self, 'chat_completion': ChatCompletionModel, } - self.queue_name = queue_name - self.rabbit_host = rabbit_host - self.rabbit_port = rabbit_port - self.rabbit_login = rabbit_login - self.rabbit_password = rabbit_password + self.queue_name = config.queue_name def start_connection(self): """ Establish a connection to the RabbitMQ broker and start consuming messages from the specified queue. 
""" - connection = pika.BlockingConnection( - pika.ConnectionParameters( - host=self.rabbit_host, - port=self.rabbit_port, - virtual_host='/', - credentials=pika.PlainCredentials( - username=self.rabbit_login, - password=self.rabbit_password - ) - ) - ) - - channel = connection.channel() - logger.info('Connected to the broker') - - channel.queue_declare(queue=self.queue_name) - logger.info('Queue has been declared') - - channel.basic_consume( - on_message_callback=self._callback, - queue=self.queue_name, - auto_ack=True - ) - - channel.start_consuming() + self.rabbitMQ.consume_messages(self.queue_name, self._callback) logger.info('Started consuming messages') def _dump_from_body(self, message_body) -> PromptModel | ChatCompletionModel: diff --git a/protollm_tools/llm-worker/pyproject.toml b/protollm_tools/llm-worker/pyproject.toml index 3e9bdaf..b46de09 100644 --- a/protollm_tools/llm-worker/pyproject.toml +++ b/protollm_tools/llm-worker/pyproject.toml @@ -10,7 +10,7 @@ python = "^3.10" redis = "^5.0.5" pika = "^1.3.2" pydantic = "^2.7.4" -protollm_sdk = "^1.0.0" +protollm_sdk = "^1.1.0" vllm = "^0.6.4.post1" [toll.poetry.llama-cpp] diff --git a/protollm_tools/sdk/pyproject.toml b/protollm_tools/sdk/pyproject.toml index e9bdc7c..f8c075e 100644 --- a/protollm_tools/sdk/pyproject.toml +++ b/protollm_tools/sdk/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "protollm-sdk" -version = "1.1.0" +version = "1.1.1" description = "" authors = ["aimclub"] readme = "README.md" From 347128d4e3281e71a97aae0be905d89d120b8b6a Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Mon, 27 Jan 2025 16:55:23 +0300 Subject: [PATCH 2/7] update: update vllm with config --- protollm_tools/llm-worker/protollm_worker/config.py | 12 ++++++------ protollm_tools/llm-worker/protollm_worker/main.py | 6 ++++-- .../llm-worker/protollm_worker/models/vllm_models.py | 9 ++++----- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/protollm_tools/llm-worker/protollm_worker/config.py 
b/protollm_tools/llm-worker/protollm_worker/config.py index 513399c..cce137c 100644 --- a/protollm_tools/llm-worker/protollm_worker/config.py +++ b/protollm_tools/llm-worker/protollm_worker/config.py @@ -42,9 +42,9 @@ def read_from_env(cls) -> 'Config': os.environ.get("RABBIT_MQ_PASSWORD", "admin"), os.environ.get("QUEUE_NAME", "llm-api-queue"), os.environ.get("MODEL_PATH"), - int(os.environ.get("TOKENS_LEN")), - int(os.environ.get("TENSOR_PARALLEL_SIZE")), - float(os.environ.get("GPU_MEMORY_UTILISATION")), + int(os.environ.get("TOKENS_LEN", "16384")), + int(os.environ.get("TENSOR_PARALLEL_SIZE", "2")), + float(os.environ.get("GPU_MEMORY_UTILISATION", "0.9")), ) @classmethod @@ -65,7 +65,7 @@ def read_from_env_file(cls, path: str) -> 'Config': env_vars.get("RABBIT_MQ_PASSWORD", "admin"), env_vars.get("QUEUE_NAME", "llm-api-queue"), env_vars.get("MODEL_PATH"), - int(env_vars.get("TOKENS_LEN")), - int(env_vars.get("TENSOR_PARALLEL_SIZE")), - float(env_vars.get("GPU_MEMORY_UTILISATION")), + int(env_vars.get("TOKENS_LEN", "16384")), + int(env_vars.get("TENSOR_PARALLEL_SIZE", "2")), + float(env_vars.get("GPU_MEMORY_UTILISATION", "0.9")), ) \ No newline at end of file diff --git a/protollm_tools/llm-worker/protollm_worker/main.py b/protollm_tools/llm-worker/protollm_worker/main.py index 993ecff..e6fc2d6 100644 --- a/protollm_tools/llm-worker/protollm_worker/main.py +++ b/protollm_tools/llm-worker/protollm_worker/main.py @@ -1,11 +1,13 @@ -from protollm_worker.config import MODEL_PATH, REDIS_HOST, REDIS_PORT, QUEUE_NAME from protollm_worker.models.vllm_models import VllMModel from protollm_worker.services.broker import LLMWrap from protollm_worker.config import Config if __name__ == "__main__": config = Config.read_from_env() - llm_model = VllMModel(model_path=MODEL_PATH) + llm_model = VllMModel(model_path=config.model_path, + tensor_parallel_size=config.tensor_parallel_size, + gpu_memory_utilisation=config.gpu_memory_utilisation, + tokens_len=config.token_len) llm_wrap = 
LLMWrap(llm_model=llm_model, config= config) llm_wrap.start_connection() diff --git a/protollm_tools/llm-worker/protollm_worker/models/vllm_models.py b/protollm_tools/llm-worker/protollm_worker/models/vllm_models.py index 1553d41..9827d98 100644 --- a/protollm_tools/llm-worker/protollm_worker/models/vllm_models.py +++ b/protollm_tools/llm-worker/protollm_worker/models/vllm_models.py @@ -4,7 +4,6 @@ ChatCompletionTransactionModel, PromptTypes from vllm import LLM, SamplingParams -from protollm_worker.config import GPU_MEMORY_UTILISATION, TENSOR_PARALLEL_SIZE, TOKENS_LEN from protollm_worker.models.base import BaseLLM, LocalLLM logging.basicConfig(level=logging.INFO) @@ -17,7 +16,7 @@ class VllMModel(LocalLLM, BaseLLM): and chat-based completions. """ - def __init__(self, model_path, n_ctx=8192): + def __init__(self, model_path, tensor_parallel_size, gpu_memory_utilisation, tokens_len, n_ctx=8192): """ Initialize the vLLM-based model. @@ -30,9 +29,9 @@ def __init__(self, model_path, n_ctx=8192): self.model = LLM( model=model_path, - tensor_parallel_size=TENSOR_PARALLEL_SIZE, - gpu_memory_utilization=GPU_MEMORY_UTILISATION, - max_model_len=TOKENS_LEN + tensor_parallel_size=tensor_parallel_size, + gpu_memory_utilization=gpu_memory_utilisation, + max_model_len=tokens_len ) self.handlers = { PromptTypes.SINGLE_GENERATION.value: self.generate, From 4da51d836db10aa6755e8079583e098bc3290070 Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Mon, 27 Jan 2025 18:13:43 +0300 Subject: [PATCH 3/7] documentation: add docstring to Config --- .../llm-worker/protollm_worker/config.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/protollm_tools/llm-worker/protollm_worker/config.py b/protollm_tools/llm-worker/protollm_worker/config.py index cce137c..99bf9b3 100644 --- a/protollm_tools/llm-worker/protollm_worker/config.py +++ b/protollm_tools/llm-worker/protollm_worker/config.py @@ -2,6 +2,24 @@ class Config: + """ + Configuration class for setting up Redis,
RabbitMQ, and model-specific parameters. + + Attributes: + redis_host (str): The hostname of the Redis server. Defaults to "localhost". + redis_port (int): The port number of the Redis server. Defaults to 6379. + redis_prefix (str): Prefix for keys used in Redis. Defaults to "llm-api". + rabbit_host (str): The hostname of the RabbitMQ server. Defaults to "localhost". + rabbit_port (int): The port number of the RabbitMQ server. Defaults to 5672. + rabbit_login (str): The username for RabbitMQ authentication. Defaults to "admin". + rabbit_password (str): The password for RabbitMQ authentication. Defaults to "admin". + queue_name (str): The name of the RabbitMQ queue to use. Defaults to "llm-api-queue". + model_path (str): Path to the model being used. Defaults to None. + token_len (int): The maximum length of tokens for processing by the model. Defaults to None. + tensor_parallel_size (int): The size of tensor parallelism for distributed processing. Defaults to None. + gpu_memory_utilisation (float): The percentage of GPU memory utilization for the model. Defaults to None. 
+ """ + def __init__( self, redis_host: str = "localhost", From 10b16768940f1b0a821f916b7b087ddd26e7357a Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Wed, 5 Feb 2025 16:35:23 +0300 Subject: [PATCH 4/7] refactor: update configs and dockers --- protollm_tools/llm-api/docker-compose.yml | 26 ++-- protollm_tools/llm-api/poetry.lock | 116 ++++++++++++++++-- protollm_tools/llm-api/protollm_api/config.py | 36 +++--- protollm_tools/llm-api/pyproject.toml | 2 + .../{deployment => }/docker-compose.yml | 10 +- .../llm-worker/protollm_worker/config.py | 68 +++++----- protollm_tools/sdk/docker-compose.yml | 50 +++----- .../protollm_sdk/models/job_context_models.py | 1 + 8 files changed, 187 insertions(+), 122 deletions(-) rename protollm_tools/llm-worker/{deployment => }/docker-compose.yml (67%) diff --git a/protollm_tools/llm-api/docker-compose.yml b/protollm_tools/llm-api/docker-compose.yml index a2c8d7a..87faecd 100644 --- a/protollm_tools/llm-api/docker-compose.yml +++ b/protollm_tools/llm-api/docker-compose.yml @@ -8,16 +8,9 @@ services: context: . 
dockerfile: Dockerfile ports: - - "6672:6672" - environment: - CELERY_BROKER_URL: amqp://admin:admin@10.32.15.21:5672/ - CELERY_RESULT_BACKEND: redis://10.32.15.21:6379/0 - REDIS_HOST: redis - REDIS_PORT: 6379 - RABBIT_MQ_HOST: rabbitmq - RABBIT_MQ_PORT: 5672 - RABBIT_MQ_LOGIN: admin - RABBIT_MQ_PASSWORD: admin + - ${API_PORT}:6672 + env_file: + - .env volumes: - ./unit_config.json:/docker-entrypoint.d/unit_config.json networks: @@ -26,11 +19,10 @@ services: rabbitmq: image: "rabbitmq:3-management" ports: - - "5672:5672" # RabbitMQ broker port - - "15672:15672" # RabbitMQ management interface - environment: - - RABBITMQ_DEFAULT_USER=admin - - RABBITMQ_DEFAULT_PASS=admin + - ${RABBIT_MQ_PORT}:5672 + - ${WEB_RABBIT_MQ}:15672 + env_file: + - .env volumes: - rabbitmq_data:/var/lib/rabbitmq networks: @@ -39,7 +31,7 @@ services: redis: image: "redis:alpine" ports: - - "6379:6379" + - ${REDIS_PORT}:6379 volumes: - redis_data:/var/lib/data networks: @@ -52,4 +44,4 @@ networks: volumes: rabbitmq_data: - redis_data: \ No newline at end of file + redis_data: diff --git a/protollm_tools/llm-api/poetry.lock b/protollm_tools/llm-api/poetry.lock index 0309784..bdc2def 100644 --- a/protollm_tools/llm-api/poetry.lock +++ b/protollm_tools/llm-api/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -6,6 +6,7 @@ version = "2.4.3" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"}, {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"}, @@ -17,6 +18,7 @@ version = "3.11.4" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiohttp-3.11.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60f8206818e3582c999c999c799ab068e14f1870ade47d1fe8536dbfd88010b"}, {file = "aiohttp-3.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e5786e5926f888ce3a996d38d9c9b8f9306f399edb1f1ca3ce7760dab9b1043c"}, @@ -115,6 +117,7 @@ version = "2.0.1" description = "asyncio (PEP 3156) Redis support" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "aioredis-2.0.1-py3-none-any.whl", hash = "sha256:9ac0d0b3b485d293b8ca1987e6de8658d7dafcca1cddfcd1d506cae8cdebfdd6"}, {file = "aioredis-2.0.1.tar.gz", hash = "sha256:eaa51aaf993f2d71f54b70527c440437ba65340588afeb786cd87c55c89cd98e"}, @@ -133,6 +136,7 @@ version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, @@ -147,6 +151,7 @@ version = "5.3.1" description = "Low-level AMQP client for Python (fork of amqplib)." 
optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "amqp-5.3.1-py3-none-any.whl", hash = "sha256:43b3319e1b4e7d1251833a93d672b4af1e40f3d632d479b98661a95f117880a2"}, {file = "amqp-5.3.1.tar.gz", hash = "sha256:cddc00c725449522023bad949f70fff7b48f0b1ade74d170a6f10ab044739432"}, @@ -161,6 +166,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -172,6 +178,7 @@ version = "4.6.2.post1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, @@ -194,6 +201,7 @@ version = "4.0.3" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, @@ -205,6 +213,7 @@ version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, {file = "attrs-24.2.0.tar.gz", hash = 
"sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, @@ -224,6 +233,7 @@ version = "4.2.1" description = "Python multiprocessing fork with improvements and bugfixes" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb"}, {file = "billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f"}, @@ -235,6 +245,7 @@ version = "5.4.0" description = "Distributed Task Queue." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "celery-5.4.0-py3-none-any.whl", hash = "sha256:369631eb580cf8c51a82721ec538684994f8277637edde2dfc0dacd73ed97f64"}, {file = "celery-5.4.0.tar.gz", hash = "sha256:504a19140e8d3029d5acad88330c541d4c3f64c789d85f94756762d8bca7e706"}, @@ -291,6 +302,7 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -302,6 +314,7 @@ version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" +groups = ["main"] files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -416,6 +429,7 @@ version = "8.1.7" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, @@ -430,6 +444,7 @@ version = "0.3.1" description = "Enables git-like *did-you-mean* feature in click" optional = false python-versions = ">=3.6.2" +groups = ["main"] files = [ {file = "click_didyoumean-0.3.1-py3-none-any.whl", hash = "sha256:5c4bb6007cfea5f2fd6583a2fb6701a22a41eb98957e63d0fac41c10e7c3117c"}, {file = "click_didyoumean-0.3.1.tar.gz", hash = "sha256:4f82fdff0dbe64ef8ab2279bd6aa3f6a99c3b28c05aa09cbfc07c9d7fbb5a463"}, @@ -444,6 +459,7 @@ version = "1.1.1" description = "An extension module for click to enable registering CLI commands via setuptools entry-points." 
optional = false python-versions = "*" +groups = ["main"] files = [ {file = "click-plugins-1.1.1.tar.gz", hash = "sha256:46ab999744a9d831159c3411bb0c79346d94a444df9a3a3742e9ed63645f264b"}, {file = "click_plugins-1.1.1-py2.py3-none-any.whl", hash = "sha256:5d262006d3222f5057fd81e1623d4443e41dcda5dc815c06b442aa3c02889fc8"}, @@ -461,6 +477,7 @@ version = "0.3.0" description = "REPL plugin for Click" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "click-repl-0.3.0.tar.gz", hash = "sha256:17849c23dba3d667247dc4defe1757fff98694e90fe37474f3feebb69ced26a9"}, {file = "click_repl-0.3.0-py3-none-any.whl", hash = "sha256:fb7e06deb8da8de86180a33a9da97ac316751c094c6899382da7feeeeb51b812"}, @@ -479,6 +496,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +markers = "platform_system == \"Windows\" or sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -490,6 +509,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -501,6 +521,7 @@ version = "2.7.0" description = "DNS toolkit" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, {file = "dnspython-2.7.0.tar.gz", hash = 
"sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, @@ -521,6 +542,7 @@ version = "2.2.0" description = "A robust email address syntax and deliverability validation library." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, @@ -536,6 +558,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -550,6 +574,7 @@ version = "0.111.1" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fastapi-0.111.1-py3-none-any.whl", hash = "sha256:4f51cfa25d72f9fbc3280832e84b32494cf186f50158d364a8765aabf22587bf"}, {file = "fastapi-0.111.1.tar.gz", hash = "sha256:ddd1ac34cb1f76c2e2d7f8545a4bcb5463bce4834e81abf0b189e0c359ab2413"}, @@ -575,6 +600,7 @@ version = "0.0.5" description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 
🚀" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fastapi_cli-0.0.5-py3-none-any.whl", hash = "sha256:e94d847524648c748a5350673546bbf9bcaeb086b33c24f2e82e021436866a46"}, {file = "fastapi_cli-0.0.5.tar.gz", hash = "sha256:d30e1239c6f46fcb95e606f02cdda59a1e2fa778a54b64686b3ff27f6211ff9f"}, @@ -593,6 +619,7 @@ version = "2.0.1" description = "Celery Flower" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "flower-2.0.1-py2.py3-none-any.whl", hash = "sha256:9db2c621eeefbc844c8dd88be64aef61e84e2deb29b271e02ab2b5b9f01068e2"}, {file = "flower-2.0.1.tar.gz", hash = "sha256:5ab717b979530770c16afb48b50d2a98d23c3e9fe39851dcf6bc4d01845a02a0"}, @@ -611,6 +638,7 @@ version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, @@ -712,6 +740,8 @@ version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, @@ -798,6 +828,7 @@ version = 
"0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -809,6 +840,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -830,6 +862,7 @@ version = "0.6.4" description = "A collection of framework independent HTTP protocol utils." optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, @@ -885,6 +918,7 @@ version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, @@ -910,6 +944,7 @@ version = "4.11.0" description = "Python humanize utilities" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "humanize-4.11.0-py3-none-any.whl", hash = "sha256:b53caaec8532bcb2fff70c8826f904c35943f8cecaca29d272d9df38092736c0"}, {file = "humanize-4.11.0.tar.gz", hash = "sha256:e66f36020a2d5a974c504bd2555cf770621dbdbb6d82f94a6857c0b1ea2608be"}, @@ -924,6 +959,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -938,6 +974,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -949,6 +986,7 @@ version = "3.1.4" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, @@ -966,6 +1004,7 @@ version = "0.7.1" description = "Fast iterable JSON parser." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"}, {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"}, @@ -1048,6 +1087,7 @@ version = "1.33" description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +groups = ["main"] files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, @@ -1062,6 +1102,7 @@ version = "3.0.0" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, @@ -1073,6 +1114,7 @@ version = "5.4.2" description = "Messaging library for Python." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "kombu-5.4.2-py3-none-any.whl", hash = "sha256:14212f5ccf022fc0a70453bb025a1dcc32782a588c49ea866884047d66e14763"}, {file = "kombu-5.4.2.tar.gz", hash = "sha256:eef572dd2fd9fc614b37580e3caeafdd5af46c1eff31e7fba89138cdb406f2cf"}, @@ -1106,6 +1148,7 @@ version = "0.3.7" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain-0.3.7-py3-none-any.whl", hash = "sha256:cf4af1d5751dacdc278df3de1ff3cbbd8ca7eb55d39deadccdd7fb3d3ee02ac0"}, {file = "langchain-0.3.7.tar.gz", hash = "sha256:2e4f83bf794ba38562f7ba0ede8171d7e28a583c0cec6f8595cfe72147d336b2"}, @@ -1133,6 +1176,7 @@ version = "0.3.19" description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_core-0.3.19-py3-none-any.whl", hash = "sha256:562b7cc3c15dfaa9270cb1496990c1f3b3e0b660c4d6a3236d7f693346f2a96c"}, {file = "langchain_core-0.3.19.tar.gz", hash = "sha256:126d9e8cadb2a5b8d1793a228c0783a3b608e36064d5a2ef1a4d38d07a344523"}, @@ -1156,6 +1200,7 @@ version = "0.3.2" description = "LangChain text splitting utilities" optional = false python-versions = "<4.0,>=3.9" +groups = ["main"] files = [ {file = "langchain_text_splitters-0.3.2-py3-none-any.whl", hash = "sha256:0db28c53f41d1bc024cdb3b1646741f6d46d5371e90f31e7e7c9fbe75d01c726"}, {file = "langchain_text_splitters-0.3.2.tar.gz", hash = "sha256:81e6515d9901d6dd8e35fb31ccd4f30f76d44b771890c789dc835ef9f16204df"}, @@ -1170,6 +1215,7 @@ version = "0.1.143" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" +groups = ["main"] files = [ {file = "langsmith-0.1.143-py3-none-any.whl", hash = "sha256:ba0d827269e9b03a90fababe41fa3e4e3f833300b95add10184f7e67167dde6f"}, {file = "langsmith-0.1.143.tar.gz", hash = "sha256:4c5159e5cd84b3f8499433009e72d2076dd2daf6c044ac8a3611b30d0d0161c5"}, @@ -1191,6 +1237,7 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -1215,6 +1262,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1285,6 +1333,7 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1296,6 +1345,7 @@ version = "6.1.0" description = "multidict implementation" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -1400,6 +1450,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1445,6 +1496,7 @@ version = "1.54.5" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "openai-1.54.5-py3-none-any.whl", hash = "sha256:f55a4450f38501814b53e76311ed7845a6f7f35bab46d0fb2a3728035d7a72d8"}, {file = "openai-1.54.5.tar.gz", hash = "sha256:2aab4f9755a3e1e04d8a45ac1f4ce7b6948bab76646020c6386256d7e5cbb7e0"}, @@ -1469,6 +1521,7 @@ version = "3.10.11" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"}, {file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"}, @@ -1536,6 +1589,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -1547,6 +1601,7 @@ version = "1.3.2" description = "Pika Python 
AMQP Client Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "pika-1.3.2-py3-none-any.whl", hash = "sha256:0779a7c1fafd805672796085560d290213a465e4f6f76a6fb19e378d8041a14f"}, {file = "pika-1.3.2.tar.gz", hash = "sha256:b2a327ddddf8570b4965b3576ac77091b850262d34ce8c1d8cb4e4146aa4145f"}, @@ -1563,6 +1618,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1578,6 +1634,7 @@ version = "0.21.0" description = "Python client for the Prometheus monitoring system." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, @@ -1592,6 +1649,7 @@ version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" +groups = ["main"] files = [ {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, @@ -1606,6 +1664,7 @@ version = "0.2.0" description = "Accelerated property cache" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, @@ -1713,6 +1772,7 @@ version = "1.1.0" description = "" optional = false python-versions = "<4.0,>=3.10" +groups = ["main"] files = [ {file = "protollm_sdk-1.1.0-py3-none-any.whl", hash = "sha256:2f49516d0229a85fa0abf7d2bead2ed96a565686eb87b9559468eeb3198db7c7"}, {file = "protollm_sdk-1.1.0.tar.gz", hash = "sha256:165d7d1270dadc7eacbf6d973f8410cd15fe2c7452cf6fb5f784cd5abc1f4c0e"}, @@ -1741,6 +1801,7 @@ version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, @@ -1764,6 +1825,7 @@ version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, @@ -1865,6 +1927,7 @@ version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, @@ -1879,6 +1942,7 @@ version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, @@ -1901,6 +1965,7 @@ version = "0.24.0" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"}, {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"}, @@ -1919,6 +1984,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1933,6 +1999,7 @@ version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = 
"sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, @@ -1947,6 +2014,7 @@ version = "0.0.17" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "python_multipart-0.0.17-py3-none-any.whl", hash = "sha256:15dc4f487e0a9476cc1201261188ee0940165cffc94429b6fc565c4d3045cb5d"}, {file = "python_multipart-0.0.17.tar.gz", hash = "sha256:41330d831cae6e2f22902704ead2826ea038d0419530eadff3ea80175aec5538"}, @@ -1958,6 +2026,7 @@ version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, @@ -1969,6 +2038,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2031,6 +2101,7 @@ version = "5.2.0" description = "Python client for Redis database and key-value store" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897"}, {file = "redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0"}, @@ -2049,6 +2120,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -2070,6 +2142,7 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -2084,6 +2157,7 @@ version = "13.9.4" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, @@ -2103,6 +2177,7 @@ version = "1.5.4" description = "Tool to Detect Surrounding Shell" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -2114,6 +2189,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file 
= "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -2125,6 +2201,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -2136,6 +2213,7 @@ version = "2.0.36" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, @@ -2231,6 +2309,7 @@ version = "0.37.2" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "starlette-0.37.2-py3-none-any.whl", hash = "sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee"}, {file = "starlette-0.37.2.tar.gz", hash = "sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823"}, @@ -2248,6 +2327,7 @@ version = "9.0.0" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, @@ -2263,6 +2343,8 @@ version = "2.1.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, @@ -2274,6 +2356,7 @@ version = "6.4.1" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, @@ -2294,6 +2377,7 @@ version = "4.67.0" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, @@ -2315,6 +2399,7 @@ version = "0.13.1" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "typer-0.13.1-py3-none-any.whl", hash = "sha256:5b59580fd925e89463a29d363e0a43245ec02765bde9fb77d39e5d0f29dd7157"}, {file = "typer-0.13.1.tar.gz", hash = "sha256:9d444cb96cc268ce6f8b94e13b4335084cef4c079998a9f4851a90229a3bd25c"}, @@ -2332,10 +2417,12 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +markers = {dev = "python_version < \"3.11\""} [[package]] name = "tzdata" @@ -2343,6 +2430,7 @@ version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["main"] files = [ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = 
"sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, @@ -2354,6 +2442,7 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -2371,26 +2460,28 @@ version = "1.30" description = "UUID object and generation functions (Python 2.3 or higher)" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f"}, ] [[package]] name = "uvicorn" -version = "0.32.0" +version = "0.34.0" description = "The lightning-fast ASGI server." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main", "dev"] files = [ - {file = "uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82"}, - {file = "uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e"}, + {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, + {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, ] [package.dependencies] click = ">=7.0" colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} h11 = ">=0.8" -httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""} python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} @@ -2399,7 +2490,7 @@ watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standar websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} [package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "uvloop" @@ -2407,6 +2498,8 @@ version = "0.21.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = false python-versions = ">=3.8.0" +groups = ["main"] +markers = "(sys_platform 
!= \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\"" files = [ {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, @@ -2458,6 +2551,7 @@ version = "5.1.0" description = "Python promises." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "vine-5.1.0-py3-none-any.whl", hash = "sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc"}, {file = "vine-5.1.0.tar.gz", hash = "sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0"}, @@ -2469,6 +2563,7 @@ version = "0.24.0" description = "Simple, modern and high performance file watching and code reload in python." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, @@ -2564,6 +2659,7 @@ version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, @@ -2575,6 +2671,7 @@ version = "14.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "websockets-14.1-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:a0adf84bc2e7c86e8a202537b4fd50e6f7f0e4a6b6bf64d7ccb96c4cd3330b29"}, {file = "websockets-14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90b5d9dfbb6d07a84ed3e696012610b6da074d97453bd01e0e30744b472c8179"}, @@ -2653,6 +2750,7 @@ version = "1.17.2" description = "Yet another URL library" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "yarl-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:93771146ef048b34201bfa382c2bf74c524980870bb278e6df515efaf93699ff"}, {file = "yarl-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8281db240a1616af2f9c5f71d355057e73a1409c4648c8949901396dc0a3c151"}, @@ -2744,6 +2842,6 @@ multidict = ">=4.0" propcache = ">=0.2.0" [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.10" -content-hash = "31ced0fad9bd63b4f70d6e10e4a9d286399888423dcff7c05556ca5f20e3e93f" +content-hash = "ae21d407ce8ae1e6f417255113ede38fd3fdc35fab77e3f01d28fffbacbd57c7" diff --git a/protollm_tools/llm-api/protollm_api/config.py b/protollm_tools/llm-api/protollm_api/config.py index 1eebaec..360cca3 100644 --- a/protollm_tools/llm-api/protollm_api/config.py +++ b/protollm_tools/llm-api/protollm_api/config.py @@ -27,15 +27,15 @@ def __init__( @classmethod def read_from_env(cls) -> 'Config': return Config( - os.environ.get("INNER_LLM_URL", "localhost:8670"), - os.environ.get("REDIS_HOST", "localhost"), - os.environ.get("REDIS_PORT", "6379"), - os.environ.get("REDIS_PREFIX", "llm-api"), - os.environ.get("RABBIT_MQ_HOST", "localhost"), - os.environ.get("RABBIT_MQ_PORT", "5672"), - os.environ.get("RABBIT_MQ_LOGIN", "admin"), - os.environ.get("RABBIT_MQ_PASSWORD", "admin"), - os.environ.get("QUEUE_NAME", "llm-api-queue") + os.environ.get("INNER_LLM_URL"), + os.environ.get("REDIS_HOST"), + int(os.environ.get("REDIS_PORT")), + os.environ.get("REDIS_PREFIX"), + os.environ.get("RABBIT_MQ_HOST"), + int(os.environ.get("RABBIT_MQ_PORT")), + os.environ.get("RABBIT_MQ_LOGIN"), + 
os.environ.get("RABBIT_MQ_PASSWORD"), + os.environ.get("QUEUE_NAME") ) @classmethod @@ -47,13 +47,13 @@ def read_from_env_file(cls, path: str) -> 'Config': key, value = line.split("=") env_vars[key] = value return Config( - env_vars.get("INNER_LLM_URL", "localhost:8670"), - env_vars.get("REDIS_HOST", "localhost"), - int(env_vars.get("REDIS_PORT", "6379")), - env_vars.get("REDIS_PREFIX", "llm-api"), - env_vars.get("RABBIT_MQ_HOST", "localhost"), - int(env_vars.get("RABBIT_MQ_PORT", "5672")), - env_vars.get("RABBIT_MQ_LOGIN", "admin"), - env_vars.get("RABBIT_MQ_PASSWORD", "admin"), - env_vars.get("QUEUE_NAME", "llm-api-queue") + env_vars.get("INNER_LLM_URL"), + env_vars.get("REDIS_HOST"), + int(env_vars.get("REDIS_PORT")), + env_vars.get("REDIS_PREFIX"), + env_vars.get("RABBIT_MQ_HOST"), + int(env_vars.get("RABBIT_MQ_PORT")), + env_vars.get("RABBIT_MQ_LOGIN"), + env_vars.get("RABBIT_MQ_PASSWORD"), + env_vars.get("QUEUE_NAME") ) diff --git a/protollm_tools/llm-api/pyproject.toml b/protollm_tools/llm-api/pyproject.toml index 0a0d0cf..1aa21cf 100644 --- a/protollm_tools/llm-api/pyproject.toml +++ b/protollm_tools/llm-api/pyproject.toml @@ -17,3 +17,5 @@ protollm_sdk = "^1.1.0" [tool.poetry.group.dev.dependencies] pytest = "^8.2.2" pytest-asyncio = "^0.24.0" +uvicorn = "^0.34.0" + diff --git a/protollm_tools/llm-worker/deployment/docker-compose.yml b/protollm_tools/llm-worker/docker-compose.yml similarity index 67% rename from protollm_tools/llm-worker/deployment/docker-compose.yml rename to protollm_tools/llm-worker/docker-compose.yml index 46366e8..b8f6e0d 100644 --- a/protollm_tools/llm-worker/deployment/docker-compose.yml +++ b/protollm_tools/llm-worker/docker-compose.yml @@ -11,19 +11,13 @@ services: # cpus: 5 memory: 100G build: - context: .. 
+ context: deployment dockerfile: Dockerfile env_file: .env - environment: - TOKENS_LEN: 16384 - GPU_MEMORY_UTILISATION: 0.9 - TENSOR_PARALLEL_SIZE: 2 - MODEL_PATH: /data/ - NVIDIA_VISIBLE_DEVICES: volumes: - :/data ports: - - "8677:8672" + - ${LLM_WORKER_PORT}:8672 networks: - llm_wrap_network restart: unless-stopped diff --git a/protollm_tools/llm-worker/protollm_worker/config.py b/protollm_tools/llm-worker/protollm_worker/config.py index 99bf9b3..cd3398d 100644 --- a/protollm_tools/llm-worker/protollm_worker/config.py +++ b/protollm_tools/llm-worker/protollm_worker/config.py @@ -6,18 +6,18 @@ class Config: Configuration class for setting up Redis, RabbitMQ, and model-specific parameters. Attributes: - redis_host (str): The hostname of the Redis server. Defaults to "localhost". - redis_port (int): The port number of the Redis server. Defaults to 6379. - redis_prefix (str): Prefix for keys used in Redis. Defaults to "llm-api". - rabbit_host (str): The hostname of the RabbitMQ server. Defaults to "localhost". - rabbit_port (int): The port number of the RabbitMQ server. Defaults to 5672. - rabbit_login (str): The username for RabbitMQ authentication. Defaults to "admin". - rabbit_password (str): The password for RabbitMQ authentication. Defaults to "admin". - queue_name (str): The name of the RabbitMQ queue to use. Defaults to "llm-api-queue". - model_path (str): Path to the model being used. Defaults to None. - token_len (int): The maximum length of tokens for processing by the model. Defaults to None. - tensor_parallel_size (int): The size of tensor parallelism for distributed processing. Defaults to None. - gpu_memory_utilisation (float): The percentage of GPU memory utilization for the model. Defaults to None. + redis_host: The hostname of the Redis server. Defaults to "localhost". + redis_port: The port number of the Redis server. Defaults to 6379. + redis_prefix: Prefix for keys used in Redis. Defaults to "llm-api". 
+ rabbit_host: The hostname of the RabbitMQ server. Defaults to "localhost". + rabbit_port: The port number of the RabbitMQ server. Defaults to 5672. + rabbit_login: The username for RabbitMQ authentication. Defaults to "admin". + rabbit_password: The password for RabbitMQ authentication. Defaults to "admin". + queue_name: The name of the RabbitMQ queue to use. Defaults to "llm-api-queue". + model_path: Path to the model being used. Defaults to None. + token_len: The maximum length of tokens for processing by the model. Defaults to None. + tensor_parallel_size: The size of tensor parallelism for distributed processing. Defaults to None. + gpu_memory_utilisation: The percentage of GPU memory utilization for the model. Defaults to None. """ def __init__( @@ -51,18 +51,18 @@ def __init__( @classmethod def read_from_env(cls) -> 'Config': return Config( - os.environ.get("REDIS_HOST", "localhost"), - os.environ.get("REDIS_PORT", "6379"), - os.environ.get("REDIS_PREFIX", "llm-api"), - os.environ.get("RABBIT_MQ_HOST", "localhost"), - os.environ.get("RABBIT_MQ_PORT", "5672"), - os.environ.get("RABBIT_MQ_LOGIN", "admin"), - os.environ.get("RABBIT_MQ_PASSWORD", "admin"), - os.environ.get("QUEUE_NAME", "llm-api-queue"), + os.environ.get("REDIS_HOST"), + int(os.environ.get("REDIS_PORT")), + os.environ.get("REDIS_PREFIX"), + os.environ.get("RABBIT_MQ_HOST"), + int(os.environ.get("RABBIT_MQ_PORT")), + os.environ.get("RABBIT_MQ_LOGIN"), + os.environ.get("RABBIT_MQ_PASSWORD"), + os.environ.get("QUEUE_NAME"), os.environ.get("MODEL_PATH"), - int(os.environ.get("TOKENS_LEN", "16384")), - int(os.environ.get("TENSOR_PARALLEL_SIZE", "2")), - float(os.environ.get("GPU_MEMORY_UTILISATION", "0.9")), + int(os.environ.get("TOKENS_LEN")), + int(os.environ.get("TENSOR_PARALLEL_SIZE")), + float(os.environ.get("GPU_MEMORY_UTILISATION")), ) @classmethod @@ -74,16 +74,16 @@ def read_from_env_file(cls, path: str) -> 'Config': key, value = line.split("=") env_vars[key] = value return Config( - 
env_vars.get("REDIS_HOST", "localhost"), - int(env_vars.get("REDIS_PORT", "6379")), - env_vars.get("REDIS_PREFIX", "llm-api"), - env_vars.get("RABBIT_MQ_HOST", "localhost"), - int(env_vars.get("RABBIT_MQ_PORT", "5672")), - env_vars.get("RABBIT_MQ_LOGIN", "admin"), - env_vars.get("RABBIT_MQ_PASSWORD", "admin"), - env_vars.get("QUEUE_NAME", "llm-api-queue"), + env_vars.get("REDIS_HOST"), + int(env_vars.get("REDIS_PORT")), + env_vars.get("REDIS_PREFIX"), + env_vars.get("RABBIT_MQ_HOST"), + int(env_vars.get("RABBIT_MQ_PORT")), + env_vars.get("RABBIT_MQ_LOGIN"), + env_vars.get("RABBIT_MQ_PASSWORD"), + env_vars.get("QUEUE_NAME"), env_vars.get("MODEL_PATH"), - int(env_vars.get("TOKENS_LEN", "16384")), - int(env_vars.get("TENSOR_PARALLEL_SIZE", "2")), - float(env_vars.get("GPU_MEMORY_UTILISATION", "0.9")), + int(env_vars.get("TOKENS_LEN")), + int(env_vars.get("TENSOR_PARALLEL_SIZE")), + float(env_vars.get("GPU_MEMORY_UTILISATION")), ) \ No newline at end of file diff --git a/protollm_tools/sdk/docker-compose.yml b/protollm_tools/sdk/docker-compose.yml index c1aa73a..ef90981 100644 --- a/protollm_tools/sdk/docker-compose.yml +++ b/protollm_tools/sdk/docker-compose.yml @@ -4,11 +4,10 @@ services: rabbitmq: image: "rabbitmq:3-management" ports: - - "5672:5672" # RabbitMQ broker port - - "15672:15672" # RabbitMQ management interface - environment: - - RABBITMQ_DEFAULT_USER=admin - - RABBITMQ_DEFAULT_PASS=admin + - "5672:5672" + - "15672:15672" + env_file: + - .env volumes: - "rabbitmq_data:/var/lib/rabbitmq" networks: @@ -30,20 +29,8 @@ services: - redis networks: - llm_wrap_network - environment: - - CELERY_BROKER_URL=amqp://admin:admin@rabbitmq:5672/ - - CELERY_RESULT_BACKEND=redis://redis:6379/0 - - C_FORCE_ROOT=True - - LLM_API_HOST=localhost - - LLM_API_PORT=6672 - - TEXT_EMB_HOST=embedding_server - - TEXT_EMB_PORT=80 - - REDIS_HOST=redis - - REDIS_PORT=6379 - - RABBIT_HOST=rabbitmq - - RABBIT_PORT=5672 - - VECTOR_HOST=localhost - - VECTOR_PORT=9941 + env_file: + - .env 
command: celery -A protollm_sdk.celery.app worker --loglevel=info flower: @@ -55,23 +42,14 @@ services: - celery_worker networks: - llm_wrap_network - environment: - - CELERY_BROKER_URL=amqp://admin:admin@rabbitmq:5672/ - - CELERY_RESULT_BACKEND=redis://redis:6379/0 - - LLM_API_HOST=localhost - - LLM_API_PORT=6672 - - TEXT_EMB_HOST=embedding_server - - TEXT_EMB_PORT=80 - - REDIS_HOST=redis - - REDIS_PORT=6379 - - RABBIT_HOST=rabbitmq - - RABBIT_PORT=5672 - - VECTOR_HOST=localhost - - VECTOR_PORT=9941 - command: sh -c "sleep 20 && celery -A protollm_sdk.celery.app flower --broker=amqp://guest:guest@rabbitmq:5672/ --port=7672" + env_file: + - .env + command: sh -c "sleep 20 && celery -A protollm_sdk.celery.app flower --broker=${CELERY_BROKER_URL} --port=7672" server: image: chromadb/chroma:latest + env_file: + - .env environment: - IS_PERSISTENT=TRUE ports: @@ -80,8 +58,8 @@ services: - llm_wrap_network embedding_server: - image: ${EMBEDDING_IMAGE:-ghcr.io/huggingface/text-embeddings-inference:cpu-0.3.0} #default image with CPU support - command: --model-id ${ST_MODEL:-intfloat/multilingual-e5-large} --revision ${ST_MODEL_REVISION:-main} #configure model and model revision paramters + image: ${EMBEDDING_IMAGE} + command: --model-id ${ST_MODEL} --revision ${ST_MODEL_REVISION} ports: - 9942:80 networks: @@ -94,4 +72,4 @@ volumes: networks: llm_wrap_network: name: llm_wrap_network - driver: bridge \ No newline at end of file + driver: bridge diff --git a/protollm_tools/sdk/protollm_sdk/models/job_context_models.py b/protollm_tools/sdk/protollm_sdk/models/job_context_models.py index badfb36..fa3586e 100644 --- a/protollm_tools/sdk/protollm_sdk/models/job_context_models.py +++ b/protollm_tools/sdk/protollm_sdk/models/job_context_models.py @@ -31,6 +31,7 @@ class ChatCompletionUnit(BaseModel): class ChatCompletionModel(BaseModel): """A model for chat completion order""" job_id: str + source: str = "local" meta: PromptMeta messages: list[ChatCompletionUnit] From 
9009de38bc40ba992971115d5e9d0a54c46ced0a Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Wed, 5 Feb 2025 16:35:58 +0300 Subject: [PATCH 5/7] documentation: update documentation about deploying --- protollm_tools/llm-api/README.md | 21 ++++++++++++++++----- protollm_tools/llm-worker/README.md | 7 ------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/protollm_tools/llm-api/README.md b/protollm_tools/llm-api/README.md index bdc84b8..ae6f224 100644 --- a/protollm_tools/llm-api/README.md +++ b/protollm_tools/llm-api/README.md @@ -105,15 +105,21 @@ These variables must be configured and synchronized with the LLM-core system: ### Example `.env` File ```env -INNER_LLM_URL=localhost:8670 -REDIS_HOST=localhost +# API +CELERY_BROKER_URL=amqp://admin:admin@127.0.0.1:5672/ +CELERY_RESULT_BACKEND=redis://127.0.0.1:6379/0 +REDIS_HOST=redis REDIS_PORT=6379 -REDIS_PREFIX=llm-api -RABBIT_MQ_HOST=localhost +RABBIT_MQ_HOST=rabbitmq RABBIT_MQ_PORT=5672 RABBIT_MQ_LOGIN=admin RABBIT_MQ_PASSWORD=admin -QUEUE_NAME=llm-api-queue +WEB_RABBIT_MQ=15672 +API_PORT=6672 + +# RabbitMQ +RABBITMQ_DEFAULT_USER=admin +RABBITMQ_DEFAULT_PASS=admin ``` --- @@ -168,7 +174,12 @@ Below is the architecture diagram for the interaction between API, RabbitMQ, LLM app.include_router(get_router(config)) ``` +### Running the API Locally (without Docker) +To run the API locally using Uvicorn, use the following command: +```sh + uvicorn protollm_api.backend.main:app --host 127.0.0.1 --port 8000 --reload +``` ### Example Request #### Generate ```bash diff --git a/protollm_tools/llm-worker/README.md b/protollm_tools/llm-worker/README.md index 7183144..0d2f721 100644 --- a/protollm_tools/llm-worker/README.md +++ b/protollm_tools/llm-worker/README.md @@ -41,13 +41,6 @@ services: dockerfile: Dockerfile env_file: .env environment: - TOKENS_LEN: 16384 - GPU_MEMORY_UTILISATION: 0.9 - TENSOR_PARALLEL_SIZE: 2 - MODEL_PATH: /data/ - NVIDIA_VISIBLE_DEVICES: - REDIS_HOST: localhost - REDIS_PORT: 6379 FORCE_CMAKE: 
1 volumes: - :/data From 976417f63f56624e64c53ce4314bbfa6f15f9de8 Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Wed, 5 Feb 2025 16:37:56 +0300 Subject: [PATCH 6/7] tests: fix cleaning queues --- .../sdk/tests/protollm_sdk/job/test_text_embedder.py | 2 +- .../object_interface/integration/test_rabbit_mq_wrapper.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/protollm_tools/sdk/tests/protollm_sdk/job/test_text_embedder.py b/protollm_tools/sdk/tests/protollm_sdk/job/test_text_embedder.py index 15a9593..e7799d4 100644 --- a/protollm_tools/sdk/tests/protollm_sdk/job/test_text_embedder.py +++ b/protollm_tools/sdk/tests/protollm_sdk/job/test_text_embedder.py @@ -74,7 +74,7 @@ def text_embedder_request(): """ data = { "job_id": str(uuid.uuid4()), - "inputs": "Ехал грека через реку видит грека в реке рак.", + "inputs": "A Greek was crossing the river when he saw a crab in the water.", "truncate": False } return TextEmbedderRequest(**data) diff --git a/protollm_tools/sdk/tests/protollm_sdk/object_interface/integration/test_rabbit_mq_wrapper.py b/protollm_tools/sdk/tests/protollm_sdk/object_interface/integration/test_rabbit_mq_wrapper.py index cbb01ca..5fcea36 100644 --- a/protollm_tools/sdk/tests/protollm_sdk/object_interface/integration/test_rabbit_mq_wrapper.py +++ b/protollm_tools/sdk/tests/protollm_sdk/object_interface/integration/test_rabbit_mq_wrapper.py @@ -1,3 +1,4 @@ +import pika import pytest import json from time import sleep @@ -27,8 +28,12 @@ def cleanup_queues(rabbit_wrapper): Fixture to clean up all queues before each test. 
""" with rabbit_wrapper.get_channel() as channel: + channel.queue_declare(queue="test_queue", durable=True) channel.queue_purge("test_queue") + channel.queue_declare(queue="test_priority_queue", durable=True, arguments={"x-max-priority": 10}) + channel.queue_purge("test_priority_queue") + @pytest.mark.local def test_publish_message(rabbit_wrapper): """ From e41c1dbef2f9263c6f61403029777d1068c1ed88 Mon Sep 17 00:00:00 2001 From: 1martin1 Date: Wed, 5 Feb 2025 16:58:51 +0300 Subject: [PATCH 7/7] refactor: inc version on pyproject, update README, --- protollm_tools/llm-api/README.md | 24 ++++++++++++++++++------ protollm_tools/llm-api/pyproject.toml | 2 +- protollm_tools/llm-worker/pyproject.toml | 2 +- protollm_tools/sdk/docker-compose.yml | 12 ++++++------ 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/protollm_tools/llm-api/README.md b/protollm_tools/llm-api/README.md index ae6f224..f86fc1d 100644 --- a/protollm_tools/llm-api/README.md +++ b/protollm_tools/llm-api/README.md @@ -167,18 +167,30 @@ Below is the architecture diagram for the interaction between API, RabbitMQ, LLM ### Running the API 1. Configure environment variables in the `.env` file. 2. 
Start the API using: - ```python - app = FastAPI() +```python +app = FastAPI() - config = Config.read_from_env() +config = Config.read_from_env() - app.include_router(get_router(config)) - ``` +app.include_router(get_router(config)) +``` ### Running the API Locally (without Docker) To run the API locally using Uvicorn, use the following command: ```sh - uvicorn protollm_api.backend.main:app --host 127.0.0.1 --port 8000 --reload +uvicorn protollm_api.backend.main:app --host 127.0.0.1 --port 8000 --reload +``` + +Or use this main file: +```python +app = FastAPI() + +config = Config.read_from_env() + +app.include_router(get_router(config)) + +if __name__ == "__main__": + uvicorn.run("protollm_api.backend.main:app", host="127.0.0.1", port=8000, reload=True) ``` ### Example Request #### Generate diff --git a/protollm_tools/llm-api/pyproject.toml b/protollm_tools/llm-api/pyproject.toml index 1aa21cf..1f01f22 100644 --- a/protollm_tools/llm-api/pyproject.toml +++ b/protollm_tools/llm-api/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "protollm-api" -version = "1.0.0" +version = "1.0.1" description = "" authors = ["aimclub"] readme = "README.md" diff --git a/protollm_tools/llm-worker/pyproject.toml b/protollm_tools/llm-worker/pyproject.toml index b46de09..25adcaa 100644 --- a/protollm_tools/llm-worker/pyproject.toml +++ b/protollm_tools/llm-worker/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "protollm-worker" -version = "1.0.0" +version = "1.0.1" description = "" authors = ["aimclub"] readme = "README.md" diff --git a/protollm_tools/sdk/docker-compose.yml b/protollm_tools/sdk/docker-compose.yml index ef90981..c982002 100644 --- a/protollm_tools/sdk/docker-compose.yml +++ b/protollm_tools/sdk/docker-compose.yml @@ -4,8 +4,8 @@ services: rabbitmq: image: "rabbitmq:3-management" ports: - - "5672:5672" - - "15672:15672" + - ${RABBIT_PORT}:5672 + - ${WEB_RABBIT_MQ}:15672 env_file: - .env volumes: @@ -16,7 +16,7 @@ services: redis: image: "redis:alpine" ports: - - 
"6379:6379" + - ${REDIS_PORT}:6379 volumes: - redis_data:/var/lib/data networks: @@ -36,7 +36,7 @@ services: flower: build: . ports: - - "7672:7672" + - ${FLOWER_PORT}:7672 depends_on: - rabbitmq - celery_worker @@ -53,7 +53,7 @@ services: environment: - IS_PERSISTENT=TRUE ports: - - 9941:8000 + - ${VECTOR_PORT}:8000 networks: - llm_wrap_network @@ -61,7 +61,7 @@ services: image: ${EMBEDDING_IMAGE} command: --model-id ${ST_MODEL} --revision ${ST_MODEL_REVISION} ports: - - 9942:80 + - ${EMBEDER_PORT}:80 networks: - llm_wrap_network