From 4854b16a20c7c43924ed5ebc3e3a1536febddf17 Mon Sep 17 00:00:00 2001 From: = Enea_Gore Date: Thu, 24 Oct 2024 02:44:42 +0200 Subject: [PATCH 1/5] mvp multiple approaches and cot --- llm_core/llm_core/models/__init__.py | 3 + .../module_text_llm/__main__.py | 7 +- .../module_text_llm/approach_controller.py | 19 + .../module_text_llm/module_text_llm/config.py | 82 +- .../generate_cot_suggestions.py | 159 ++++ .../prompts/cot_suggestions.py | 28 + .../prompts/refined_cot_suggestions.py | 17 + modules/text/module_text_llm/poetry.lock | 698 +++++++++--------- 8 files changed, 662 insertions(+), 351 deletions(-) create mode 100644 modules/text/module_text_llm/module_text_llm/approach_controller.py create mode 100644 modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py create mode 100644 modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py create mode 100644 modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py diff --git a/llm_core/llm_core/models/__init__.py b/llm_core/llm_core/models/__init__.py index 6f9db4fdd..f5e3edd60 100644 --- a/llm_core/llm_core/models/__init__.py +++ b/llm_core/llm_core/models/__init__.py @@ -6,6 +6,7 @@ DefaultModelConfig: Type[ModelConfig] +MiniModelConfig: ModelConfig default_model_name = os.environ.get("LLM_DEFAULT_MODEL") evaluation_model_name = os.environ.get("LLM_EVALUATION_MODEL") @@ -18,6 +19,8 @@ types.append(openai_config.OpenAIModelConfig) if default_model_name in openai_config.available_models: DefaultModelConfig = openai_config.OpenAIModelConfig + if "openai_gpt-4o-mini" in openai_config.available_models: + MiniModelConfig = openai_config.OpenAIModelConfig(model_name="openai_gpt-4o-mini",max_tokens=3000, temperature=0,top_p=0.9,presence_penalty=0,frequency_penalty=0) if evaluation_model_name in openai_config.available_models: evaluation_model = openai_config.available_models[evaluation_model_name] except AttributeError: diff --git 
a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py index 6f0312729..ac99fa063 100644 --- a/modules/text/module_text_llm/module_text_llm/__main__.py +++ b/modules/text/module_text_llm/module_text_llm/__main__.py @@ -4,7 +4,7 @@ import nltk import tiktoken - +from module_text_llm.approach_controller import generate from athena import app, submission_selector, submissions_consumer, feedback_consumer, feedback_provider, evaluation_provider from athena.text import Exercise, Submission, Feedback from athena.logger import logger @@ -13,6 +13,7 @@ from module_text_llm.evaluation import get_feedback_statistics, get_llm_statistics from module_text_llm.generate_suggestions import generate_suggestions from module_text_llm.generate_evaluation import generate_evaluation +from module_text_llm.generate_cot_suggestions import generate_cot_suggestions @submissions_consumer @@ -30,12 +31,12 @@ def select_submission(exercise: Exercise, submissions: List[Submission]) -> Subm def process_incoming_feedback(exercise: Exercise, submission: Submission, feedbacks: List[Feedback]): logger.info("process_feedback: Received %d feedbacks for submission %d of exercise %d.", len(feedbacks), submission.id, exercise.id) - +# change here to have multiple approaches @feedback_provider async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", "Graded" if is_graded else "Non-graded", submission.id, exercise.id) - return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug) + return await generate(exercise, submission, module_config.approach, module_config.debug) @evaluation_provider diff --git a/modules/text/module_text_llm/module_text_llm/approach_controller.py 
b/modules/text/module_text_llm/module_text_llm/approach_controller.py new file mode 100644 index 000000000..26ec9c72c --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approach_controller.py @@ -0,0 +1,19 @@ + +from typing import List, Optional, Sequence +from pydantic import BaseModel, Field + +from athena import emit_meta +from athena.text import Exercise, Submission, Feedback +from athena.logger import logger +from module_text_llm.config import BasicApproachConfig, ChainOfThoughtConfig + + +from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions +from module_text_llm.generate_suggestions import generate_suggestions +from module_text_llm.generate_cot_suggestions import generate_cot_suggestions + +async def generate(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: + if(isinstance(config, BasicApproachConfig)): + return await generate_suggestions(exercise, submission, config, debug) + elif(isinstance(config, ChainOfThoughtConfig)): + return await generate_cot_suggestions(exercise, submission, config, debug) diff --git a/modules/text/module_text_llm/module_text_llm/config.py b/modules/text/module_text_llm/module_text_llm/config.py index 0a61eeba6..b612ff5cc 100644 --- a/modules/text/module_text_llm/module_text_llm/config.py +++ b/modules/text/module_text_llm/module_text_llm/config.py @@ -1,13 +1,23 @@ from pydantic import BaseModel, Field - +from typing import Union from athena import config_schema_provider -from llm_core.models import ModelConfigType, DefaultModelConfig +from llm_core.models import ModelConfigType, DefaultModelConfig, MiniModelConfig from module_text_llm.prompts.generate_suggestions import ( system_message as generate_suggestions_system_message, human_message as generate_suggestions_human_message ) +from enum import Enum +from pydantic import root_validator +from abc import ABC, abstractmethod +from 
module_text_llm.prompts.cot_suggestions import ( + system_message as generate_cot_suggestions_system_message, + human_message as generate_cot_suggestions_human_message +) - +from module_text_llm.prompts.refined_cot_suggestions import ( + system_message as generate_refined_cot_suggestions_system_message, + human_message as generate_refined_cot_suggestions_human_message +) class GenerateSuggestionsPrompt(BaseModel): """\ Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** @@ -19,15 +29,73 @@ class GenerateSuggestionsPrompt(BaseModel): human_message: str = Field(default=generate_suggestions_human_message, description="Message from a human. The input on which the AI is supposed to act.") +class CoTGenerateSuggestionsPrompt(BaseModel): + """\ +Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** -class BasicApproachConfig(BaseModel): - """This approach uses a LLM with a single prompt to generate feedback in a single step.""" +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + system_message: str = Field(default=generate_cot_suggestions_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(default=generate_cot_suggestions_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + second_system_message: str = Field(default=generate_refined_cot_suggestions_system_message, + description="Message for priming AI behavior and instructing it what to do.") + answer_message: str = Field(default=generate_refined_cot_suggestions_human_message, + description="Message from a human. 
The input on which the AI is supposed to act.") + +class ApproachType(str, Enum): + basic = "BasicApproach" + chain_of_thought = "ChainOfThought" + + +class ApproachConfig(BaseModel, ABC): max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") - model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore + model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore + + # @abstractmethod + # def get_prompt(self): + # """Abstract method to get the appropriate prompt configuration.""" + # pass + + class Config: + # Enable discriminator to distinguish between subclasses in the schema + use_enum_values = True + +class BasicApproachConfig(ApproachConfig): generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) + # def get_prompt(self): + # return self.generate_suggestions_prompt + +class ChainOfThoughtConfig(ApproachConfig): + model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore + generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) + + # def get_prompt(self): + # return self.generate_suggestions_prompt + +# available_approaches = [BasicApproachConfig, ChainOfThoughtConfig] +ApproachConfigUnion = Union[BasicApproachConfig, ChainOfThoughtConfig] +# def approach_factory(approach_type: ApproachType) -> ApproachConfig: +# if approach_type == ApproachType.basic: +# return BasicApproachConfig() +# elif approach_type == ApproachType.chain_of_thought: +# return ChainOfThoughtConfig() +# else: +# raise ValueError(f"Unknown approach type: {approach_type}") + @config_schema_provider class Configuration(BaseModel): debug: bool = Field(default=False, description="Enable debug mode.") - approach: BasicApproachConfig = Field(default=BasicApproachConfig()) + approach: ApproachConfigUnion = Field(default_factory=BasicApproachConfig) # Default to BasicApproach + # approach_type: ApproachType = 
Field(default=ApproachType.basic, description="Type of approach to use.") + + # @root_validator(pre=True) + # def populate_approach(cls, values): + # """Automatically instantiate the correct approach based on approach_type.""" + # approach_type = values.get('approach_type', ApproachType.basic) + # values['approach'] = approach_factory(approach_type) + # return values + diff --git a/modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py new file mode 100644 index 000000000..a029d0539 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py @@ -0,0 +1,159 @@ +from typing import List, Optional, Sequence +from pydantic import BaseModel, Field + +from athena import emit_meta +from athena.text import Exercise, Submission, Feedback +from athena.logger import logger + +from module_text_llm.config import ChainOfThoughtConfig +from llm_core.utils.llm_utils import ( + get_chat_prompt_with_formatting_instructions, + check_prompt_length_and_omit_features_if_necessary, + num_tokens_from_prompt, +) +from llm_core.utils.predict_and_parse import predict_and_parse + +from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions + +class FeedbackModel(BaseModel): + title: str = Field(description="Very short title, i.e. 
feedback category or similar", example="Logic Error") + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") + line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + + class Config: + title = "Feedback" + + +class AssessmentModel(BaseModel): + """Collection of feedbacks making up an assessment""" + + feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") + + class Config: + title = "Assessment" + +class InitialAssessment(BaseModel): + title: str = Field(description="Very short title, i.e. feedback category or similar", example="Logic Error") + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") + line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") + credits: float = Field(0.0, description="Number of points received/deducted") + reasoning: str = Field(description="Reasoning why the feedback was given") + impprovment_suggestion: str = Field(description="Suggestion for improvement for the student") + +class InitialAssessmentModel(BaseModel): + """Collection of feedbacks making up an assessment""" + + feedbacks: List[InitialAssessment] = Field(description="Assessment feedbacks") + +async def generate_cot_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: + model = config.model.get_model() # type: ignore[attr-defined] + + prompt_input = { + "max_points": exercise.max_points, + "bonus_points": 
exercise.bonus_points, + "grading_instructions": format_grading_instructions(exercise.grading_instructions, exercise.grading_criteria), + "problem_statement": exercise.problem_statement or "No problem statement.", + "example_solution": exercise.example_solution, + "submission": add_sentence_numbers(submission.text) + } + + chat_prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=config.generate_suggestions_prompt.system_message, + human_message=config.generate_suggestions_prompt.human_message, + pydantic_object=InitialAssessmentModel + ) + + + + # Check if the prompt is too long and omit features if necessary (in order of importance) + omittable_features = ["example_solution", "problem_statement", "grading_instructions"] + prompt_input, should_run = check_prompt_length_and_omit_features_if_necessary( + prompt=chat_prompt, + prompt_input= prompt_input, + max_input_tokens=config.max_input_tokens, + omittable_features=omittable_features, + debug=debug + ) + + # Skip if the prompt is too long + if not should_run: + logger.warning("Input too long. 
Skipping.") + if debug: + emit_meta("prompt", chat_prompt.format(**prompt_input)) + emit_meta("error", f"Input too long {num_tokens_from_prompt(chat_prompt, prompt_input)} > {config.max_input_tokens}") + return [] + + initial_result = await predict_and_parse( + model=model, + chat_prompt=chat_prompt, + prompt_input=prompt_input, + pydantic_object=InitialAssessmentModel, + tags=[ + f"exercise-{exercise.id}", + f"submission-{submission.id}", + ] + ) + + second_prompt_input = { + "answer" : initial_result, + "submission": add_sentence_numbers(submission.text) + + } + + second_chat_prompt = get_chat_prompt_with_formatting_instructions( + model=model, + system_message=config.generate_suggestions_prompt.second_system_message, + human_message=config.generate_suggestions_prompt.answer_message, + pydantic_object=AssessmentModel) + + result = await predict_and_parse( + model=model, + chat_prompt=second_chat_prompt, + prompt_input=second_prompt_input, + pydantic_object=AssessmentModel, + tags=[ + f"exercise-{exercise.id}", + f"submission-{submission.id}", + ] + ) + + if debug: + emit_meta("generate_suggestions", { + "prompt": chat_prompt.format(**prompt_input), + "result": result.dict() if result is not None else None + }) + + + if result is None: + return [] + + grading_instruction_ids = set( + grading_instruction.id + for criterion in exercise.grading_criteria or [] + for grading_instruction in criterion.structured_grading_instructions + ) + + feedbacks = [] + for feedback in result.feedbacks: + index_start, index_end = get_index_range_from_line_range(feedback.line_start, feedback.line_end, submission.text) + grading_instruction_id = feedback.grading_instruction_id if feedback.grading_instruction_id in grading_instruction_ids else None + feedbacks.append(Feedback( + exercise_id=exercise.id, + submission_id=submission.id, + title=feedback.title, + description=feedback.description, + index_start=index_start, + index_end=index_end, + credits=feedback.credits, + 
structured_grading_instruction_id=grading_instruction_id, + meta={} + )) + + return feedbacks \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py new file mode 100644 index 000000000..db5258e2d --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py @@ -0,0 +1,28 @@ +system_message = """ +You are a grading assistant at a prestigious university tasked with grading student submissions for text exercises. +Your goal is to be as helpful as possible to the student while providing constructive feedback without revealing the solution. +In order to successfully complete this task, you must: +1. Analyze the problem statement and the provided grading instructions to understand the requirements of the task. +2. The problem solution is an example of a solution that meets the requirements of the task. Analyze the solution to understand the logic and the approach used to solve the problem, keeping in mind that the student solutions might diverge and still be correct. +3. Analyze the student's submission in regards to the problem statement, so that you can create chunks of the solution that relate to a part of the problem statement. +4. Use the information gathered from the previous steps to provide constructive feedback to the student, guiding them towards the correct solution without revealing it. +5. If you have additional comments, create an unreferenced feedback. +6. For each feedback make sure that the credits are given only on the basis of the grading instructions and solution, the minimal answer from a student that satisfies this should be given the credits. If you have notes or additional comments, make sure to include them in a new feedback with 0 credits and no reference. 
+ 
You are tasked with grading the following exercise, your response should take into account that you are directly responding to the student so you should address the student: +The maximal amount of points for this exercise is {max_points}. +# Problem Statement +{problem_statement} +# Sample Solution +{example_solution} +# Grading Instructions +{grading_instructions} + +""" + +human_message = """\ +Student\'s submission to grade (with sentence numbers : ): +\"\"\" +{submission} +\"\"\"\ +""" \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py new file mode 100644 index 000000000..e0b03ebec --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py @@ -0,0 +1,17 @@ +system_message = """ + You gave the following feedback on the first iteration: {answer} + On this step you need to refine your feedback. + Make sure to follow the following steps to assess and improve your feedback: + It should follow the grading instructions and the sample solution, if it doesn't, consider improvements. + If you have your own additional improvements that are not present in the grading instructions, add them in a new feedback with 0 credits and no reference. + Remember that your response is directly seen by students and it should address them directly. + For each feedback where the student has room for improvement, think about how the student could improve his solution. + Once you have thought how the student can improve the solution, formulate it in a way that guides the student towards the correct solution without revealing it directly. 
+ Consider improvements to the feedback if any of this points is not satisfied.""" + +human_message = """\ +Student\'s submission to grade (with sentence numbers : ): +\"\"\" +{submission} +\"\"\"\ +""" \ No newline at end of file diff --git a/modules/text/module_text_llm/poetry.lock b/modules/text/module_text_llm/poetry.lock index 860a0f71e..236b739b3 100644 --- a/modules/text/module_text_llm/poetry.lock +++ b/modules/text/module_text_llm/poetry.lock @@ -138,13 +138,13 @@ frozenlist = ">=1.1.0" [[package]] name = "anyio" -version = "4.6.0" +version = "4.6.2.post1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, - {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, ] [package.dependencies] @@ -153,7 +153,7 @@ sniffio = ">=1.1" [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -460,88 +460,103 @@ flake8 = "*" [[package]] name = "frozenlist" -version = "1.4.1" +version = "1.5.0" description = "A list-like structure which 
implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = 
"frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = 
"sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + 
{file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = 
"sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = 
"sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] @@ -960,13 +975,13 @@ langchain-core = ">=0.2.38,<0.3.0" [[package]] name = "langsmith" -version = "0.1.134" +version = "0.1.137" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.134-py3-none-any.whl", hash = "sha256:ada98ad80ef38807725f32441a472da3dd28394010877751f48f458d3289da04"}, - {file = "langsmith-0.1.134.tar.gz", hash = "sha256:23abee3b508875a0e63c602afafffc02442a19cfd88f9daae05b3e9054fd6b61"}, + {file = "langsmith-0.1.137-py3-none-any.whl", hash = "sha256:4256d5c61133749890f7b5c88321dbb133ce0f440c621ea28e76513285859b81"}, + {file = "langsmith-0.1.137.tar.gz", hash = "sha256:56cdfcc6c74cb20a3f437d5bd144feb5bf93f54c5a2918d1e568cbd084a372d4"}, ] [package.dependencies] @@ -1003,22 +1018,22 @@ url = "../../../llm_core" [[package]] name = "marshmallow" -version = "3.22.0" +version = "3.23.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, + {file = "marshmallow-3.23.0-py3-none-any.whl", hash = "sha256:82f20a2397834fe6d9611b241f2f7e7b680ed89c49f84728a1ad937be6b4bdf4"}, + {file = "marshmallow-3.23.0.tar.gz", hash = "sha256:98d8827a9f10c03d44ead298d2e99c6aea8197df18ccfad360dae7f89a50da2e"}, ] [package.dependencies] packaging = ">=17.0" [package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "simplejson"] [[package]] name = "mccabe" @@ -1134,38 
+1149,43 @@ files = [ [[package]] name = "mypy" -version = "1.11.2" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = 
"mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = "mypy-1.11.2-py3-none-any.whl", hash = 
"sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = 
"mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = 
"mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] @@ -1174,6 +1194,7 @@ typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -1285,68 +1306,69 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "orjson" -version = "3.10.7" +version = "3.10.10" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, - {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, - {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, - {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, - {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, - {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, - {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, - {file = 
"orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, - {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, - {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, - {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, - {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, - {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, - {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, - {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, - {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, - {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, - {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, - {file = 
"orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, - {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, - {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, - {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, - {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, + {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"}, + {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"}, + {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"}, + {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"}, 
+ {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"}, + {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"}, + {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"}, + {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"}, + {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"}, + {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"}, + {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"}, + {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"}, + {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"}, + {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"}, + {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"}, + {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"}, + {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"}, + {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"}, + {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"}, + {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"}, + {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"}, + {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"}, + {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"}, + {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"}, + {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"}, + {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"}, + {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"}, + {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"}, + {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"}, + {file = 
"orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"}, + {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"}, + {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"}, + {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"}, + {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"}, + {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"}, + {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"}, + {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"}, + {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"}, ] [[package]] @@ -1537,24 +1559,20 @@ 
with-vulture = ["vulture (>=1.5)"] [[package]] name = "psycopg2" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, - {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, - {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, - {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, - {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, - {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, - {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, - {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, - {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, + {file = 
"psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, ] [[package]] @@ -2030,60 +2048,68 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.35" +version = "2.0.36" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", 
hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, + {file = 
"SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, 
+ {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, + {file = 
"SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, + {file = 
"SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [package.dependencies] @@ -2097,7 +2123,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] 
mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -2244,13 +2270,13 @@ telegram = ["requests"] [[package]] name = "types-requests" -version = "2.32.0.20240914" +version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] [package.dependencies] @@ -2319,103 +2345,93 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "yarl" -version = "1.14.0" +version = "1.16.0" description = "Yet another URL library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"}, - {file = 
"yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"}, - {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"}, - {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"}, - {file = 
"yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"}, - {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"}, - {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"}, - 
{file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"}, - {file = "yarl-1.14.0-cp312-cp312-win32.whl", hash = 
"sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"}, - {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"}, - {file = 
"yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"}, - {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = "sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"}, - {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:91d875f75fabf76b3018c5f196bf3d308ed2b49ddcb46c1576d6b075754a1393"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4009def9be3a7e5175db20aa2d7307ecd00bbf50f7f0f989300710eee1d0b0b9"}, - {file = "yarl-1.14.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:582cedde49603f139be572252a318b30dc41039bc0b8165f070f279e5d12187f"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbd9ff43a04f8ffe8a959a944c2dca10d22f5f99fc6a459f49c3ebfb409309d9"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f805e37ed16cc212fdc538a608422d7517e7faf539bedea4fe69425bc55d76"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95e16e9eaa2d7f5d87421b8fe694dd71606aa61d74b824c8d17fc85cc51983d1"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:816d24f584edefcc5ca63428f0b38fee00b39fe64e3c5e558f895a18983efe96"}, - {file = "yarl-1.14.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd2660c01367eb3ef081b8fa0a5da7fe767f9427aa82023a961a5f28f0d4af6c"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:94b2bb9bcfd5be9d27004ea4398fb640373dd0c1a9e219084f42c08f77a720ab"}, - {file = 
"yarl-1.14.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c2089a9afef887664115f7fa6d3c0edd6454adaca5488dba836ca91f60401075"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2192f718db4a8509f63dd6d950f143279211fa7e6a2c612edc17d85bf043d36e"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:8385ab36bf812e9d37cf7613999a87715f27ef67a53f0687d28c44b819df7cb0"}, - {file = "yarl-1.14.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b4c1ecba93e7826dc71ddba75fb7740cdb52e7bd0be9f03136b83f54e6a1f511"}, - {file = "yarl-1.14.0-cp38-cp38-win32.whl", hash = "sha256:e749af6c912a7bb441d105c50c1a3da720474e8acb91c89350080dd600228f0e"}, - {file = "yarl-1.14.0-cp38-cp38-win_amd64.whl", hash = "sha256:147e36331f6f63e08a14640acf12369e041e0751bb70d9362df68c2d9dcf0c87"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a9f917966d27f7ce30039fe8d900f913c5304134096554fd9bea0774bcda6d1"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a2f8fb7f944bcdfecd4e8d855f84c703804a594da5123dd206f75036e536d4d"}, - {file = "yarl-1.14.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f4e475f29a9122f908d0f1f706e1f2fc3656536ffd21014ff8a6f2e1b14d1d8"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8089d4634d8fa2b1806ce44fefa4979b1ab2c12c0bc7ef3dfa45c8a374811348"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b16f6c75cffc2dc0616ea295abb0e1967601bd1fb1e0af6a1de1c6c887f3439"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498b3c55087b9d762636bca9b45f60d37e51d24341786dc01b81253f9552a607"}, - {file = "yarl-1.14.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3f8bfc1db82589ef965ed234b87de30d140db8b6dc50ada9e33951ccd8ec07a"}, - {file = 
"yarl-1.14.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:625f207b1799e95e7c823f42f473c1e9dbfb6192bd56bba8695656d92be4535f"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:781e2495e408a81e4eaeedeb41ba32b63b1980dddf8b60dbbeff6036bcd35049"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:659603d26d40dd4463200df9bfbc339fbfaed3fe32e5c432fe1dc2b5d4aa94b4"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4e0d45ebf975634468682c8bec021618b3ad52c37619e5c938f8f831fa1ac5c0"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2e4725a08cb2b4794db09e350c86dee18202bb8286527210e13a1514dc9a59a"}, - {file = "yarl-1.14.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:19268b4fec1d7760134f2de46ef2608c2920134fb1fa61e451f679e41356dc55"}, - {file = "yarl-1.14.0-cp39-cp39-win32.whl", hash = "sha256:337912bcdcf193ade64b9aae5a4017a0a1950caf8ca140362e361543c6773f21"}, - {file = "yarl-1.14.0-cp39-cp39-win_amd64.whl", hash = "sha256:b6d0147574ce2e7b812c989e50fa72bbc5338045411a836bd066ce5fc8ac0bce"}, - {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"}, - {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"}, + {file = "yarl-1.16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:32468f41242d72b87ab793a86d92f885355bcf35b3355aa650bfa846a5c60058"}, + {file = "yarl-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:234f3a3032b505b90e65b5bc6652c2329ea7ea8855d8de61e1642b74b4ee65d2"}, + {file = "yarl-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a0296040e5cddf074c7f5af4a60f3fc42c0237440df7bcf5183be5f6c802ed5"}, + {file = "yarl-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:de6c14dd7c7c0badba48157474ea1f03ebee991530ba742d381b28d4f314d6f3"}, + {file = "yarl-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b140e532fe0266003c936d017c1ac301e72ee4a3fd51784574c05f53718a55d8"}, + {file = "yarl-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:019f5d58093402aa8f6661e60fd82a28746ad6d156f6c5336a70a39bd7b162b9"}, + {file = "yarl-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c42998fd1cbeb53cd985bff0e4bc25fbe55fd6eb3a545a724c1012d69d5ec84"}, + {file = "yarl-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7c30fb38c300fe8140df30a046a01769105e4cf4282567a29b5cdb635b66c4"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e49e0fd86c295e743fd5be69b8b0712f70a686bc79a16e5268386c2defacaade"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:b9ca7b9147eb1365c8bab03c003baa1300599575effad765e0b07dd3501ea9af"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:27e11db3f1e6a51081a981509f75617b09810529de508a181319193d320bc5c7"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8994c42f4ca25df5380ddf59f315c518c81df6a68fed5bb0c159c6cb6b92f120"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:542fa8e09a581bcdcbb30607c7224beff3fdfb598c798ccd28a8184ffc18b7eb"}, + {file = "yarl-1.16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2bd6a51010c7284d191b79d3b56e51a87d8e1c03b0902362945f15c3d50ed46b"}, + {file = "yarl-1.16.0-cp310-cp310-win32.whl", hash = "sha256:178ccb856e265174a79f59721031060f885aca428983e75c06f78aa24b91d929"}, + {file = "yarl-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe8bba2545427418efc1929c5c42852bdb4143eb8d0a46b09de88d1fe99258e7"}, + {file = "yarl-1.16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:d8643975a0080f361639787415a038bfc32d29208a4bf6b783ab3075a20b1ef3"}, + {file = "yarl-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:676d96bafc8c2d0039cea0cd3fd44cee7aa88b8185551a2bb93354668e8315c2"}, + {file = "yarl-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d9525f03269e64310416dbe6c68d3b23e5d34aaa8f47193a1c45ac568cecbc49"}, + {file = "yarl-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b37d5ec034e668b22cf0ce1074d6c21fd2a08b90d11b1b73139b750a8b0dd97"}, + {file = "yarl-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f32c4cb7386b41936894685f6e093c8dfaf0960124d91fe0ec29fe439e201d0"}, + {file = "yarl-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b8e265a0545637492a7e12fd7038370d66c9375a61d88c5567d0e044ded9202"}, + {file = "yarl-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:789a3423f28a5fff46fbd04e339863c169ece97c827b44de16e1a7a42bc915d2"}, + {file = "yarl-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1d1f45e3e8d37c804dca99ab3cf4ab3ed2e7a62cd82542924b14c0a4f46d243"}, + {file = "yarl-1.16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:621280719c4c5dad4c1391160a9b88925bb8b0ff6a7d5af3224643024871675f"}, + {file = "yarl-1.16.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed097b26f18a1f5ff05f661dc36528c5f6735ba4ce8c9645e83b064665131349"}, + {file = "yarl-1.16.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2f1fe2b2e3ee418862f5ebc0c0083c97f6f6625781382f828f6d4e9b614eba9b"}, + {file = "yarl-1.16.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:87dd10bc0618991c66cee0cc65fa74a45f4ecb13bceec3c62d78ad2e42b27a16"}, + {file = "yarl-1.16.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:4199db024b58a8abb2cfcedac7b1292c3ad421684571aeb622a02f242280e8d6"}, + {file = 
"yarl-1.16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:99a9dcd4b71dd5f5f949737ab3f356cfc058c709b4f49833aeffedc2652dac56"}, + {file = "yarl-1.16.0-cp311-cp311-win32.whl", hash = "sha256:a9394c65ae0ed95679717d391c862dece9afacd8fa311683fc8b4362ce8a410c"}, + {file = "yarl-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:5b9101f528ae0f8f65ac9d64dda2bb0627de8a50344b2f582779f32fda747c1d"}, + {file = "yarl-1.16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:4ffb7c129707dd76ced0a4a4128ff452cecf0b0e929f2668ea05a371d9e5c104"}, + {file = "yarl-1.16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1a5e9d8ce1185723419c487758d81ac2bde693711947032cce600ca7c9cda7d6"}, + {file = "yarl-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d743e3118b2640cef7768ea955378c3536482d95550222f908f392167fe62059"}, + {file = "yarl-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26768342f256e6e3c37533bf9433f5f15f3e59e3c14b2409098291b3efaceacb"}, + {file = "yarl-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1b0796168b953bca6600c5f97f5ed407479889a36ad7d17183366260f29a6b9"}, + {file = "yarl-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858728086914f3a407aa7979cab743bbda1fe2bdf39ffcd991469a370dd7414d"}, + {file = "yarl-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5570e6d47bcb03215baf4c9ad7bf7c013e56285d9d35013541f9ac2b372593e7"}, + {file = "yarl-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66ea8311422a7ba1fc79b4c42c2baa10566469fe5a78500d4e7754d6e6db8724"}, + {file = "yarl-1.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:649bddcedee692ee8a9b7b6e38582cb4062dc4253de9711568e5620d8707c2a3"}, + {file = "yarl-1.16.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:3a91654adb7643cb21b46f04244c5a315a440dcad63213033826549fa2435f71"}, + {file 
= "yarl-1.16.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b439cae82034ade094526a8f692b9a2b5ee936452de5e4c5f0f6c48df23f8604"}, + {file = "yarl-1.16.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:571f781ae8ac463ce30bacebfaef2c6581543776d5970b2372fbe31d7bf31a07"}, + {file = "yarl-1.16.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:aa7943f04f36d6cafc0cf53ea89824ac2c37acbdb4b316a654176ab8ffd0f968"}, + {file = "yarl-1.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a5cf32539373ff39d97723e39a9283a7277cbf1224f7aef0c56c9598b6486c3"}, + {file = "yarl-1.16.0-cp312-cp312-win32.whl", hash = "sha256:a5b6c09b9b4253d6a208b0f4a2f9206e511ec68dce9198e0fbec4f160137aa67"}, + {file = "yarl-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:1208ca14eed2fda324042adf8d6c0adf4a31522fa95e0929027cd487875f0240"}, + {file = "yarl-1.16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5ace0177520bd4caa99295a9b6fb831d0e9a57d8e0501a22ffaa61b4c024283"}, + {file = "yarl-1.16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7118bdb5e3ed81acaa2095cba7ec02a0fe74b52a16ab9f9ac8e28e53ee299732"}, + {file = "yarl-1.16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38fec8a2a94c58bd47c9a50a45d321ab2285ad133adefbbadf3012c054b7e656"}, + {file = "yarl-1.16.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8791d66d81ee45866a7bb15a517b01a2bcf583a18ebf5d72a84e6064c417e64b"}, + {file = "yarl-1.16.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cf936ba67bc6c734f3aa1c01391da74ab7fc046a9f8bbfa230b8393b90cf472"}, + {file = "yarl-1.16.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1aab176dd55b59f77a63b27cffaca67d29987d91a5b615cbead41331e6b7428"}, + {file = "yarl-1.16.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:995d0759004c08abd5d1b81300a91d18c8577c6389300bed1c7c11675105a44d"}, + {file = 
"yarl-1.16.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1bc22e00edeb068f71967ab99081e9406cd56dbed864fc3a8259442999d71552"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35b4f7842154176523e0a63c9b871168c69b98065d05a4f637fce342a6a2693a"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:7ace71c4b7a0c41f317ae24be62bb61e9d80838d38acb20e70697c625e71f120"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8f639e3f5795a6568aa4f7d2ac6057c757dcd187593679f035adbf12b892bb00"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e8be3aff14f0120ad049121322b107f8a759be76a6a62138322d4c8a337a9e2c"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:122d8e7986043d0549e9eb23c7fd23be078be4b70c9eb42a20052b3d3149c6f2"}, + {file = "yarl-1.16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0fd9c227990f609c165f56b46107d0bc34553fe0387818c42c02f77974402c36"}, + {file = "yarl-1.16.0-cp313-cp313-win32.whl", hash = "sha256:595ca5e943baed31d56b33b34736461a371c6ea0038d3baec399949dd628560b"}, + {file = "yarl-1.16.0-cp313-cp313-win_amd64.whl", hash = "sha256:921b81b8d78f0e60242fb3db615ea3f368827a76af095d5a69f1c3366db3f596"}, + {file = "yarl-1.16.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab2b2ac232110a1fdb0d3ffcd087783edd3d4a6ced432a1bf75caf7b7be70916"}, + {file = "yarl-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7f8713717a09acbfee7c47bfc5777e685539fefdd34fa72faf504c8be2f3df4e"}, + {file = "yarl-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdcffe1dbcb4477d2b4202f63cd972d5baa155ff5a3d9e35801c46a415b7f71a"}, + {file = "yarl-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a91217208306d82357c67daeef5162a41a28c8352dab7e16daa82e3718852a7"}, + {file = "yarl-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:3ab3ed42c78275477ea8e917491365e9a9b69bb615cb46169020bd0aa5e2d6d3"}, + {file = "yarl-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:707ae579ccb3262dfaef093e202b4c3fb23c3810e8df544b1111bd2401fd7b09"}, + {file = "yarl-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7a852d1cd0b8d8b37fc9d7f8581152add917a98cfe2ea6e241878795f917ae"}, + {file = "yarl-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3f1cc3d3d4dc574bebc9b387f6875e228ace5748a7c24f49d8f01ac1bc6c31b"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5ff96da263740779b0893d02b718293cc03400c3a208fc8d8cd79d9b0993e532"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:3d375a19ba2bfe320b6d873f3fb165313b002cef8b7cc0a368ad8b8a57453837"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:62c7da0ad93a07da048b500514ca47b759459ec41924143e2ddb5d7e20fd3db5"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:147b0fcd0ee33b4b5f6edfea80452d80e419e51b9a3f7a96ce98eaee145c1581"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:504e1fe1cc4f170195320eb033d2b0ccf5c6114ce5bf2f617535c01699479bca"}, + {file = "yarl-1.16.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bdcf667a5dec12a48f669e485d70c54189f0639c2157b538a4cffd24a853624f"}, + {file = "yarl-1.16.0-cp39-cp39-win32.whl", hash = "sha256:e9951afe6557c75a71045148890052cb942689ee4c9ec29f5436240e1fcc73b7"}, + {file = "yarl-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:7d7aaa8ff95d0840e289423e7dc35696c2b058d635f945bf05b5cd633146b027"}, + {file = "yarl-1.16.0-py3-none-any.whl", hash = "sha256:e6980a558d8461230c457218bd6c92dfc1d10205548215c2c21d79dc8d0a96f3"}, + {file = "yarl-1.16.0.tar.gz", hash = "sha256:b6f687ced5510a9a2474bbae96a4352e5ace5fa34dc44a217b0537fec1db00b4"}, ] [package.dependencies] From 
762c6ee9dd728526e2e662b62ef3c81b135c5cb3 Mon Sep 17 00:00:00 2001 From: = Enea_Gore Date: Fri, 25 Oct 2024 01:34:12 +0200 Subject: [PATCH 2/5] restructure approaches --- llm_core/llm_core/utils/llm_utils.py | 4 +- llm_core/llm_core/utils/predict_and_parse.py | 2 +- .../module_text_llm/__main__.py | 8 +- .../module_text_llm/approach_controller.py | 19 ---- .../approaches/approach_config.py | 10 ++ .../approaches/approach_controller.py | 16 ++++ .../approaches/basic_approach/config.py | 23 +++++ .../basic_approach}/generate_suggestions.py | 4 +- .../prompts/generate_suggestions.py | 0 .../chain_of_thought_approach/config.py | 34 +++++++ .../generate_suggestions.py} | 7 +- .../prompts/cot_suggestions.py | 1 + .../prompts/refined_cot_suggestions.py | 6 +- .../module_text_llm/module_text_llm/config.py | 92 +------------------ 14 files changed, 102 insertions(+), 124 deletions(-) delete mode 100644 modules/text/module_text_llm/module_text_llm/approach_controller.py create mode 100644 modules/text/module_text_llm/module_text_llm/approaches/approach_config.py create mode 100644 modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py create mode 100644 modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py rename modules/text/module_text_llm/module_text_llm/{ => approaches/basic_approach}/generate_suggestions.py (99%) rename modules/text/module_text_llm/module_text_llm/{ => approaches/basic_approach}/prompts/generate_suggestions.py (100%) create mode 100644 modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py rename modules/text/module_text_llm/module_text_llm/{generate_cot_suggestions.py => approaches/chain_of_thought_approach/generate_suggestions.py} (95%) rename modules/text/module_text_llm/module_text_llm/{ => approaches/chain_of_thought_approach}/prompts/cot_suggestions.py (99%) rename modules/text/module_text_llm/module_text_llm/{ => 
approaches/chain_of_thought_approach}/prompts/refined_cot_suggestions.py (93%) diff --git a/llm_core/llm_core/utils/llm_utils.py b/llm_core/llm_core/utils/llm_utils.py index 4637b8558..0bfeb047d 100644 --- a/llm_core/llm_core/utils/llm_utils.py +++ b/llm_core/llm_core/utils/llm_utils.py @@ -1,7 +1,7 @@ from typing import Type, TypeVar, List from pydantic import BaseModel import tiktoken -from langchain.chat_models import ChatOpenAI +from langchain_openai import AzureChatOpenAI, ChatOpenAI from langchain.base_language import BaseLanguageModel from langchain.prompts import ( ChatPromptTemplate, @@ -75,7 +75,7 @@ def supports_function_calling(model: BaseLanguageModel): Returns: boolean: True if the model supports function calling, False otherwise """ - return isinstance(model, ChatOpenAI) + return isinstance(model, ChatOpenAI) or isinstance(model, AzureChatOpenAI) def get_chat_prompt_with_formatting_instructions( diff --git a/llm_core/llm_core/utils/predict_and_parse.py b/llm_core/llm_core/utils/predict_and_parse.py index d73748bdf..904ee8bfd 100644 --- a/llm_core/llm_core/utils/predict_and_parse.py +++ b/llm_core/llm_core/utils/predict_and_parse.py @@ -36,7 +36,7 @@ async def predict_and_parse( if experiment.run_id is not None: tags.append(f"run-{experiment.run_id}") - structured_output_llm = model.with_structured_output(pydantic_object, method="json_mode") + structured_output_llm = model.with_structured_output(pydantic_object) chain = RunnableSequence( chat_prompt, structured_output_llm diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py index ac99fa063..8e35226b7 100644 --- a/modules/text/module_text_llm/module_text_llm/__main__.py +++ b/modules/text/module_text_llm/module_text_llm/__main__.py @@ -1,20 +1,16 @@ -import json import os from typing import List, Any import nltk import tiktoken -from module_text_llm.approach_controller import generate from athena import app, 
submission_selector, submissions_consumer, feedback_consumer, feedback_provider, evaluation_provider from athena.text import Exercise, Submission, Feedback from athena.logger import logger from module_text_llm.config import Configuration from module_text_llm.evaluation import get_feedback_statistics, get_llm_statistics -from module_text_llm.generate_suggestions import generate_suggestions from module_text_llm.generate_evaluation import generate_evaluation -from module_text_llm.generate_cot_suggestions import generate_cot_suggestions - +from module_text_llm.approaches.approach_controller import generate_suggestions @submissions_consumer def receive_submissions(exercise: Exercise, submissions: List[Submission]): @@ -36,7 +32,7 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", "Graded" if is_graded else "Non-graded", submission.id, exercise.id) - return await generate(exercise, submission, module_config.approach, module_config.debug) + return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug) @evaluation_provider diff --git a/modules/text/module_text_llm/module_text_llm/approach_controller.py b/modules/text/module_text_llm/module_text_llm/approach_controller.py deleted file mode 100644 index 26ec9c72c..000000000 --- a/modules/text/module_text_llm/module_text_llm/approach_controller.py +++ /dev/null @@ -1,19 +0,0 @@ - -from typing import List, Optional, Sequence -from pydantic import BaseModel, Field - -from athena import emit_meta -from athena.text import Exercise, Submission, Feedback -from athena.logger import logger -from module_text_llm.config import BasicApproachConfig, ChainOfThoughtConfig - - -from module_text_llm.helpers.utils import add_sentence_numbers, 
get_index_range_from_line_range, format_grading_instructions -from module_text_llm.generate_suggestions import generate_suggestions -from module_text_llm.generate_cot_suggestions import generate_cot_suggestions - -async def generate(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: - if(isinstance(config, BasicApproachConfig)): - return await generate_suggestions(exercise, submission, config, debug) - elif(isinstance(config, ChainOfThoughtConfig)): - return await generate_cot_suggestions(exercise, submission, config, debug) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py b/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py new file mode 100644 index 000000000..c0b1cddd9 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py @@ -0,0 +1,10 @@ +from abc import ABC +from pydantic import BaseModel, Field +from llm_core.models import ModelConfigType, DefaultModelConfig, MiniModelConfig + +class ApproachConfig(BaseModel, ABC): + max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") + model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore + + class Config: + use_enum_values = True \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py b/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py new file mode 100644 index 000000000..b2e3f90a6 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py @@ -0,0 +1,16 @@ + +from typing import List, Optional, Sequence +from pydantic import BaseModel, Field + +from athena.text import Exercise, Submission, Feedback +from module_text_llm.config import BasicApproachConfig, ChainOfThoughtConfig + + +from module_text_llm.approaches.basic_approach.generate_suggestions import 
generate_suggestions as generate_suggestions_basic +from module_text_llm.approaches.chain_of_thought_approach.generate_suggestions import generate_suggestions as generate_cot_suggestions + +async def generate_suggestions(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: + if(isinstance(config, BasicApproachConfig)): + return await generate_suggestions_basic(exercise, submission, config, debug) + elif(isinstance(config, ChainOfThoughtConfig)): + return await generate_cot_suggestions(exercise, submission, config, debug) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py new file mode 100644 index 000000000..1a3a90086 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py @@ -0,0 +1,23 @@ +from module_text_llm.approaches.approach_config import ApproachConfig +from pydantic import Field, BaseModel + +from module_text_llm.approaches.basic_approach.prompts.generate_suggestions import ( + system_message as generate_suggestions_system_message, + human_message as generate_suggestions_human_message +) + +class GenerateSuggestionsPrompt(BaseModel): + """\ +Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** + +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + system_message: str = Field(default=generate_suggestions_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(default=generate_suggestions_human_message, + description="Message from a human. 
The input on which the AI is supposed to act.") + + +class BasicApproachConfig(ApproachConfig): + generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) + diff --git a/modules/text/module_text_llm/module_text_llm/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py similarity index 99% rename from modules/text/module_text_llm/module_text_llm/generate_suggestions.py rename to modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py index be928dc2e..de3e09c1c 100644 --- a/modules/text/module_text_llm/module_text_llm/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py @@ -4,15 +4,13 @@ from athena import emit_meta from athena.text import Exercise, Submission, Feedback from athena.logger import logger - -from module_text_llm.config import BasicApproachConfig from llm_core.utils.llm_utils import ( get_chat_prompt_with_formatting_instructions, check_prompt_length_and_omit_features_if_necessary, num_tokens_from_prompt, ) from llm_core.utils.predict_and_parse import predict_and_parse - +from module_text_llm.config import BasicApproachConfig from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions class FeedbackModel(BaseModel): diff --git a/modules/text/module_text_llm/module_text_llm/prompts/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py similarity index 100% rename from modules/text/module_text_llm/module_text_llm/prompts/generate_suggestions.py rename to modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py 
b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py new file mode 100644 index 000000000..9e74d5449 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py @@ -0,0 +1,34 @@ +from pydantic import BaseModel, Field +from llm_core.models import ModelConfigType, MiniModelConfig + +from module_text_llm.approaches.chain_of_thought_approach.prompts.cot_suggestions import ( + system_message as generate_cot_suggestions_system_message, + human_message as generate_cot_suggestions_human_message +) + +from module_text_llm.approaches.chain_of_thought_approach.prompts.refined_cot_suggestions import ( + system_message as generate_refined_cot_suggestions_system_message, + human_message as generate_refined_cot_suggestions_human_message +) + +from module_text_llm.approaches.approach_config import ApproachConfig + +class CoTGenerateSuggestionsPrompt(BaseModel): + """\ +Features cit available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** + +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + system_message: str = Field(default=generate_cot_suggestions_system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(default=generate_cot_suggestions_human_message, + description="Message from a human. The input on which the AI is supposed to act.") + second_system_message: str = Field(default=generate_refined_cot_suggestions_system_message, + description="Message for priming AI behavior and instructing it what to do.") + answer_message: str = Field(default=generate_refined_cot_suggestions_human_message, + description="Message from a human. 
The input on which the AI is supposed to act.") + +class ChainOfThoughtConfig(ApproachConfig): + # Defaults to the cheaper mini 4o model + model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore + generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) diff --git a/modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py similarity index 95% rename from modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py rename to modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py index a029d0539..5a7d75d0a 100644 --- a/modules/text/module_text_llm/module_text_llm/generate_cot_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py @@ -1,11 +1,12 @@ -from typing import List, Optional, Sequence +from typing import List, Optional from pydantic import BaseModel, Field from athena import emit_meta from athena.text import Exercise, Submission, Feedback from athena.logger import logger -from module_text_llm.config import ChainOfThoughtConfig +from module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig + from llm_core.utils.llm_utils import ( get_chat_prompt_with_formatting_instructions, check_prompt_length_and_omit_features_if_necessary, @@ -51,7 +52,7 @@ class InitialAssessmentModel(BaseModel): feedbacks: List[InitialAssessment] = Field(description="Assessment feedbacks") -async def generate_cot_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: +async def generate_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] prompt_input = { 
diff --git a/modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py similarity index 99% rename from modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py rename to modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py index db5258e2d..41df58bae 100644 --- a/modules/text/module_text_llm/module_text_llm/prompts/cot_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py @@ -18,6 +18,7 @@ # Grading Instructions {grading_instructions} +Respond in json """ human_message = """\ diff --git a/modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py similarity index 93% rename from modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py rename to modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py index e0b03ebec..d811b6116 100644 --- a/modules/text/module_text_llm/module_text_llm/prompts/refined_cot_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py @@ -7,7 +7,11 @@ Remember that your response is directly seen by students and it should adress them directly. For each feedback where the student has room for improvement, think about how the student could improve his solution. Once you have thought how the student can improve the solution, formulate it in a way that guides the student towards the correct solution without revealing it directly. 
- Consider improvements to the feedback if any of this points is not satisfied.""" + Consider improvements to the feedback if any of this points is not satisfied. + + Respond in json + + """ human_message = """\ Student\'s submission to grade (with sentence numbers : ): diff --git a/modules/text/module_text_llm/module_text_llm/config.py b/modules/text/module_text_llm/module_text_llm/config.py index b612ff5cc..f12547898 100644 --- a/modules/text/module_text_llm/module_text_llm/config.py +++ b/modules/text/module_text_llm/module_text_llm/config.py @@ -1,101 +1,15 @@ from pydantic import BaseModel, Field from typing import Union from athena import config_schema_provider -from llm_core.models import ModelConfigType, DefaultModelConfig, MiniModelConfig -from module_text_llm.prompts.generate_suggestions import ( - system_message as generate_suggestions_system_message, - human_message as generate_suggestions_human_message -) -from enum import Enum -from pydantic import root_validator -from abc import ABC, abstractmethod -from module_text_llm.prompts.cot_suggestions import ( - system_message as generate_cot_suggestions_system_message, - human_message as generate_cot_suggestions_human_message -) -from module_text_llm.prompts.refined_cot_suggestions import ( - system_message as generate_refined_cot_suggestions_system_message, - human_message as generate_refined_cot_suggestions_human_message -) -class GenerateSuggestionsPrompt(BaseModel): - """\ -Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** +from module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig +from module_text_llm.approaches.basic_approach.config import BasicApproachConfig -_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ -""" - system_message: str = Field(default=generate_suggestions_system_message, 
- description="Message for priming AI behavior and instructing it what to do.") - human_message: str = Field(default=generate_suggestions_human_message, - description="Message from a human. The input on which the AI is supposed to act.") - -class CoTGenerateSuggestionsPrompt(BaseModel): - """\ -Features cit available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** - -_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ -""" - system_message: str = Field(default=generate_cot_suggestions_system_message, - description="Message for priming AI behavior and instructing it what to do.") - human_message: str = Field(default=generate_cot_suggestions_human_message, - description="Message from a human. The input on which the AI is supposed to act.") - second_system_message: str = Field(default=generate_refined_cot_suggestions_system_message, - description="Message for priming AI behavior and instructing it what to do.") - answer_message: str = Field(default=generate_refined_cot_suggestions_human_message, - description="Message from a human. 
The input on which the AI is supposed to act.") - -class ApproachType(str, Enum): - basic = "BasicApproach" - chain_of_thought = "ChainOfThought" - - -class ApproachConfig(BaseModel, ABC): - max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") - model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore - - # @abstractmethod - # def get_prompt(self): - # """Abstract method to get the appropriate prompt configuration.""" - # pass - - class Config: - # Enable discriminator to distinguish between subclasses in the schema - use_enum_values = True - -class BasicApproachConfig(ApproachConfig): - generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) - - # def get_prompt(self): - # return self.generate_suggestions_prompt - -class ChainOfThoughtConfig(ApproachConfig): - model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore - generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) - - # def get_prompt(self): - # return self.generate_suggestions_prompt - -# available_approaches = [BasicApproachConfig, ChainOfThoughtConfig] ApproachConfigUnion = Union[BasicApproachConfig, ChainOfThoughtConfig] -# def approach_factory(approach_type: ApproachType) -> ApproachConfig: -# if approach_type == ApproachType.basic: -# return BasicApproachConfig() -# elif approach_type == ApproachType.chain_of_thought: -# return ChainOfThoughtConfig() -# else: -# raise ValueError(f"Unknown approach type: {approach_type}") - @config_schema_provider class Configuration(BaseModel): debug: bool = Field(default=False, description="Enable debug mode.") approach: ApproachConfigUnion = Field(default_factory=BasicApproachConfig) # Default to BasicApproach - # approach_type: ApproachType = Field(default=ApproachType.basic, description="Type of approach to use.") - - # @root_validator(pre=True) - # def populate_approach(cls, values): - 
# """Automatically instantiate the correct approach based on approach_type.""" - # approach_type = values.get('approach_type', ApproachType.basic) - # values['approach'] = approach_factory(approach_type) - # return values + From 8de54abe6859be4b770b8afce760005c9466cd64 Mon Sep 17 00:00:00 2001 From: = Enea_Gore Date: Fri, 25 Oct 2024 03:16:13 +0200 Subject: [PATCH 3/5] You fix some, you break some more --- llm_core/llm_core/utils/predict_and_parse.py | 11 ++++++----- .../text/module_text_llm/module_text_llm/__main__.py | 2 +- .../module_text_llm/approaches/approach_config.py | 10 ++++++++-- .../approaches/approach_controller.py | 12 +++++++----- .../approaches/basic_approach/config.py | 3 +++ .../basic_approach/generate_suggestions.py | 3 ++- .../basic_approach/prompts/generate_suggestions.py | 4 ++++ .../approaches/chain_of_thought_approach/config.py | 2 ++ .../generate_suggestions.py | 11 ++--------- .../text/module_text_llm/module_text_llm/config.py | 5 +++-- 10 files changed, 38 insertions(+), 25 deletions(-) diff --git a/llm_core/llm_core/utils/predict_and_parse.py b/llm_core/llm_core/utils/predict_and_parse.py index 904ee8bfd..493ba032a 100644 --- a/llm_core/llm_core/utils/predict_and_parse.py +++ b/llm_core/llm_core/utils/predict_and_parse.py @@ -37,12 +37,13 @@ async def predict_and_parse( tags.append(f"run-{experiment.run_id}") structured_output_llm = model.with_structured_output(pydantic_object) - chain = RunnableSequence( - chat_prompt, - structured_output_llm - ) + # chain = RunnableSequence( + # chat_prompt, + # structured_output_llm + # ) + chain = chat_prompt | structured_output_llm try: - return await chain.ainvoke(prompt_input, config={"tags": tags}) + return await chain.ainvoke(prompt_input, config={"tags": tags}) # type: ignore # except ValidationError as e: raise ValueError(f"Could not parse output: {e}") from e \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py 
b/modules/text/module_text_llm/module_text_llm/__main__.py index 8e35226b7..a09f3ef08 100644 --- a/modules/text/module_text_llm/module_text_llm/__main__.py +++ b/modules/text/module_text_llm/module_text_llm/__main__.py @@ -27,11 +27,11 @@ def select_submission(exercise: Exercise, submissions: List[Submission]) -> Subm def process_incoming_feedback(exercise: Exercise, submission: Submission, feedbacks: List[Feedback]): logger.info("process_feedback: Received %d feedbacks for submission %d of exercise %d.", len(feedbacks), submission.id, exercise.id) -# change here to have multiple approaches @feedback_provider async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", "Graded" if is_graded else "Non-graded", submission.id, exercise.id) + logger.info("WHAAAAA module_config: %s", type(module_config.approach)) return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py b/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py index c0b1cddd9..ace029c6f 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py @@ -1,10 +1,16 @@ from abc import ABC from pydantic import BaseModel, Field -from llm_core.models import ModelConfigType, DefaultModelConfig, MiniModelConfig +from llm_core.models import ModelConfigType, DefaultModelConfig +from enum import Enum +class ApproachType(str, Enum): + basic = "BasicApproach" + chain_of_thought = "ChainOfThought" + class ApproachConfig(BaseModel, ABC): max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") model: ModelConfigType = 
Field(default=DefaultModelConfig()) # type: ignore - + type: ApproachType = Field(..., description="The type of approach config") + class Config: use_enum_values = True \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py b/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py index b2e3f90a6..55381608f 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py @@ -1,16 +1,18 @@ -from typing import List, Optional, Sequence -from pydantic import BaseModel, Field - +from typing import List from athena.text import Exercise, Submission, Feedback -from module_text_llm.config import BasicApproachConfig, ChainOfThoughtConfig +from module_text_llm.approaches.basic_approach.config import BasicApproachConfig +from module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig +from module_text_llm.approaches.approach_config import ApproachConfig +from athena.logger import logger from module_text_llm.approaches.basic_approach.generate_suggestions import generate_suggestions as generate_suggestions_basic from module_text_llm.approaches.chain_of_thought_approach.generate_suggestions import generate_suggestions as generate_cot_suggestions -async def generate_suggestions(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: +async def generate_suggestions(exercise: Exercise, submission: Submission, config: ApproachConfig, debug: bool) -> List[Feedback]: if(isinstance(config, BasicApproachConfig)): return await generate_suggestions_basic(exercise, submission, config, debug) elif(isinstance(config, ChainOfThoughtConfig)): return await generate_cot_suggestions(exercise, submission, config, debug) + diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py 
b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py index 1a3a90086..278ed0ce1 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py @@ -1,5 +1,7 @@ from module_text_llm.approaches.approach_config import ApproachConfig from pydantic import Field, BaseModel +from typing import Literal + from module_text_llm.approaches.basic_approach.prompts.generate_suggestions import ( system_message as generate_suggestions_system_message, @@ -19,5 +21,6 @@ class GenerateSuggestionsPrompt(BaseModel): class BasicApproachConfig(ApproachConfig): + type: Literal['basic'] = 'basic' generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py index de3e09c1c..03dba2b01 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py @@ -30,7 +30,7 @@ class Config: class AssessmentModel(BaseModel): """Collection of feedbacks making up an assessment""" - feedbacks: Sequence[FeedbackModel] = Field(description="Assessment feedbacks") + feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") class Config: title = "Assessment" @@ -38,6 +38,7 @@ class Config: async def generate_suggestions(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] + logger.warning("Doing basic") prompt_input = { "max_points": exercise.max_points, diff --git 
a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py index 29dc465a2..c1535cc3e 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py @@ -17,10 +17,14 @@ # Grading instructions {grading_instructions} Max points: {max_points}, bonus points: {bonus_points}\ + + Respomd in json format. """ human_message = """\ Student\'s submission to grade (with sentence numbers : ): + Respomd in json format. + \"\"\" {submission} \"\"\"\ diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py index 9e74d5449..909618fd6 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py @@ -1,4 +1,5 @@ from pydantic import BaseModel, Field +from typing import Literal from llm_core.models import ModelConfigType, MiniModelConfig from module_text_llm.approaches.chain_of_thought_approach.prompts.cot_suggestions import ( @@ -30,5 +31,6 @@ class CoTGenerateSuggestionsPrompt(BaseModel): class ChainOfThoughtConfig(ApproachConfig): # Defaults to the cheaper mini 4o model + type: Literal['chain_of_thought'] = 'chain_of_thought' model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py 
b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py index 5a7d75d0a..7036daf52 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py @@ -26,18 +26,10 @@ class FeedbackModel(BaseModel): description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" ) - class Config: - title = "Feedback" - - class AssessmentModel(BaseModel): """Collection of feedbacks making up an assessment""" feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") - - class Config: - title = "Assessment" - class InitialAssessment(BaseModel): title: str = Field(description="Very short title, i.e. feedback category or similar", example="Logic Error") description: str = Field(description="Feedback description") @@ -54,6 +46,7 @@ class InitialAssessmentModel(BaseModel): async def generate_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] + logger.warning("Doing chain of thught ") prompt_input = { "max_points": exercise.max_points, @@ -103,7 +96,7 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi ) second_prompt_input = { - "answer" : initial_result, + "answer" : initial_result.dict(), "submission": add_sentence_numbers(submission.text) } diff --git a/modules/text/module_text_llm/module_text_llm/config.py b/modules/text/module_text_llm/module_text_llm/config.py index f12547898..041545be9 100644 --- a/modules/text/module_text_llm/module_text_llm/config.py +++ b/modules/text/module_text_llm/module_text_llm/config.py @@ -5,11 +5,12 @@ from module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig from 
module_text_llm.approaches.basic_approach.config import BasicApproachConfig -ApproachConfigUnion = Union[BasicApproachConfig, ChainOfThoughtConfig] +ApproachConfigUnion = Union[ChainOfThoughtConfig, BasicApproachConfig] @config_schema_provider class Configuration(BaseModel): debug: bool = Field(default=False, description="Enable debug mode.") approach: ApproachConfigUnion = Field(default_factory=BasicApproachConfig) # Default to BasicApproach - + class Config: + smart_union = True From 95e8492f795b7c0abc42ea6f988bcba8ffcbc38b Mon Sep 17 00:00:00 2001 From: = Enea_Gore Date: Fri, 25 Oct 2024 03:21:21 +0200 Subject: [PATCH 4/5] fix broken basic approach --- modules/text/module_text_llm/module_text_llm/__main__.py | 1 - .../approaches/basic_approach/generate_suggestions.py | 8 -------- .../chain_of_thought_approach/generate_suggestions.py | 1 - 3 files changed, 10 deletions(-) diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py index a09f3ef08..18a4c54de 100644 --- a/modules/text/module_text_llm/module_text_llm/__main__.py +++ b/modules/text/module_text_llm/module_text_llm/__main__.py @@ -31,7 +31,6 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded: bool, module_config: Configuration) -> List[Feedback]: logger.info("suggest_feedback: %s suggestions for submission %d of exercise %d were requested", "Graded" if is_graded else "Non-graded", submission.id, exercise.id) - logger.info("WHAAAAA module_config: %s", type(module_config.approach)) return await generate_suggestions(exercise, submission, module_config.approach, module_config.debug) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py index 03dba2b01..47f214ccd 100644 --- 
a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py @@ -23,22 +23,14 @@ class FeedbackModel(BaseModel): description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" ) - class Config: - title = "Feedback" - class AssessmentModel(BaseModel): """Collection of feedbacks making up an assessment""" feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") - class Config: - title = "Assessment" - - async def generate_suggestions(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] - logger.warning("Doing basic") prompt_input = { "max_points": exercise.max_points, diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py index 7036daf52..5b1f546b4 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py @@ -46,7 +46,6 @@ class InitialAssessmentModel(BaseModel): async def generate_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] - logger.warning("Doing chain of thught ") prompt_input = { "max_points": exercise.max_points, From 2483905244f5d9a0f2b3aa2d632f66da4465f30b Mon Sep 17 00:00:00 2001 From: = Enea_Gore Date: Wed, 30 Oct 2024 02:12:20 +0100 Subject: [PATCH 5/5] refactor approaches --- llm_core/llm_core/utils/predict_and_parse.py | 41 ++++++++---- 
.../module_text_llm/__main__.py | 2 +- .../{approaches => }/approach_config.py | 4 +- .../module_text_llm/approach_controller.py | 16 +++++ .../approaches/approach_controller.py | 18 ----- .../approaches/basic_approach/config.py | 26 -------- .../prompts/generate_suggestions.py | 31 --------- .../chain_of_thought_approach/config.py | 36 ---------- .../prompts/refined_cot_suggestions.py | 21 ------ .../basic_approach/__init__.py | 11 ++++ .../basic_approach/generate_suggestions.py | 26 ++------ .../prompt_generate_suggestions.py | 65 +++++++++++++++++++ .../chain_of_thought_approach/__init__.py | 15 +++++ .../generate_suggestions.py | 41 +++--------- .../prompt_generate_feedback.py | 55 ++++++++++++++++ .../prompt_thinking.py} | 32 ++++++++- .../module_text_llm/module_text_llm/config.py | 6 +- 17 files changed, 243 insertions(+), 203 deletions(-) rename modules/text/module_text_llm/module_text_llm/{approaches => }/approach_config.py (84%) create mode 100644 modules/text/module_text_llm/module_text_llm/approach_controller.py delete mode 100644 modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py delete mode 100644 modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py delete mode 100644 modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py delete mode 100644 modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py delete mode 100644 modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py create mode 100644 modules/text/module_text_llm/module_text_llm/basic_approach/__init__.py rename modules/text/module_text_llm/module_text_llm/{approaches => }/basic_approach/generate_suggestions.py (79%) create mode 100644 modules/text/module_text_llm/module_text_llm/basic_approach/prompt_generate_suggestions.py create mode 100644 
modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/__init__.py rename modules/text/module_text_llm/module_text_llm/{approaches => }/chain_of_thought_approach/generate_suggestions.py (68%) create mode 100644 modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_generate_feedback.py rename modules/text/module_text_llm/module_text_llm/{approaches/chain_of_thought_approach/prompts/cot_suggestions.py => chain_of_thought_approach/prompt_thinking.py} (53%) diff --git a/llm_core/llm_core/utils/predict_and_parse.py b/llm_core/llm_core/utils/predict_and_parse.py index 493ba032a..e72bb3e91 100644 --- a/llm_core/llm_core/utils/predict_and_parse.py +++ b/llm_core/llm_core/utils/predict_and_parse.py @@ -12,7 +12,8 @@ async def predict_and_parse( chat_prompt: ChatPromptTemplate, prompt_input: dict, pydantic_object: Type[T], - tags: Optional[List[str]] + tags: Optional[List[str]], + use_function_calling: bool = False ) -> Optional[T]: """Predicts an LLM completion using the model and parses the output using the provided Pydantic model @@ -36,14 +37,30 @@ async def predict_and_parse( if experiment.run_id is not None: tags.append(f"run-{experiment.run_id}") - structured_output_llm = model.with_structured_output(pydantic_object) - # chain = RunnableSequence( - # chat_prompt, - # structured_output_llm - # ) - chain = chat_prompt | structured_output_llm - - try: - return await chain.ainvoke(prompt_input, config={"tags": tags}) # type: ignore # - except ValidationError as e: - raise ValueError(f"Could not parse output: {e}") from e \ No newline at end of file + + if (use_function_calling): + structured_output_llm = model.with_structured_output(pydantic_object) + chain = chat_prompt | structured_output_llm + + try: + result = await chain.ainvoke(prompt_input, config={"tags": tags}) + + if isinstance(result, pydantic_object): + return result + else: + raise ValueError("Parsed output does not match the expected Pydantic model.") + + except 
ValidationError as e: + raise ValueError(f"Could not parse output: {e}") from e + + else: + structured_output_llm = model.with_structured_output(pydantic_object, method = "json_mode") + chain = RunnableSequence( + chat_prompt, + structured_output_llm + ) + try: + return await chain.ainvoke(prompt_input, config={"tags": tags}) + except ValidationError as e: + raise ValueError(f"Could not parse output: {e}") from e + diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py index 18a4c54de..e23998674 100644 --- a/modules/text/module_text_llm/module_text_llm/__main__.py +++ b/modules/text/module_text_llm/module_text_llm/__main__.py @@ -10,7 +10,7 @@ from module_text_llm.config import Configuration from module_text_llm.evaluation import get_feedback_statistics, get_llm_statistics from module_text_llm.generate_evaluation import generate_evaluation -from module_text_llm.approaches.approach_controller import generate_suggestions +from module_text_llm.approach_controller import generate_suggestions @submissions_consumer def receive_submissions(exercise: Exercise, submissions: List[Submission]): diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py b/modules/text/module_text_llm/module_text_llm/approach_config.py similarity index 84% rename from modules/text/module_text_llm/module_text_llm/approaches/approach_config.py rename to modules/text/module_text_llm/module_text_llm/approach_config.py index ace029c6f..6626fdd85 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/approach_config.py +++ b/modules/text/module_text_llm/module_text_llm/approach_config.py @@ -9,8 +9,8 @@ class ApproachType(str, Enum): class ApproachConfig(BaseModel, ABC): max_input_tokens: int = Field(default=3000, description="Maximum number of tokens in the input prompt.") - model: ModelConfigType = Field(default=DefaultModelConfig()) # type: ignore - type: ApproachType = Field(..., 
description="The type of approach config") + model: ModelConfigType = Field(default=DefaultModelConfig()) + type: str = Field(..., description="The type of approach config") class Config: use_enum_values = True \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/approach_controller.py b/modules/text/module_text_llm/module_text_llm/approach_controller.py new file mode 100644 index 000000000..25329b8ec --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/approach_controller.py @@ -0,0 +1,16 @@ + +from typing import List +from athena.text import Exercise, Submission, Feedback +from module_text_llm.basic_approach import BasicApproachConfig +from module_text_llm.chain_of_thought_approach import ChainOfThoughtConfig +from module_text_llm.approach_config import ApproachConfig + +from module_text_llm.basic_approach.generate_suggestions import generate_suggestions as generate_suggestions_basic +from module_text_llm.chain_of_thought_approach.generate_suggestions import generate_suggestions as generate_cot_suggestions + +async def generate_suggestions(exercise: Exercise, submission: Submission, config: ApproachConfig, debug: bool) -> List[Feedback]: + if(isinstance(config, BasicApproachConfig)): + return await generate_suggestions_basic(exercise, submission, config, debug) + elif(isinstance(config, ChainOfThoughtConfig)): + return await generate_cot_suggestions(exercise, submission, config, debug) + diff --git a/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py b/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py deleted file mode 100644 index 55381608f..000000000 --- a/modules/text/module_text_llm/module_text_llm/approaches/approach_controller.py +++ /dev/null @@ -1,18 +0,0 @@ - -from typing import List -from athena.text import Exercise, Submission, Feedback -from module_text_llm.approaches.basic_approach.config import BasicApproachConfig -from 
module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig -from module_text_llm.approaches.approach_config import ApproachConfig -from athena.logger import logger - - -from module_text_llm.approaches.basic_approach.generate_suggestions import generate_suggestions as generate_suggestions_basic -from module_text_llm.approaches.chain_of_thought_approach.generate_suggestions import generate_suggestions as generate_cot_suggestions - -async def generate_suggestions(exercise: Exercise, submission: Submission, config: ApproachConfig, debug: bool) -> List[Feedback]: - if(isinstance(config, BasicApproachConfig)): - return await generate_suggestions_basic(exercise, submission, config, debug) - elif(isinstance(config, ChainOfThoughtConfig)): - return await generate_cot_suggestions(exercise, submission, config, debug) - diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py deleted file mode 100644 index 278ed0ce1..000000000 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/config.py +++ /dev/null @@ -1,26 +0,0 @@ -from module_text_llm.approaches.approach_config import ApproachConfig -from pydantic import Field, BaseModel -from typing import Literal - - -from module_text_llm.approaches.basic_approach.prompts.generate_suggestions import ( - system_message as generate_suggestions_system_message, - human_message as generate_suggestions_human_message -) - -class GenerateSuggestionsPrompt(BaseModel): - """\ -Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** - -_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ -""" - system_message: str = Field(default=generate_suggestions_system_message, - description="Message for priming AI 
behavior and instructing it what to do.") - human_message: str = Field(default=generate_suggestions_human_message, - description="Message from a human. The input on which the AI is supposed to act.") - - -class BasicApproachConfig(ApproachConfig): - type: Literal['basic'] = 'basic' - generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) - diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py deleted file mode 100644 index c1535cc3e..000000000 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/prompts/generate_suggestions.py +++ /dev/null @@ -1,31 +0,0 @@ -system_message = """\ -You are an AI tutor for text assessment at a prestigious university. - -# Task -Create graded feedback suggestions for a student\'s text submission that a human tutor would accept. \ -Meaning, the feedback you provide should be applicable to the submission with little to no modification. - -# Style -1. Constructive, 2. Specific, 3. Balanced, 4. Clear and Concise, 5. Actionable, 6. Educational, 7. Contextual - -# Problem statement -{problem_statement} - -# Example solution -{example_solution} - -# Grading instructions -{grading_instructions} -Max points: {max_points}, bonus points: {bonus_points}\ - - Respomd in json format. -""" - -human_message = """\ -Student\'s submission to grade (with sentence numbers : ): - Respomd in json format. 
- -\"\"\" -{submission} -\"\"\"\ -""" \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py deleted file mode 100644 index 909618fd6..000000000 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/config.py +++ /dev/null @@ -1,36 +0,0 @@ -from pydantic import BaseModel, Field -from typing import Literal -from llm_core.models import ModelConfigType, MiniModelConfig - -from module_text_llm.approaches.chain_of_thought_approach.prompts.cot_suggestions import ( - system_message as generate_cot_suggestions_system_message, - human_message as generate_cot_suggestions_human_message -) - -from module_text_llm.approaches.chain_of_thought_approach.prompts.refined_cot_suggestions import ( - system_message as generate_refined_cot_suggestions_system_message, - human_message as generate_refined_cot_suggestions_human_message -) - -from module_text_llm.approaches.approach_config import ApproachConfig - -class CoTGenerateSuggestionsPrompt(BaseModel): - """\ -Features cit available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** - -_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ -""" - system_message: str = Field(default=generate_cot_suggestions_system_message, - description="Message for priming AI behavior and instructing it what to do.") - human_message: str = Field(default=generate_cot_suggestions_human_message, - description="Message from a human. 
The input on which the AI is supposed to act.") - second_system_message: str = Field(default=generate_refined_cot_suggestions_system_message, - description="Message for priming AI behavior and instructing it what to do.") - answer_message: str = Field(default=generate_refined_cot_suggestions_human_message, - description="Message from a human. The input on which the AI is supposed to act.") - -class ChainOfThoughtConfig(ApproachConfig): - # Defaults to the cheaper mini 4o model - type: Literal['chain_of_thought'] = 'chain_of_thought' - model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore - generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py deleted file mode 100644 index d811b6116..000000000 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/refined_cot_suggestions.py +++ /dev/null @@ -1,21 +0,0 @@ -system_message = """ - You gave the following feedback on the first iteration: {answer} - On this step you need to refine your feedback. - Make sure to follow the following steps to assess and improve your feedback: - It shuold follow the grading instructions and the sample solution, if it doesn't, consider improvements. - If you have your own additional improvements that are not present in the grading instructions, add them in a new feedback with 0 credits and no reference. - Remember that your response is directly seen by students and it should adress them directly. - For each feedback where the student has room for improvement, think about how the student could improve his solution. 
- Once you have thought how the student can improve the solution, formulate it in a way that guides the student towards the correct solution without revealing it directly. - Consider improvements to the feedback if any of this points is not satisfied. - - Respond in json - - """ - -human_message = """\ -Student\'s submission to grade (with sentence numbers : ): -\"\"\" -{submission} -\"\"\"\ -""" \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/basic_approach/__init__.py b/modules/text/module_text_llm/module_text_llm/basic_approach/__init__.py new file mode 100644 index 000000000..37a674e7c --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/basic_approach/__init__.py @@ -0,0 +1,11 @@ +from module_text_llm.approach_config import ApproachConfig +from pydantic import Field +from typing import Literal + + +from module_text_llm.basic_approach.prompt_generate_suggestions import GenerateSuggestionsPrompt + +class BasicApproachConfig(ApproachConfig): + type: Literal['basic'] = 'basic' + generate_suggestions_prompt: GenerateSuggestionsPrompt = Field(default=GenerateSuggestionsPrompt()) + diff --git a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/basic_approach/generate_suggestions.py similarity index 79% rename from modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py rename to modules/text/module_text_llm/module_text_llm/basic_approach/generate_suggestions.py index 47f214ccd..b9e8694cb 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/basic_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/basic_approach/generate_suggestions.py @@ -1,5 +1,4 @@ -from typing import List, Optional, Sequence -from pydantic import BaseModel, Field +from typing import List from athena import emit_meta from athena.text import Exercise, Submission, Feedback @@ 
-9,29 +8,15 @@ check_prompt_length_and_omit_features_if_necessary, num_tokens_from_prompt, ) +from athena.text import Exercise, Submission, Feedback from llm_core.utils.predict_and_parse import predict_and_parse + from module_text_llm.config import BasicApproachConfig from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions - -class FeedbackModel(BaseModel): - title: str = Field(description="Very short title, i.e. feedback category or similar", example="Logic Error") - description: str = Field(description="Feedback description") - line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") - line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") - credits: float = Field(0.0, description="Number of points received/deducted") - grading_instruction_id: Optional[int] = Field( - description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" - ) - - -class AssessmentModel(BaseModel): - """Collection of feedbacks making up an assessment""" - - feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") +from module_text_llm.basic_approach.prompt_generate_suggestions import AssessmentModel async def generate_suggestions(exercise: Exercise, submission: Submission, config: BasicApproachConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] - prompt_input = { "max_points": exercise.max_points, "bonus_points": exercise.bonus_points, @@ -74,7 +59,8 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi tags=[ f"exercise-{exercise.id}", f"submission-{submission.id}", - ] + ], + use_function_calling=True ) if debug: diff --git a/modules/text/module_text_llm/module_text_llm/basic_approach/prompt_generate_suggestions.py 
b/modules/text/module_text_llm/module_text_llm/basic_approach/prompt_generate_suggestions.py new file mode 100644 index 000000000..4b23137d5 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/basic_approach/prompt_generate_suggestions.py @@ -0,0 +1,65 @@ +from pydantic import Field, BaseModel +from typing import List, Optional +from pydantic import BaseModel, Field + +system_message = """\ +You are an AI tutor for text assessment at a prestigious university. + +# Task +Create graded feedback suggestions for a student\'s text submission that a human tutor would accept. \ +Meaning, the feedback you provide should be applicable to the submission with little to no modification. + +# Style +1. Constructive, 2. Specific, 3. Balanced, 4. Clear and Concise, 5. Actionable, 6. Educational, 7. Contextual + +# Problem statement +{problem_statement} + +# Example solution +{example_solution} + +# Grading instructions +{grading_instructions} +Max points: {max_points}, bonus points: {bonus_points}\ + +Respond in json. +""" + +human_message = """\ +Student\'s submission to grade (with sentence numbers : ): + +Respond in json. + +\"\"\" +{submission} +\"\"\"\ +""" + +# Input Prompt +class GenerateSuggestionsPrompt(BaseModel): + """\ +Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** + +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + system_message: str = Field(default=system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(default=human_message, + description="Message from a human. The input on which the AI is supposed to act.") +# Output Object +class FeedbackModel(BaseModel): + title: str = Field(description="Very short title, i.e. 
feedback category or similar", example="Logic Error") + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") + line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + + +class AssessmentModel(BaseModel): + """Collection of feedbacks making up an assessment""" + + feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") + diff --git a/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/__init__.py b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/__init__.py new file mode 100644 index 000000000..b63b770e1 --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/__init__.py @@ -0,0 +1,15 @@ +from pydantic import BaseModel, Field +from typing import Literal +from llm_core.models import ModelConfigType, MiniModelConfig + +from module_text_llm.approach_config import ApproachConfig +from module_text_llm.chain_of_thought_approach.prompt_generate_feedback import CoTGenerateSuggestionsPrompt +from module_text_llm.chain_of_thought_approach.prompt_thinking import ThinkingPrompt + +class ChainOfThoughtConfig(ApproachConfig): + # Defaults to the cheaper mini 4o model + type: Literal['chain_of_thought'] = 'chain_of_thought' + model: ModelConfigType = Field(default=MiniModelConfig) # type: ignore + thikning_prompt: ThinkingPrompt = Field(default=ThinkingPrompt()) + generate_suggestions_prompt: CoTGenerateSuggestionsPrompt = Field(default=CoTGenerateSuggestionsPrompt()) + \ No newline at end of file diff --git 
a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/generate_suggestions.py similarity index 68% rename from modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py rename to modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/generate_suggestions.py index 5b1f546b4..6b86dc978 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/generate_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/generate_suggestions.py @@ -5,7 +5,7 @@ from athena.text import Exercise, Submission, Feedback from athena.logger import logger -from module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig +from module_text_llm.chain_of_thought_approach import ChainOfThoughtConfig from llm_core.utils.llm_utils import ( get_chat_prompt_with_formatting_instructions, @@ -15,35 +15,10 @@ from llm_core.utils.predict_and_parse import predict_and_parse from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions +from module_text_llm.chain_of_thought_approach.prompt_thinking import InitialAssessmentModel +from module_text_llm.chain_of_thought_approach.prompt_generate_feedback import AssessmentModel -class FeedbackModel(BaseModel): - title: str = Field(description="Very short title, i.e. 
feedback category or similar", example="Logic Error") - description: str = Field(description="Feedback description") - line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") - line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") - credits: float = Field(0.0, description="Number of points received/deducted") - grading_instruction_id: Optional[int] = Field( - description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" - ) -class AssessmentModel(BaseModel): - """Collection of feedbacks making up an assessment""" - - feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") -class InitialAssessment(BaseModel): - title: str = Field(description="Very short title, i.e. feedback category or similar", example="Logic Error") - description: str = Field(description="Feedback description") - line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") - line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") - credits: float = Field(0.0, description="Number of points received/deducted") - reasoning: str = Field(description="Reasoning why the feedback was given") - impprovment_suggestion: str = Field(description="Suggestion for improvement for the student") - -class InitialAssessmentModel(BaseModel): - """Collection of feedbacks making up an assessment""" - - feedbacks: List[InitialAssessment] = Field(description="Assessment feedbacks") - async def generate_suggestions(exercise: Exercise, submission: Submission, config: ChainOfThoughtConfig, debug: bool) -> List[Feedback]: model = config.model.get_model() # type: ignore[attr-defined] @@ -58,8 +33,8 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi chat_prompt = get_chat_prompt_with_formatting_instructions( model=model, - 
system_message=config.generate_suggestions_prompt.system_message, - human_message=config.generate_suggestions_prompt.human_message, + system_message=config.thikning_prompt.system_message, + human_message=config.thikning_prompt.human_message, pydantic_object=InitialAssessmentModel ) @@ -91,7 +66,8 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi tags=[ f"exercise-{exercise.id}", f"submission-{submission.id}", - ] + ], + use_function_calling=True ) second_prompt_input = { @@ -114,7 +90,8 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi tags=[ f"exercise-{exercise.id}", f"submission-{submission.id}", - ] + ], + use_function_calling=True ) if debug: diff --git a/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_generate_feedback.py b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_generate_feedback.py new file mode 100644 index 000000000..9065ad67f --- /dev/null +++ b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_generate_feedback.py @@ -0,0 +1,55 @@ +from pydantic import BaseModel, Field +from typing import List, Optional + +system_message = """ + You gave the following feedback on the first iteration: {answer} + On this step you need to refine your feedback. + Make sure to follow the following steps to assess and improve your feedback: + It should follow the grading instructions and the sample solution, if it doesn't, consider improvements. + If you have your own additional improvements that are not present in the grading instructions, add them in a new feedback with 0 credits and no reference. + Remember that your response is directly seen by students and it should address them directly. + For each feedback where the student has room for improvement, think about how the student could improve his solution.
+ Once you have thought how the student can improve the solution, formulate it in a way that guides the student towards the correct solution without revealing it directly. + Consider improvements to the feedback if any of these points is not satisfied. + + Respond in json + + """ + +human_message = """\ +Student\'s submission to grade (with sentence numbers : ): +\"\"\" +{submission} +\"\"\"\ +""" + +# Input Prompt + +class CoTGenerateSuggestionsPrompt(BaseModel): + """\ +Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** + +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + second_system_message: str = Field(default=system_message, + description="Message for priming AI behavior and instructing it what to do.") + answer_message: str = Field(default=human_message, + description="Message from a human. The input on which the AI is supposed to act.") + + +# Output Object + +class FeedbackModel(BaseModel): + title: str = Field(description="Very short title, i.e.
feedback category or similar", example="Logic Error") + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") + line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") + credits: float = Field(0.0, description="Number of points received/deducted") + grading_instruction_id: Optional[int] = Field( + description="ID of the grading instruction that was used to generate this feedback, or empty if no grading instruction was used" + ) + +class AssessmentModel(BaseModel): + """Collection of feedbacks making up an assessment""" + + feedbacks: List[FeedbackModel] = Field(description="Assessment feedbacks") \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_thinking.py similarity index 53% rename from modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py rename to modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_thinking.py index 41df58bae..6f72b2b10 100644 --- a/modules/text/module_text_llm/module_text_llm/approaches/chain_of_thought_approach/prompts/cot_suggestions.py +++ b/modules/text/module_text_llm/module_text_llm/chain_of_thought_approach/prompt_thinking.py @@ -1,3 +1,6 @@ +from pydantic import BaseModel, Field +from typing import Optional, List + system_message = """ You are a grading assistant at a prestrigious university tasked with grading student submissions for text exercises. You goal is to be as helpful as possible to the student while providing constructive feedback without revealing the solution. 
@@ -26,4 +29,31 @@ \"\"\" {submission} \"\"\"\ -""" \ No newline at end of file +""" + +# Input Prompt +class ThinkingPrompt(BaseModel): + """\ +Features available: **{problem_statement}**, **{example_solution}**, **{grading_instructions}**, **{max_points}**, **{bonus_points}**, **{submission}** + +_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input is too long._\ +""" + system_message: str = Field(default=system_message, + description="Message for priming AI behavior and instructing it what to do.") + human_message: str = Field(default=human_message, + description="Message from a human. The input on which the AI is supposed to act.") + +# Output Object +class InitialAssessment(BaseModel): + title: str = Field(description="Very short title, i.e. feedback category or similar", example="Logic Error") + description: str = Field(description="Feedback description") + line_start: Optional[int] = Field(description="Referenced line number start, or empty if unreferenced") + line_end: Optional[int] = Field(description="Referenced line number end, or empty if unreferenced") + credits: float = Field(0.0, description="Number of points received/deducted") + reasoning: str = Field(description="Reasoning why the feedback was given") + impprovment_suggestion: str = Field(description="Suggestion for improvement for the student") + +class InitialAssessmentModel(BaseModel): + """Collection of feedbacks making up an assessment""" + + feedbacks: List[InitialAssessment] = Field(description="Assessment feedbacks") \ No newline at end of file diff --git a/modules/text/module_text_llm/module_text_llm/config.py b/modules/text/module_text_llm/module_text_llm/config.py index 041545be9..c620e62e3 100644 --- a/modules/text/module_text_llm/module_text_llm/config.py +++ b/modules/text/module_text_llm/module_text_llm/config.py @@ -2,10 +2,10 @@ from typing import Union from athena import config_schema_provider -from 
module_text_llm.approaches.chain_of_thought_approach.config import ChainOfThoughtConfig -from module_text_llm.approaches.basic_approach.config import BasicApproachConfig +from module_text_llm.chain_of_thought_approach import ChainOfThoughtConfig +from module_text_llm.basic_approach import BasicApproachConfig -ApproachConfigUnion = Union[ChainOfThoughtConfig, BasicApproachConfig] +ApproachConfigUnion = Union[BasicApproachConfig, ChainOfThoughtConfig] @config_schema_provider class Configuration(BaseModel):