From c98bda18737776fddea4e479acfac1cc78c0f62d Mon Sep 17 00:00:00 2001
From: PatrykWyzgowski
Date: Fri, 11 Oct 2024 14:19:36 +0200
Subject: [PATCH] Fixed the rest of ruff's claims.

---
 .../ragbits-core/src/ragbits/core/llms/base.py     |  2 +-
 .../src/ragbits/core/llms/clients/local.py         |  4 ++--
 .../ragbits-core/src/ragbits/core/prompt/base.py   |  2 +-
 .../ragbits-core/src/ragbits/core/prompt/prompt.py | 14 ++++++++------
 4 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/packages/ragbits-core/src/ragbits/core/llms/base.py b/packages/ragbits-core/src/ragbits/core/llms/base.py
index 5eedb9fb..a4aed07a 100644
--- a/packages/ragbits-core/src/ragbits/core/llms/base.py
+++ b/packages/ragbits-core/src/ragbits/core/llms/base.py
@@ -39,7 +39,7 @@ def client(self) -> LLMClient:
         Client for the LLM.
         """
 
-    def count_tokens(self, prompt: BasePrompt) -> int:
+    def count_tokens(self, prompt: BasePrompt) -> int:  # noqa: PLR6301
         """
         Counts tokens in the prompt.
 
diff --git a/packages/ragbits-core/src/ragbits/core/llms/clients/local.py b/packages/ragbits-core/src/ragbits/core/llms/clients/local.py
index 24aba0ce..ac371ed0 100644
--- a/packages/ragbits-core/src/ragbits/core/llms/clients/local.py
+++ b/packages/ragbits-core/src/ragbits/core/llms/clients/local.py
@@ -21,8 +21,8 @@ class LocalLLMOptions(LLMOptions):
     """
     Dataclass that represents all available LLM call options for the local LLM client.
     Each of them is described in the [HuggingFace documentation]
-    (https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation). # noqa: E501
-    """
+    (https://huggingface.co/docs/huggingface_hub/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation).
+    """  # noqa: E501
 
     repetition_penalty: float | None | NotGiven = NOT_GIVEN
     do_sample: bool | None | NotGiven = NOT_GIVEN
diff --git a/packages/ragbits-core/src/ragbits/core/prompt/base.py b/packages/ragbits-core/src/ragbits/core/prompt/base.py
index 9f2f086b..fe28d71f 100644
--- a/packages/ragbits-core/src/ragbits/core/prompt/base.py
+++ b/packages/ragbits-core/src/ragbits/core/prompt/base.py
@@ -30,7 +30,7 @@ def json_mode(self) -> bool:
         """
         return self.output_schema() is not None
 
-    def output_schema(self) -> dict | type[BaseModel] | None:
+    def output_schema(self) -> dict | type[BaseModel] | None:  # noqa: PLR6301
         """
         Returns the schema of the desired output. Can be used to request structured output from the LLM API
         or to validate the output. Can return either a Pydantic model or a JSON schema.
diff --git a/packages/ragbits-core/src/ragbits/core/prompt/prompt.py b/packages/ragbits-core/src/ragbits/core/prompt/prompt.py
index ca0870d5..be140ae9 100644
--- a/packages/ragbits-core/src/ragbits/core/prompt/prompt.py
+++ b/packages/ragbits-core/src/ragbits/core/prompt/prompt.py
@@ -57,7 +57,7 @@ def _get_io_types(cls) -> tuple:
 
     @classmethod
     def _parse_template(cls, template: str) -> Template:
-        env = Environment()  # nosec B701 - HTML autoescaping not needed for plain text #noqa: S701
+        env = Environment(autoescape=True)
         ast = env.parse(template)
         template_variables = meta.find_undeclared_variables(ast)
         input_fields = cls.input_type.model_fields.keys() if cls.input_type else set()
@@ -169,15 +169,17 @@ def list_few_shots(self) -> ChatFormat:
         result: ChatFormat = []
         for user_message, assistant_message in self.few_shots + self._instace_few_shots:
             if not isinstance(user_message, str):
-                user_message = self._render_template(self.user_prompt_template, user_message)
+                user_content = self._render_template(self.user_prompt_template, user_message)
+            else:
+                user_content = user_message
 
             if isinstance(assistant_message, BaseModel):
-                assistant_message = assistant_message.model_dump_json()
+                assistant_content = assistant_message.model_dump_json()
             else:
-                assistant_message = str(assistant_message)
+                assistant_content = str(assistant_message)
 
-            result.append({"role": "user", "content": user_message})
-            result.append({"role": "assistant", "content": assistant_message})
+            result.append({"role": "user", "content": user_content})
+            result.append({"role": "assistant", "content": assistant_content})
         return result
 
     def output_schema(self) -> dict | type[BaseModel] | None:
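The two "# noqa: PLR6301" suppressions target ruff's no-self-use check: the
default bodies of count_tokens() and output_schema() never touch self, but
they stay instance methods so subclasses can override them with per-instance
state. A minimal sketch of that pattern, with hypothetical class names rather
than the ragbits ones:

class BasePrompt:
    # The default ignores `self`, which is what PLR6301 flags; it remains an
    # instance method so the override below can use instance state.
    def output_schema(self) -> dict | None:  # noqa: PLR6301
        return None

class JsonPrompt(BasePrompt):
    def __init__(self, schema: dict) -> None:
        self._schema = schema

    def output_schema(self) -> dict | None:
        return self._schema  # this override does use `self`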
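The S701 hunk replaces a suppressed warning with real autoescaping. A
standalone Jinja2 sketch (nothing ragbits-specific) of what the flag changes
at render time:

from jinja2 import Environment

# Without autoescape, variables are interpolated verbatim; with
# autoescape=True, HTML-significant characters are entity-escaped.
plain = Environment().from_string("Question: {{ question }}")
escaped = Environment(autoescape=True).from_string("Question: {{ question }}")

print(plain.render(question="is 1 < 2?"))    # Question: is 1 < 2?
print(escaped.render(question="is 1 < 2?"))  # Question: is 1 &lt; 2?

Worth flagging in review: with autoescape on, prompt inputs containing <, >,
or & will be HTML-escaped in the rendered plain-text prompt, so this is a
behavioral change rather than a pure lint fix.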
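The list_few_shots() hunk stops overwriting the loop variables with values of
a different type and binds the normalized strings to new names, the pattern
ruff's redefined-loop-name check (PLW2901) asks for; presumably that is the
rule being fixed here. A condensed, self-contained sketch of the resulting
normalization, using a hypothetical Answer model:

from pydantic import BaseModel

class Answer(BaseModel):  # stand-in for any structured few-shot output
    text: str

few_shots = [("What is 2 + 2?", Answer(text="4")), ("Say hi.", "Hi!")]

chat: list[dict[str, str]] = []
for user_message, assistant_message in few_shots:
    # Loop variables keep their original types; new names hold chat content.
    user_content = user_message  # the real code renders non-str inputs here
    assistant_content = (
        assistant_message.model_dump_json()
        if isinstance(assistant_message, BaseModel)
        else str(assistant_message)
    )
    chat.append({"role": "user", "content": user_content})
    chat.append({"role": "assistant", "content": assistant_content})

print(chat[1])  # {'role': 'assistant', 'content': '{"text":"4"}'}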