diff --git a/.github/workflows/pycodestyle.yml b/.github/workflows/pycodestyle.yml
new file mode 100644
index 0000000..f74398e
--- /dev/null
+++ b/.github/workflows/pycodestyle.yml
@@ -0,0 +1,36 @@
+name: PyCodeStyle
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+  workflow_call:
+
+jobs:
+  python-3:
+    name: PyCodeStyle
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11"]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install pycodestyle
+        run: |
+          python -m pip install --upgrade pip
+          pip install pycodestyle
+
+      - name: Analysing the code with pycodestyle
+        run: |
+          pycodestyle --config=./.pycodestyle .
diff --git a/.pycodestyle b/.pycodestyle
new file mode 100644
index 0000000..2b68278
--- /dev/null
+++ b/.pycodestyle
@@ -0,0 +1,2 @@
+[pycodestyle]
+max-line-length=120
diff --git a/knowledge_base_gpt/apps/slackbot/slack_bot.py b/knowledge_base_gpt/apps/slackbot/slack_bot.py
index 907b42c..960d64f 100644
--- a/knowledge_base_gpt/apps/slackbot/slack_bot.py
+++ b/knowledge_base_gpt/apps/slackbot/slack_bot.py
@@ -69,7 +69,6 @@ def _is_direct_message_channel(self, command):
         )
         return False
 
-
     def _reset_conversation(self, ack, say, command):  # pylint:disable=unused-argument
         ack()
         if not self._is_direct_message_channel(command):
diff --git a/knowledge_base_gpt/libs/gpt/ollama_info.py b/knowledge_base_gpt/libs/gpt/ollama_info.py
index 2999c4f..cfda081 100644
--- a/knowledge_base_gpt/libs/gpt/ollama_info.py
+++ b/knowledge_base_gpt/libs/gpt/ollama_info.py
@@ -19,7 +19,7 @@ class OllamaMetrics():
     prompt_eval_count: int = 0
     eval_count: int = 0
     load_duration: int = 0
-    prompt_eval_duration: int =0
+    prompt_eval_duration: int = 0
     eval_duration: int = 0
     total_duration: int = 0
 
@@ -122,6 +122,7 @@ def __deepcopy__(self, memo: Any) -> "OllamaCallbackHandler":
 
 register_configure_hook(ollama_callback_var, True)
 
+
 @contextmanager
 def get_ollama_callback() -> Generator[OllamaCallbackHandler, None, None]:
     """Get the Ollama callback handler in a context manager.
diff --git a/knowledge_base_gpt/libs/gpt/private_chat.py b/knowledge_base_gpt/libs/gpt/private_chat.py
index 155aade..31f5175 100644
--- a/knowledge_base_gpt/libs/gpt/private_chat.py
+++ b/knowledge_base_gpt/libs/gpt/private_chat.py
@@ -49,7 +49,7 @@ def __init__(self, settings: Settings, chat_log_exporter: ChatLogExporter, vecto
             return_generated_question=True
         )
 
-    def answer_query(self, history, query, chat_identifier: Optional[str]=None) -> Dict[str, Any]:
+    def answer_query(self, history, query, chat_identifier: Optional[str] = None) -> Dict[str, Any]:
         """
         Answer the query based on the history
         Use the chat identifier for logging the chat
diff --git a/knowledge_base_gpt/libs/settings/settings.py b/knowledge_base_gpt/libs/settings/settings.py
index 2c54be6..d3042a6 100644
--- a/knowledge_base_gpt/libs/settings/settings.py
+++ b/knowledge_base_gpt/libs/settings/settings.py
@@ -32,9 +32,9 @@ class LLMSettings(BaseModel):
     )
     temperature: float = Field(
         0.1,
-        description="The temperature of the model. "\
-                    "Increasing the temperature will make the model answer more creatively. "\
-                    "A value of 0.1 would be more factual.",
+        description="The temperature of the model. "
+                    "Increasing the temperature will make the model answer more creatively. "
" + "A value of 0.1 would be more factual.", ) verbose: bool = Field( False, @@ -54,9 +54,9 @@ class EmbeddingSettings(BaseModel): ) temperature: float = Field( 0.1, - description="The temperature of the model. "\ - "Increasing the temperature will make the model answer more creatively. "\ - "A value of 0.1 would be more factual.", + description="The temperature of the model. " + "Increasing the temperature will make the model answer more creatively. " + "A value of 0.1 would be more factual.", ) @@ -88,36 +88,36 @@ class OllamaSettings(BaseModel): ) tfs_z: float = Field( 1.0, - description="Tail free sampling is used to reduce the impact of less probable tokens from the output. "\ - "A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.", + description="Tail free sampling is used to reduce the impact of less probable tokens from the output. " + "A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.", ) num_predict: int = Field( None, - description="Maximum number of tokens to predict when generating text. "\ - "(Default: 128, -1 = infinite generation, -2 = fill context)", + description="Maximum number of tokens to predict when generating text. " + "(Default: 128, -1 = infinite generation, -2 = fill context)", ) top_k: int = Field( 40, - description="Reduces the probability of generating nonsense. "\ - "A higher value (e.g. 100) will give more diverse answers, "\ - "while a lower value (e.g. 10) will be more conservative. (Default: 40)", + description="Reduces the probability of generating nonsense. " + "A higher value (e.g. 100) will give more diverse answers, " + "while a lower value (e.g. 10) will be more conservative. (Default: 40)", ) top_p: float = Field( 0.9, - description="Works together with top-k. "\ - "A higher value (e.g., 0.95) will lead to more diverse text, "\ - "while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)", + description="Works together with top-k. " + "A higher value (e.g., 0.95) will lead to more diverse text, " + "while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)", ) repeat_last_n: int = Field( 64, - description="Sets how far back for the model to look back to prevent repetition. "\ - "(Default: 64, 0 = disabled, -1 = num_ctx)", + description="Sets how far back for the model to look back to prevent repetition. " + "(Default: 64, 0 = disabled, -1 = num_ctx)", ) repeat_penalty: float = Field( 1.1, - description="Sets how strongly to penalize repetitions. "\ - "A higher value (e.g., 1.5) will penalize repetitions more strongly, "\ - "while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)", + description="Sets how strongly to penalize repetitions. " + "A higher value (e.g., 1.5) will penalize repetitions more strongly, " + "while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)", ) @@ -128,6 +128,7 @@ class HuggingFaceSettings(BaseModel): description="Model to use. Example: 'nomic-embed-text'.", ) + class RedisSettings(BaseModel): """ Redis Settings """ host: str = Field(