Skip to content

Commit

Permalink
Add pycodestyle check and fix errors
Browse files Browse the repository at this point in the history
Signed-off-by: Ygal Blum <[email protected]>
  • Loading branch information
ygalblum committed Mar 24, 2024
1 parent 19d9ab8 commit deeb0ed
Show file tree
Hide file tree
Showing 6 changed files with 63 additions and 24 deletions.
36 changes: 36 additions & 0 deletions .github/workflows/pycodestyle.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# GitHub Actions workflow: run pycodestyle style checks across supported
# Python versions on pushes to main, on pull requests, and when called
# from another workflow.
name: PyCodeStyle
on:
  push:
    branches:
      - main
  pull_request:
  workflow_call:

jobs:
  python-3:
    name: PyCodeStyle
    runs-on: ubuntu-latest
    strategy:
      # Run every matrix entry to completion even if one version fails,
      # so a failure on 3.10 still reports results for 3.11.
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11"]

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Full history checkout (fetch-depth: 0) — shallow clones can
          # confuse tools that inspect git metadata.
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      # Step names previously said "pylint" — stale copy-paste; the tool
      # installed and executed here is pycodestyle.
      - name: Install pycodestyle
        run: |
          python -m pip install --upgrade pip
          pip install pycodestyle

      - name: Analysing the code with pycodestyle
        run: |
          pycodestyle --config=./.pycodestyle .
2 changes: 2 additions & 0 deletions .pycodestyle
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
[pycodestyle]
max-line-length=120
1 change: 0 additions & 1 deletion knowledge_base_gpt/apps/slackbot/slack_bot.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,6 @@ def _is_direct_message_channel(self, command):
)
return False


def _reset_conversation(self, ack, say, command): # pylint:disable=unused-argument
ack()
if not self._is_direct_message_channel(command):
Expand Down
3 changes: 2 additions & 1 deletion knowledge_base_gpt/libs/gpt/ollama_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class OllamaMetrics():
prompt_eval_count: int = 0
eval_count: int = 0
load_duration: int = 0
prompt_eval_duration: int =0
prompt_eval_duration: int = 0
eval_duration: int = 0
total_duration: int = 0

Expand Down Expand Up @@ -122,6 +122,7 @@ def __deepcopy__(self, memo: Any) -> "OllamaCallbackHandler":

register_configure_hook(ollama_callback_var, True)


@contextmanager
def get_ollama_callback() -> Generator[OllamaCallbackHandler, None, None]:
"""Get the Ollama callback handler in a context manager.
Expand Down
2 changes: 1 addition & 1 deletion knowledge_base_gpt/libs/gpt/private_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def __init__(self, settings: Settings, chat_log_exporter: ChatLogExporter, vecto
return_generated_question=True
)

def answer_query(self, history, query, chat_identifier: Optional[str]=None) -> Dict[str, Any]:
def answer_query(self, history, query, chat_identifier: Optional[str] = None) -> Dict[str, Any]:
"""
Answer the query based on the history
Use the chat identifier for logging the chat
Expand Down
43 changes: 22 additions & 21 deletions knowledge_base_gpt/libs/settings/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@ class LLMSettings(BaseModel):
)
temperature: float = Field(
0.1,
description="The temperature of the model. "\
"Increasing the temperature will make the model answer more creatively. "\
"A value of 0.1 would be more factual.",
description="The temperature of the model. "
"Increasing the temperature will make the model answer more creatively. "
"A value of 0.1 would be more factual.",
)
verbose: bool = Field(
False,
Expand All @@ -54,9 +54,9 @@ class EmbeddingSettings(BaseModel):
)
temperature: float = Field(
0.1,
description="The temperature of the model. "\
"Increasing the temperature will make the model answer more creatively. "\
"A value of 0.1 would be more factual.",
description="The temperature of the model. "
"Increasing the temperature will make the model answer more creatively. "
"A value of 0.1 would be more factual.",
)


Expand Down Expand Up @@ -88,36 +88,36 @@ class OllamaSettings(BaseModel):
)
tfs_z: float = Field(
1.0,
description="Tail free sampling is used to reduce the impact of less probable tokens from the output. "\
"A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
description="Tail free sampling is used to reduce the impact of less probable tokens from the output. "
"A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
)
num_predict: int = Field(
None,
description="Maximum number of tokens to predict when generating text. "\
"(Default: 128, -1 = infinite generation, -2 = fill context)",
description="Maximum number of tokens to predict when generating text. "
"(Default: 128, -1 = infinite generation, -2 = fill context)",
)
top_k: int = Field(
40,
description="Reduces the probability of generating nonsense. "\
"A higher value (e.g. 100) will give more diverse answers, "\
"while a lower value (e.g. 10) will be more conservative. (Default: 40)",
description="Reduces the probability of generating nonsense. "
"A higher value (e.g. 100) will give more diverse answers, "
"while a lower value (e.g. 10) will be more conservative. (Default: 40)",
)
top_p: float = Field(
0.9,
description="Works together with top-k. "\
"A higher value (e.g., 0.95) will lead to more diverse text, "\
"while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)",
description="Works together with top-k. "
"A higher value (e.g., 0.95) will lead to more diverse text, "
"while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)",
)
repeat_last_n: int = Field(
64,
description="Sets how far back for the model to look back to prevent repetition. "\
"(Default: 64, 0 = disabled, -1 = num_ctx)",
description="Sets how far back for the model to look back to prevent repetition. "
"(Default: 64, 0 = disabled, -1 = num_ctx)",
)
repeat_penalty: float = Field(
1.1,
description="Sets how strongly to penalize repetitions. "\
"A higher value (e.g., 1.5) will penalize repetitions more strongly, "\
"while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)",
description="Sets how strongly to penalize repetitions. "
"A higher value (e.g., 1.5) will penalize repetitions more strongly, "
"while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)",
)


Expand All @@ -128,6 +128,7 @@ class HuggingFaceSettings(BaseModel):
description="Model to use. Example: 'nomic-embed-text'.",
)


class RedisSettings(BaseModel):
""" Redis Settings """
host: str = Field(
Expand Down

0 comments on commit deeb0ed

Please sign in to comment.