Commit

linting fix
SanderGi committed Jan 26, 2024
1 parent 12a60c2 commit dd52515
Showing 25 changed files with 121 additions and 103 deletions.
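
All of the hunks below follow the same mechanical pattern: conditional expressions and long type annotations that previously dangled across several lines are wrapped in explicit parentheses, matching the parenthesized style that newer Black releases (24.x) enforce. A minimal, hypothetical sketch of the pattern (not code from this repository):

# Hypothetical illustration of the reformatting applied throughout this commit.
first_feedback = None  # stand-in for something like message.feedbacks.first()

# Before (older Black style): the ternary hangs bare off the dict value.
row_before = {
    "Feedback": first_feedback.get_display_text()
    if first_feedback
    else None,
}

# After (Black 24.x style): the ternary is wrapped in parentheses.
row_after = {
    "Feedback": (
        first_feedback.get_display_text() if first_feedback else None
    ),
}

assert row_before == row_after == {"Feedback": None}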
8 changes: 5 additions & 3 deletions bots/models.py
@@ -894,9 +894,11 @@ def to_df_format(
                 "Sent": message.created_at.astimezone(tz)
                 .replace(tzinfo=None)
                 .strftime("%b %d, %Y %I:%M %p"),
-                "Feedback": message.feedbacks.first().get_display_text()
-                if message.feedbacks.first()
-                else None,  # only show first feedback as per Sean's request
+                "Feedback": (
+                    message.feedbacks.first().get_display_text()
+                    if message.feedbacks.first()
+                    else None
+                ),  # only show first feedback as per Sean's request
                 "Analysis JSON": message.analysis_result,
             }
             rows.append(row)
6 changes: 3 additions & 3 deletions daras_ai_v2/bot_integration_widgets.py
@@ -19,9 +19,9 @@ def general_integration_settings(bi: BotIntegration):
         st.session_state[f"_bi_user_language_{bi.id}"] = BotIntegration._meta.get_field(
             "user_language"
         ).default
-        st.session_state[
-            f"_bi_show_feedback_buttons_{bi.id}"
-        ] = BotIntegration._meta.get_field("show_feedback_buttons").default
+        st.session_state[f"_bi_show_feedback_buttons_{bi.id}"] = (
+            BotIntegration._meta.get_field("show_feedback_buttons").default
+        )
         st.session_state[f"_bi_analysis_url_{bi.id}"] = None
 
     bi.show_feedback_buttons = st.checkbox(
12 changes: 7 additions & 5 deletions daras_ai_v2/bots.py
@@ -389,11 +389,13 @@ def _save_msgs(
         role=CHATML_ROLE_USER,
         content=response.raw_input_text,
         display_content=input_text,
-        saved_run=SavedRun.objects.get_or_create(
-            workflow=Workflow.ASR, **furl(speech_run).query.params
-        )[0]
-        if speech_run
-        else None,
+        saved_run=(
+            SavedRun.objects.get_or_create(
+                workflow=Workflow.ASR, **furl(speech_run).query.params
+            )[0]
+            if speech_run
+            else None
+        ),
     )
     attachments = []
     for f_url in (input_images or []) + (input_documents or []):
28 changes: 17 additions & 11 deletions daras_ai_v2/language_model.py
@@ -198,12 +198,14 @@ def calc_gpt_tokens(
         for entry in messages
         if (
             content := (
-                format_chatml_message(entry) + "\n"
-                if is_chat_model
-                else entry.get("content", "")
+                (
+                    format_chatml_message(entry) + "\n"
+                    if is_chat_model
+                    else entry.get("content", "")
+                )
+                if isinstance(entry, dict)
+                else str(entry)
             )
-            if isinstance(entry, dict)
-            else str(entry)
         )
     )
     return default_length_function(combined)
@@ -364,9 +366,11 @@ def run_language_model(
     else:
         out_content = [
             # return messages back as either chatml or json messages
-            format_chatml_message(entry)
-            if is_chatml
-            else (entry.get("content") or "").strip()
+            (
+                format_chatml_message(entry)
+                if is_chatml
+                else (entry.get("content") or "").strip()
+            )
             for entry in result
         ]
     if tools:
@@ -514,9 +518,11 @@ def _run_openai_chat(
                 frequency_penalty=frequency_penalty,
                 presence_penalty=presence_penalty,
                 tools=[tool.spec for tool in tools] if tools else NOT_GIVEN,
-                response_format={"type": response_format_type}
-                if response_format_type
-                else NOT_GIVEN,
+                response_format=(
+                    {"type": response_format_type}
+                    if response_format_type
+                    else NOT_GIVEN
+                ),
             )
             for model_str in model
         ],
6 changes: 3 additions & 3 deletions daras_ai_v2/language_model_settings_widgets.py
@@ -24,9 +24,9 @@ def language_model_settings(show_selector=True, show_document_model=False):
             f"###### {field_title_desc(VideoBotsPage.RequestModel, 'document_model')}",
             key="document_model",
             options=[None, *doc_model_descriptions],
-            format_func=lambda x: f"{doc_model_descriptions[x]} ({x})"
-            if x
-            else "———",
+            format_func=lambda x: (
+                f"{doc_model_descriptions[x]} ({x})" if x else "———"
+            ),
         )
 
     st.checkbox("Avoid Repetition", key="avoid_repetition")
12 changes: 6 additions & 6 deletions daras_ai_v2/stable_diffusion.py
@@ -247,9 +247,9 @@ def instruct_pix2pix(
         },
         inputs={
             "prompt": [prompt] * len(images),
-            "negative_prompt": [negative_prompt] * len(images)
-            if negative_prompt
-            else None,
+            "negative_prompt": (
+                [negative_prompt] * len(images) if negative_prompt else None
+            ),
             "num_images_per_prompt": num_outputs,
             "num_inference_steps": num_inference_steps,
             "guidance_scale": guidance_scale,
@@ -440,9 +440,9 @@ def controlnet(
         pipeline={
             "model_id": text2img_model_ids[Text2ImgModels[selected_model]],
             "seed": seed,
-            "scheduler": Schedulers[scheduler].label
-            if scheduler
-            else "UniPCMultistepScheduler",
+            "scheduler": (
+                Schedulers[scheduler].label if scheduler else "UniPCMultistepScheduler"
+            ),
             "disable_safety_checker": True,
             "controlnet_model_id": [
                 controlnet_model_ids[ControlNetModels[model]]
1 change: 1 addition & 0 deletions gooeysite/urls.py
@@ -14,6 +14,7 @@
     1. Import the include() function: from django.urls import include, path
     2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
 """
+
 from django.contrib import admin
 from django.urls import path
 
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -93,3 +93,6 @@ pre-commit = "^3.5.0"
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.black]
+--force-exclude = "migrations|node_modules|\\.git|\\.venv|\\.env|\\.pytest_cache|\\.vscode|\\.github|\\.to"
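
Black's force-exclude option takes a regular expression matched against file paths; matching files are skipped even when they are passed to Black explicitly. A quick, hypothetical sanity check of a couple of the alternatives from the pattern above (unescaped from TOML, so \\. becomes \.):

import re

# Hypothetical check, not part of the repository.
force_exclude = re.compile(r"migrations|node_modules|\.git|\.venv")

assert force_exclude.search("bots/migrations/0001_initial.py") is not None
assert force_exclude.search("bots/models.py") is None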
6 changes: 3 additions & 3 deletions recipes/CompareLLM.py
@@ -36,9 +36,9 @@ class CompareLLMPage(BasePage):
 
     class RequestModel(BaseModel):
         input_prompt: str | None
-        selected_models: list[
-            typing.Literal[tuple(e.name for e in LargeLanguageModels)]
-        ] | None
+        selected_models: (
+            list[typing.Literal[tuple(e.name for e in LargeLanguageModels)]] | None
+        )
 
         avoid_repetition: bool | None
         num_outputs: int | None
6 changes: 3 additions & 3 deletions recipes/CompareText2Img.py
@@ -64,9 +64,9 @@ class RequestModel(BaseModel):
         seed: int | None
         sd_2_upscaling: bool | None
 
-        selected_models: list[
-            typing.Literal[tuple(e.name for e in Text2ImgModels)]
-        ] | None
+        selected_models: (
+            list[typing.Literal[tuple(e.name for e in Text2ImgModels)]] | None
+        )
         scheduler: typing.Literal[tuple(e.name for e in Schedulers)] | None
 
         edit_instruction: str | None
6 changes: 3 additions & 3 deletions recipes/CompareUpscaler.py
@@ -24,9 +24,9 @@ class RequestModel(BaseModel):
 
         scale: int
 
-        selected_models: list[
-            typing.Literal[tuple(e.name for e in UpscalerModels)]
-        ] | None
+        selected_models: (
+            list[typing.Literal[tuple(e.name for e in UpscalerModels)]] | None
+        )
 
     class ResponseModel(BaseModel):
         output_images: dict[typing.Literal[tuple(e.name for e in UpscalerModels)], str]
6 changes: 3 additions & 3 deletions recipes/DocExtract.py
@@ -77,9 +77,9 @@ class RequestModel(BaseModel):
 
         task_instructions: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         avoid_repetition: bool | None
         num_outputs: int | None
         quality: float | None
6 changes: 3 additions & 3 deletions recipes/DocSearch.py
@@ -60,9 +60,9 @@ class RequestModel(DocSearchRequest):
         task_instructions: str | None
         query_instructions: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         avoid_repetition: bool | None
         num_outputs: int | None
         quality: float | None
6 changes: 3 additions & 3 deletions recipes/DocSummary.py
@@ -60,9 +60,9 @@ class RequestModel(BaseModel):
         task_instructions: str | None
         merge_instructions: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         avoid_repetition: bool | None
         num_outputs: int | None
         quality: float | None
6 changes: 3 additions & 3 deletions recipes/GoogleGPT.py
@@ -79,9 +79,9 @@ class RequestModel(GoogleSearchMixin, BaseModel):
         task_instructions: str | None
         query_instructions: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         avoid_repetition: bool | None
         num_outputs: int | None
         quality: float | None
6 changes: 3 additions & 3 deletions recipes/ImageSegmentation.py
@@ -49,9 +49,9 @@ class ImageSegmentationPage(BasePage):
     class RequestModel(BaseModel):
         input_image: str
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in ImageSegmentationModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in ImageSegmentationModels)] | None
+        )
         mask_threshold: float | None
 
         rect_persepective_transform: bool | None
8 changes: 5 additions & 3 deletions recipes/Img2Img.py
@@ -46,9 +46,11 @@ class RequestModel(BaseModel):
         text_prompt: str | None
 
         selected_model: typing.Literal[tuple(e.name for e in Img2ImgModels)] | None
-        selected_controlnet_model: list[
-            typing.Literal[tuple(e.name for e in ControlNetModels)]
-        ] | typing.Literal[tuple(e.name for e in ControlNetModels)] | None
+        selected_controlnet_model: (
+            list[typing.Literal[tuple(e.name for e in ControlNetModels)]]
+            | typing.Literal[tuple(e.name for e in ControlNetModels)]
+            | None
+        )
         negative_prompt: str | None
 
         num_outputs: int | None
12 changes: 6 additions & 6 deletions recipes/QRCodeGenerator.py
@@ -88,18 +88,18 @@ class RequestModel(BaseModel):
         text_prompt: str
         negative_prompt: str | None
         image_prompt: str | None
-        image_prompt_controlnet_models: list[
-            typing.Literal[tuple(e.name for e in ControlNetModels)], ...
-        ] | None
+        image_prompt_controlnet_models: (
+            list[typing.Literal[tuple(e.name for e in ControlNetModels)], ...] | None
+        )
         image_prompt_strength: float | None
         image_prompt_scale: float | None
         image_prompt_pos_x: float | None
         image_prompt_pos_y: float | None
 
         selected_model: typing.Literal[tuple(e.name for e in Text2ImgModels)] | None
-        selected_controlnet_model: list[
-            typing.Literal[tuple(e.name for e in ControlNetModels)], ...
-        ] | None
+        selected_controlnet_model: (
+            list[typing.Literal[tuple(e.name for e in ControlNetModels)], ...] | None
+        )
 
         output_width: int | None
         output_height: int | None
6 changes: 3 additions & 3 deletions recipes/SEOSummary.py
@@ -98,9 +98,9 @@ class RequestModel(GoogleSearchMixin, BaseModel):
 
         enable_html: bool | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         sampling_temperature: float | None
         max_tokens: int | None
         num_outputs: int | None
6 changes: 3 additions & 3 deletions recipes/SmartGPT.py
@@ -34,9 +34,9 @@ class RequestModel(BaseModel):
         reflexion_prompt: str | None
         dera_prompt: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         avoid_repetition: bool | None
         num_outputs: int | None
         quality: float | None
6 changes: 3 additions & 3 deletions recipes/SocialLookupEmail.py
@@ -40,9 +40,9 @@ class RequestModel(BaseModel):
         domain: str | None
         key_words: str | None
 
-        selected_model: typing.Literal[
-            tuple(e.name for e in LargeLanguageModels)
-        ] | None
+        selected_model: (
+            typing.Literal[tuple(e.name for e in LargeLanguageModels)] | None
+        )
         sampling_temperature: float | None
         max_tokens: int | None
 
12 changes: 6 additions & 6 deletions recipes/Text2Audio.py
@@ -49,9 +49,9 @@ class RequestModel(BaseModel):
         seed: int | None
         sd_2_upscaling: bool | None
 
-        selected_models: list[
-            typing.Literal[tuple(e.name for e in Text2AudioModels)]
-        ] | None
+        selected_models: (
+            list[typing.Literal[tuple(e.name for e in Text2AudioModels)]] | None
+        )
 
     class ResponseModel(BaseModel):
         output_audios: dict[
@@ -114,9 +114,9 @@ def run(self, state: dict) -> typing.Iterator[str | None]:
                 ),
                 inputs=dict(
                     prompt=[request.text_prompt],
-                    negative_prompt=[request.negative_prompt]
-                    if request.negative_prompt
-                    else None,
+                    negative_prompt=(
+                        [request.negative_prompt] if request.negative_prompt else None
+                    ),
                     num_waveforms_per_prompt=request.num_outputs,
                     num_inference_steps=request.quality,
                     guidance_scale=request.guidance_scale,
6 changes: 3 additions & 3 deletions recipes/TextToSpeech.py
@@ -53,9 +53,9 @@ class TextToSpeechPage(BasePage):
     class RequestModel(BaseModel):
         text_prompt: str
 
-        tts_provider: typing.Literal[
-            tuple(e.name for e in TextToSpeechProviders)
-        ] | None
+        tts_provider: (
+            typing.Literal[tuple(e.name for e in TextToSpeechProviders)] | None
+        )
 
         uberduck_voice_name: str | None
         uberduck_speaking_rate: float | None

0 comments on commit dd52515
