Commit

add raises docstring & formatting
HRashidi committed Jul 11, 2024
1 parent 84fa594 commit 1ba4970
Showing 3 changed files with 13 additions and 4 deletions.
aana/core/models/video.py (3 additions, 1 deletion)
@@ -133,7 +133,9 @@ class VideoMetadata(BaseModel):

title: str = Field(None, description="The title of the video.")
description: str = Field(None, description="The description of the video.")
-    duration: float | None = Field(None, description="The duration of the video in seconds.")
+    duration: float | None = Field(
+        None, description="The duration of the video in seconds."
+    )
model_config = ConfigDict(
json_schema_extra={
"description": "Metadata of a video.",
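
For reference, a minimal sketch of how the VideoMetadata fields touched in this hunk might be used; the values below are illustrative, not taken from the repository:

    # Illustrative only: field names come from the diff above.
    from aana.core.models.video import VideoMetadata

    metadata = VideoMetadata(
        title="Sample clip",
        description="A short test video.",
        duration=12.5,  # seconds; None is also allowed when the duration is unknown
    )
    print(metadata.model_dump())
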
aana/deployments/vllm_deployment.py (4 additions, 3 deletions)
@@ -41,7 +41,7 @@ class VLLMConfig(BaseModel):
chat_template (str): the name of the chat template, if not provided, the chat template from the model will be used
but some models may not have a chat template (optional, default: None)
enforce_eager: whether to enforce eager execution (optional, default: False)
-    engine_args: extra engine arguments (optional, default: {})
+        engine_args: extra engine arguments (optional, default: {})
"""

@@ -57,6 +57,7 @@ class VLLMConfig(BaseModel):
enforce_eager: bool | None = Field(default=False)
engine_args: CustomConfig = {}


@serve.deployment
class VLLMDeployment(BaseTextGenerationDeployment):
"""Deployment to serve large language models using vLLM."""
@@ -101,7 +102,7 @@ async def apply_config(self, config: dict[str, Any]):
enforce_eager=config_obj.enforce_eager,
gpu_memory_utilization=self.gpu_memory_utilization,
max_model_len=config_obj.max_model_len,
-            **config_obj.engine_args
+            **config_obj.engine_args,
)

# TODO: check if the model is already loaded.
@@ -153,7 +154,7 @@ async def generate_stream(
results_generator = self.engine.generate(
sampling_params=sampling_params_vllm,
request_id=request_id,
-            inputs=TokensPrompt(prompt_token_ids=prompt_token_ids)
+            inputs=TokensPrompt(prompt_token_ids=prompt_token_ids),
)

num_returned = 0
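
The generate_stream hunk passes pre-tokenized input through vLLM's TokensPrompt wrapper; a minimal sketch of building such a prompt is below (the literal token ids are placeholders, only TokensPrompt itself appears in the diff):

    # Minimal sketch: wrap pre-tokenized input for the vLLM engine.
    from vllm.inputs import TokensPrompt

    prompt_token_ids = [1, 15043, 29892, 3186]  # placeholder ids from some tokenizer
    prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
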
aana/integrations/external/yt_dlp.py (6 additions, 0 deletions)
@@ -20,6 +20,9 @@ def get_video_metadata(video_url: str) -> VideoMetadata:
Returns:
metadata (VideoMetadata): the metadata of the video
+    Raises:
+        DownloadException: Request does not succeed.
"""

ydl_options = {
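
With the Raises section added above, a caller can handle the failure case explicitly; a hedged sketch follows (the DownloadException import path is an assumption, not confirmed by this diff):

    # Hypothetical caller of get_video_metadata; the exception import path is assumed.
    from aana.exceptions.io import DownloadException
    from aana.integrations.external.yt_dlp import get_video_metadata

    try:
        metadata = get_video_metadata("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
        print(metadata.title, metadata.duration)
    except DownloadException:
        print("Could not fetch metadata for the video.")
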
@@ -51,6 +54,9 @@ def download_video(video_input: VideoInput | Video) -> Video:
Returns:
Video: the video object
+    Raises:
+        DownloadException: Request does not succeed.
"""
if isinstance(video_input, Video):
return video_input
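
Similarly for download_video, which per the context lines above returns a Video unchanged if one is passed in; a minimal sketch of a caller (the URL-as-input form and the exception import path are assumptions):

    # Hypothetical caller of download_video; argument form and exception import
    # path are assumptions, not confirmed by this diff.
    from aana.exceptions.io import DownloadException
    from aana.integrations.external.yt_dlp import download_video

    try:
        video = download_video("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    except DownloadException:
        video = None  # request did not succeed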
