From 6c816f65dc6deb72a22947d0047e00ba45360e66 Mon Sep 17 00:00:00 2001
From: Erik Bernhardsson
Date: Wed, 24 Apr 2024 11:52:55 -0400
Subject: [PATCH] stub -> app

---
 src/__init__.py  |  2 +-
 src/common.py    |  4 ++--
 src/inference.py |  6 +++---
 src/train.py     | 10 +++++-----
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/__init__.py b/src/__init__.py
index 4c93949..4f8d263 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -1,3 +1,3 @@
-from .common import stub
+from .common import app
 from .train import train, launch
 from .inference import Inference
diff --git a/src/common.py b/src/common.py
index dcfcd1b..24ffb3a 100644
--- a/src/common.py
+++ b/src/common.py
@@ -1,6 +1,6 @@
 from pathlib import PurePosixPath
 
-from modal import Stub, Image, Volume
+from modal import App, Image, Volume
 
 APP_NAME = "example-axolotl"
 
@@ -32,7 +32,7 @@
     "torch==2.1.2",
 )
 
-stub = Stub(APP_NAME)
+app = App(APP_NAME)
 
 # Volumes for pre-trained models and training runs.
 pretrained_volume = Volume.from_name("example-pretrained-vol", create_if_missing=True)
diff --git a/src/inference.py b/src/inference.py
index 084e624..bbf7634 100644
--- a/src/inference.py
+++ b/src/inference.py
@@ -5,7 +5,7 @@
 import modal
 from fastapi.responses import StreamingResponse
 
-from .common import stub, vllm_image, VOLUME_CONFIG
+from .common import app, vllm_image, VOLUME_CONFIG
 
 N_INFERENCE_GPU = 2
 
@@ -21,7 +21,7 @@ def get_model_path_from_run(path: Path) -> Path:
         return path / yaml.safe_load(f.read())["output_dir"] / "merged"
 
 
-@stub.cls(
+@app.cls(
     gpu=modal.gpu.H100(count=N_INFERENCE_GPU),
     image=vllm_image,
     volumes=VOLUME_CONFIG,
@@ -103,7 +103,7 @@ async def web(self, input: str):
         return StreamingResponse(self._stream(input), media_type="text/event-stream")
 
 
-@stub.local_entrypoint()
+@app.local_entrypoint()
 def inference_main(run_name: str = "", prompt: str = ""):
     if prompt:
         for chunk in Inference(run_name).completion.remote_gen(prompt):
diff --git a/src/train.py b/src/train.py
index 5e4c905..780c265 100644
--- a/src/train.py
+++ b/src/train.py
@@ -5,7 +5,7 @@
 import os
 
 from .common import (
-    stub,
+    app,
     axolotl_image,
     VOLUME_CONFIG,
 )
@@ -46,7 +46,7 @@ def run_cmd(cmd: str, run_folder: str):
     VOLUME_CONFIG["/runs"].commit()
 
 
-@stub.function(
+@app.function(
     image=axolotl_image,
     gpu=GPU_CONFIG,
     volumes=VOLUME_CONFIG,
@@ -70,7 +70,7 @@ def train(run_folder: str, output_dir: str):
     return merge_handle
 
 
-@stub.function(image=axolotl_image, volumes=VOLUME_CONFIG, timeout=3600 * 24)
+@app.function(image=axolotl_image, volumes=VOLUME_CONFIG, timeout=3600 * 24)
 def merge(run_folder: str, output_dir: str):
     import shutil
 
@@ -86,7 +86,7 @@ def merge(run_folder: str, output_dir: str):
     VOLUME_CONFIG["/runs"].commit()
 
 
-@stub.function(image=axolotl_image, timeout=60 * 30, volumes=VOLUME_CONFIG)
+@app.function(image=axolotl_image, timeout=60 * 30, volumes=VOLUME_CONFIG)
 def launch(config_raw: str, data_raw: str):
     from huggingface_hub import snapshot_download
     import yaml
@@ -131,7 +131,7 @@ def launch(config_raw: str, data_raw: str):
     return run_name, train_handle
 
 
-@stub.local_entrypoint()
+@app.local_entrypoint()
 def main(
     config: str,
     data: str,