diff --git a/dataherald/api/fastapi.py b/dataherald/api/fastapi.py index dc1c50a9..5fe9173a 100644 --- a/dataherald/api/fastapi.py +++ b/dataherald/api/fastapi.py @@ -692,6 +692,7 @@ def create_finetuning_job( model = model_repository.insert( Finetuning( db_connection_id=fine_tuning_request.db_connection_id, + alias=fine_tuning_request.alias, base_llm=fine_tuning_request.base_llm, golden_records=[ str(golden_record.id) for golden_record in golden_records diff --git a/dataherald/types.py b/dataherald/types.py index 99efe9e4..df67afe1 100644 --- a/dataherald/types.py +++ b/dataherald/types.py @@ -158,6 +158,7 @@ class BaseLLM(BaseModel): class Finetuning(BaseModel): id: str | None = None + alias: str | None = None db_connection_id: str | None = None status: str = "queued" error: str | None = None @@ -172,6 +173,7 @@ class Finetuning(BaseModel): class FineTuningRequest(BaseModel): db_connection_id: str + alias: str | None = None base_llm: BaseLLM golden_records: list[str] | None = None metadata: dict[str, str] | None = None diff --git a/docs/api.cancel_finetuning.rst b/docs/api.cancel_finetuning.rst index eb2de7b8..a4798068 100644 --- a/docs/api.cancel_finetuning.rst +++ b/docs/api.cancel_finetuning.rst @@ -25,6 +25,7 @@ HTTP 200 code response { "id": "finetuing-job-id", "db_connection_id": "database_connection_id" + "alias": "model name" "status": "finetuning_job_status" # queued is default other possible values are [queued, running, succeeded, failed, validating_files, or cancelled] "error": "The error message if the job failed" # optional default value is None "base_llm": { @@ -63,6 +64,7 @@ HTTP 200 code response { "id": "finetuning-job-id", + "alias": "my_model", "db_connection_id": "database_connection_id", "status": "cancelled", "error": "Fine tuning cancelled by the user", diff --git a/docs/api.finetuning.rst b/docs/api.finetuning.rst index 95f2770e..f5ab211e 100644 --- a/docs/api.finetuning.rst +++ b/docs/api.finetuning.rst @@ -16,6 +16,7 @@ Request this ``POST`` endpoint
to create a finetuning job:: { "db_connection_id": "database_connection_id" + "alias": "model name", "base_llm": { "model_provider": "model_provider_name" # right now openai is the only provider. "model_name": "model_name" # right now gpt-3.5-turbo and gpt-4 are suported. @@ -37,7 +38,8 @@ HTTP 201 code response { "id": "finetuing-job-id", - "db_connection_id": "database_connection_id" + "db_connection_id": "database_connection_id", + "alias": "model name", "status": "finetuning_job_status" # queued is default other possible values are [queued, running, succeeded, failed, validating_files, or cancelled] "error": "The error message if the job failed" # optional default value is None "base_llm": { @@ -67,6 +69,7 @@ HTTP 201 code response -H 'Content-Type: application/json' \ -d '{ "db_connection_id": "database_connection_id", + "alias": "my_model", "base_llm": { "model_provider": "openai", "model_name": "gpt-3.5-turbo-1106", @@ -83,6 +86,7 @@ HTTP 201 code response { "id": "finetuning-job-id", + "alias": "my_model", "db_connection_id": "database_connection_id", "status": "queued", "error": null, diff --git a/docs/api.get_finetuning.rst b/docs/api.get_finetuning.rst index 6a87b432..af3528c5 100644 --- a/docs/api.get_finetuning.rst +++ b/docs/api.get_finetuning.rst @@ -16,6 +16,7 @@ HTTP 200 code response { "id": "finetuing-job-id", "db_connection_id": "database_connection_id" + "alias": "model name" "status": "finetuning_job_status" # queued is default other possible values are [queued, running, succeeded, failed, validating_files, or cancelled] "error": "The error message if the job failed" # optional default value is None "base_llm": { @@ -50,6 +51,7 @@ HTTP 200 code response { "id": "finetuning-job-id", "db_connection_id": "database_connection_id", + "alias": "my_model", "status": "validating_files", "error": null, "base_llm": { diff --git a/docs/api.rst b/docs/api.rst index e6ec1229..d546b2fc 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -149,6 +149,7 @@ Related 
endpoints are: { "id": "finetuing-job-id", "db_connection_id": "database_connection_id", + "alias": "model name", "status": "finetuning_job_status", // Possible values: queued, running, succeeded, validating_files, failed, or cancelled "error": "The error message if the job failed", // Optional, default is None "base_llm": {