diff --git a/ballerina/Dependencies.toml b/ballerina/Dependencies.toml index a1b101d..d6606d1 100644 --- a/ballerina/Dependencies.toml +++ b/ballerina/Dependencies.toml @@ -10,7 +10,7 @@ distribution-version = "2201.9.2" [[package]] org = "ballerina" name = "auth" -version = "2.11.1" +version = "2.11.2" dependencies = [ {org = "ballerina", name = "crypto"}, {org = "ballerina", name = "jballerina.java"}, @@ -144,6 +144,15 @@ dependencies = [ {org = "ballerina", name = "jballerina.java"} ] +[[package]] +org = "ballerina" +name = "lang.error" +version = "0.0.0" +scope = "testOnly" +dependencies = [ + {org = "ballerina", name = "jballerina.java"} +] + [[package]] org = "ballerina" name = "lang.int" @@ -202,6 +211,9 @@ dependencies = [ {org = "ballerina", name = "lang.value"}, {org = "ballerina", name = "observe"} ] +modules = [ + {org = "ballerina", packageName = "log", moduleName = "log"} +] [[package]] org = "ballerina" @@ -245,6 +257,9 @@ dependencies = [ {org = "ballerina", name = "io"}, {org = "ballerina", name = "jballerina.java"} ] +modules = [ + {org = "ballerina", packageName = "os", moduleName = "os"} +] [[package]] org = "ballerina" @@ -255,6 +270,20 @@ dependencies = [ {org = "ballerina", name = "time"} ] +[[package]] +org = "ballerina" +name = "test" +version = "0.0.0" +scope = "testOnly" +dependencies = [ + {org = "ballerina", name = "jballerina.java"}, + {org = "ballerina", name = "lang.array"}, + {org = "ballerina", name = "lang.error"} +] +modules = [ + {org = "ballerina", packageName = "test", moduleName = "test"} +] + [[package]] org = "ballerina" name = "time" @@ -292,7 +321,10 @@ name = "openai.finetunes" version = "1.0.5" dependencies = [ {org = "ballerina", name = "http"}, + {org = "ballerina", name = "log"}, {org = "ballerina", name = "mime"}, + {org = "ballerina", name = "os"}, + {org = "ballerina", name = "test"}, {org = "ballerina", name = "url"}, {org = "ballerinai", name = "observe"} ] diff --git a/ballerina/tests/README.md 
b/ballerina/tests/README.md new file mode 100644 index 0000000..cca0034 --- /dev/null +++ b/ballerina/tests/README.md @@ -0,0 +1,89 @@ +# Running Tests + +## Prerequisites + +You need an API token from OpenAI. + +To obtain this, refer to the [Ballerina OpenAI Finetunes Connector](https://github.com/ballerina-platform/module-ballerinax-openai.finetunes/blob/main/ballerina/Module.md). + +## Test Environments + +There are two test environments for running the `openai.finetunes` connector tests. The default environment is a mock server for the OpenAI API. The other environment is the actual OpenAI API. + +You can run the tests in either of these environments, and each has its own compatible set of tests. + +| Test Groups | Environment | +|-------------|---------------------------------------------------| +| mock_tests | Mock server for OpenAI API (Default Environment) | +| live_tests | OpenAI API | + +## Running Tests in the Mock Server + +To execute the tests on the mock server, ensure that the `isLiveServer` environment variable is either set to `false` or left unset before initiating the tests. + +This environment variable can be configured within the `Config.toml` file located in the `tests` directory or specified as an environment variable. + +### Using a `Config.toml` File + +Create a `Config.toml` file in the `tests` directory with the following content: + +```toml +isLiveServer = false +``` + +### Using Environment Variables + +Alternatively, you can set the environment variable directly. 
+ +For Linux or macOS: + +```bash +export isLiveServer=false +``` + +For Windows: + +```bash +setx isLiveServer false +``` + +Then, run the following command to execute the tests: + +```bash +./gradlew clean test +``` + +## Running Tests Against the OpenAI Live API + +### Using a `Config.toml` File + +Create a `Config.toml` file in the `tests` directory and add your authentication credentials: + +```toml +isLiveServer = true +token = "" +``` + +### Using Environment Variables + +Alternatively, you can set your authentication credentials as environment variables. + +For Linux or macOS: + +```bash +export isLiveServer=true +export token="" +``` + +For Windows: + +```bash +setx isLiveServer true +setx token "" +``` +Then, run the following command to execute the tests: + +```bash +./gradlew clean test +``` \ No newline at end of file diff --git a/ballerina/tests/mock_service.bal new file mode 100644 index 0000000..47f5b7e --- /dev/null +++ b/ballerina/tests/mock_service.bal @@ -0,0 +1,436 @@ +// Copyright (c) 2024, WSO2 LLC. (http://www.wso2.com). +// +// WSO2 LLC. licenses this file to you under the Apache License, +// Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +import ballerina/http; +import ballerina/log; + +public type OkFineTuningJob record {| + *http:Ok; + FineTuningJob body; + map<string|string[]> headers; +|}; + +public type OkOpenAIFile record {| + *http:Ok; + OpenAIFile body; + map<string|string[]> headers; +|}; + +listener http:Listener httpListener = new (9090); + +http:Service mockService = service object { + + # Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + # + # + model - The model to delete + # + return - OK + resource function delete models/[string model]() returns DeleteModelResponse { + + DeleteModelResponse response = { + 'object: "model", + id: model, + deleted: true + }; + + return response; + } + + # Immediately cancel a fine-tune job. + # + # + fine_tuning_job_id - The ID of the fine-tuning job to cancel. + # + return - OK + resource function post fine_tuning/jobs/[string fine_tuning_job_id]/cancel() returns OkFineTuningJob { + + OkFineTuningJob response = { + body: { + "object": "fine_tuning.job", + "id": fine_tuning_job_id, + "model": "gpt-3.5-turbo-0125", + "created_at": 1723110882, + "finished_at": null, + "fine_tuned_model": null, + "organization_id": "org-Gzp0rlPk9gw4JaNXmPqDJ1H4", + "result_files": [], + "status": "validating_files", + "validation_file": null, + "training_file": "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + "hyperparameters": { + "n_epochs": "auto", + "batch_size": "auto", + "learning_rate_multiplier": "auto" + }, + "trained_tokens": null, + "error": {}, + "user_provided_suffix": null, + "seed": 1776549854, + "estimated_finish": null, + "integrations": [] + }, + headers: { + "Content-Type": "application/json" + } + }; + + return response; + } + + # Delete a file. + # + # + file_id - The ID of the file to use for this request.
+ # + return - OK + resource function delete files/[string file_id]() returns DeleteFileResponse { + + DeleteFileResponse response = { + 'object: "file", + id: file_id, + deleted: true + }; + + return response; + } + + # Returns a list of files that belong to the user's organization. + # + # + purpose - Only return files with the given purpose. + # + return - OK + resource function get files(string? purpose) returns ListFilesResponse { + + ListFilesResponse response = { + 'object: "list", + data: [ + { + 'object: "file", + id: "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + purpose: "fine-tune", + filename: "sample.jsonl", + bytes: 71, + created_at: 1723097702, + status: "processed", + status_details: null + }, + { + 'object: "file", + id: "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + purpose: "fine-tune", + filename: "sample.jsonl", + bytes: 71, + created_at: 1723097702, + status: "processed", + status_details: null + } + ] + }; + + return response; + } + + # Returns information about a specific file. + # + # + file_id - The ID of the file to use for this request. + # + return - OK + resource function get files/[string file_id]() returns OpenAIFile { + + OpenAIFile response = { + 'object: "file", + id: file_id, + purpose: "fine-tune", + filename: "sample.jsonl", + bytes: 71, + created_at: 1723097702, + status: "processed", + status_details: null + }; + + return response; + } + + # Returns the contents of the specified file. + # + # + file_id - The ID of the file to use for this request. + # + return - OK + resource function get files/[string file_id]/content() returns byte[] { + + byte[] response = [123, 34, 116, 101, 120, 116, 34, 58, 34, 72, 101, 108, 108, 111, 44, 32, 87, 111, 114, 108, 100, 34, 125]; + + return response; + } + + # List your organization's fine-tuning jobs + # + # + after - Identifier for the last job from the previous pagination request. + # + 'limit - Number of fine-tuning jobs to retrieve. + # + return - OK + resource function get fine_tuning/jobs(string? 
after, int 'limit = 20) returns ListPaginatedFineTuningJobsResponse { + + ListPaginatedFineTuningJobsResponse response = { + "object": "list", + "data": [ + { + "object": "fine_tuning.job", + "id": "ftjob-G0rwrYUnRwEWPjDRvxByxPxU", + "model": "gpt-3.5-turbo-0125", + "created_at": 1723097706, + "finished_at": null, + "fine_tuned_model": null, + "organization_id": "org-Gzp0rlPk9gw4JaNXmPqDJ1H4", + "result_files": [], + "status": "failed", + "validation_file": null, + "training_file": "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + "hyperparameters": { + "n_epochs": "auto", + "batch_size": "auto", + "learning_rate_multiplier": "auto" + }, + "trained_tokens": null, + "error": { + "code": "invalid_training_file", + "param": "training_file", + "message": "The job failed due to an invalid training file. Expected file to have JSONL format, where every line is a valid JSON dictionary. Line 1 is not a dictionary." + }, + "user_provided_suffix": null, + "seed": 1913581589, + "estimated_finish": null, + "integrations": [] + } + ], + "has_more": false + }; + + return response; + } + + # Get info about a fine-tuning job. + # + # [Learn more about fine-tuning](/docs/guides/fine-tuning) + # + # + fine_tuning_job_id - The ID of the fine-tuning job. 
+ # + return - OK + resource function get fine_tuning/jobs/[string fine_tuning_job_id]() returns FineTuningJob { + + FineTuningJob response = { + "object": "fine_tuning.job", + "id": fine_tuning_job_id, + "model": "gpt-3.5-turbo-0125", + "created_at": 1723097706, + "finished_at": null, + "fine_tuned_model": null, + "organization_id": "org-Gzp0rlPk9gw4JaNXmPqDJ1H4", + "result_files": [], + "status": "failed", + "validation_file": null, + "training_file": "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + "hyperparameters": { + "n_epochs": "auto", + "batch_size": "auto", + "learning_rate_multiplier": "auto" + }, + "trained_tokens": null, + "error": { + "code": "invalid_training_file", + "param": "training_file", + "message": "The job failed due to an invalid training file. Expected file to have JSONL format, where every line is a valid JSON dictionary. Line 1 is not a dictionary." + }, + "user_provided_suffix": null, + "seed": 1913581589, + "estimated_finish": null, + "integrations": [] + }; + + return response; + } + + # List checkpoints for a fine-tuning job. + # + # + fine_tuning_job_id - The ID of the fine-tuning job to get checkpoints for. + # + after - Identifier for the last checkpoint ID from the previous pagination request. + # + 'limit - Number of checkpoints to retrieve. + # + return - OK + resource function get fine_tuning/jobs/[string fine_tuning_job_id]/checkpoints(string? after, int 'limit = 10) returns ListFineTuningJobCheckpointsResponse { + + ListFineTuningJobCheckpointsResponse response = { + "object": "list", + "data": [ + { + "id": "checkpoint-1", + "created_at": 1723110882, + "object": "fine_tuning.job.checkpoint", + "fine_tuned_model_checkpoint": "gpt-3.5-turbo-0125-1", + "fine_tuning_job_id": fine_tuning_job_id, + "metrics": { + "step": 1 + }, + "step_number": 2 + } + ], + "has_more": false + }; + + return response; + } + + # Get status updates for a fine-tuning job. + # + # + fine_tuning_job_id - The ID of the fine-tuning job to get events for. 
+ # + after - Identifier for the last event from the previous pagination request. + # + 'limit - Number of events to retrieve. + # + return - OK + resource function get fine_tuning/jobs/[string fine_tuning_job_id]/events(string? after, int 'limit = 20) returns ListFineTuningJobEventsResponse { + + ListFineTuningJobEventsResponse response = { + "object": "list", + "data": [ + { + "id": fine_tuning_job_id, + "created_at": 1723110882, + "level": "warn", + "message": "Fine-tuning job started.", + "object": "fine_tuning.job.event" + } + ] + }; + + return response; + } + + # Lists the currently available models, and provides basic information about each one such as the owner and availability. + # + # + return - OK + resource function get models() returns ListModelsResponse { + + ListModelsResponse response = { + 'object: "list", + data: [ + { + id: "dall-e-3", + 'object: "model", + created: 1698785189, + owned_by: "system" + }, + { + id: "dall-e-3", + 'object: "model", + created: 1698785189, + owned_by: "system" + } + ] + }; + + return response; + } + + # Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + # + # + model - The ID of the model to use for this request + # + return - OK + resource function get models/[string model]() returns Model { + + Model response = { + id: model, + 'object: "model", + created: 1698785189, + owned_by: "system" + }; + + return response; + } + + # Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + # + # The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. + # + # The Fine-tuning API only supports `.jsonl` files. 
The input also has certain required formats for fine-tuning [chat](/docs/api-reference/fine-tuning/chat-input) or [completions](/docs/api-reference/fine-tuning/completions-input) models. + # + # The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](/docs/api-reference/batch/request-input). + # + # Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + # + # + return - OK + resource function post files(http:Request request) returns OkOpenAIFile { + + OkOpenAIFile response = { + body: { + 'object: "file", + id: "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + purpose: "fine-tune", + filename: "sample.jsonl", + bytes: 71, + created_at: 1723097702, + status: "processed", + status_details: null + }, + headers: { + "Content-Type": "application/json" + } + }; + + return response; + } + + # Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + # + # Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ + # + # [Learn more about fine-tuning](/docs/guides/fine-tuning) + # + # + return - OK + resource function post fine_tuning/jobs(@http:Payload CreateFineTuningJobRequest payload) returns OkFineTuningJob { + + OkFineTuningJob response = { + body: { + "object": "fine_tuning.job", + "id": "ftjob-5NikxOY1BsPHxt8Z8YBm8AX1", + "model": "gpt-3.5-turbo-0125", + "created_at": 1723110882, + "finished_at": null, + "fine_tuned_model": null, + "organization_id": "org-Gzp0rlPk9gw4JaNXmPqDJ1H4", + "result_files": [], + "status": "validating_files", + "validation_file": null, + "training_file": "file-JZMH9Xxnt7Hg2io6N2kzmlzM", + "hyperparameters": { + "n_epochs": "auto", + "batch_size": "auto", + "learning_rate_multiplier": "auto" + }, + "trained_tokens": null, + "error": {}, + "user_provided_suffix": null, + "seed": 1776549854, + "estimated_finish": null, + "integrations": [] + }, + headers: { + "Content-Type": "application/json" + } + }; + + return response; + } +}; + +function init() returns error? { + + if isLiveServer { + log:printInfo("Skipping mock server initialization as the tests are running on live server"); + return; + } + + log:printInfo("Initiating mock server"); + check httpListener.attach(mockService, "/"); + check httpListener.'start(); +} diff --git a/ballerina/tests/test.bal new file mode 100644 index 0000000..14003a1 --- /dev/null +++ b/ballerina/tests/test.bal @@ -0,0 +1,186 @@ +// Copyright (c) 2024, WSO2 LLC. (http://www.wso2.com). +// +// WSO2 LLC. licenses this file to you under the Apache License, +// Version 2.0 (the "License"); you may not use this file except +// in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. + +import ballerina/os; +import ballerina/test; + +configurable boolean isLiveServer = os:getEnv("isLiveServer") == "true"; +configurable string token = isLiveServer ? os:getEnv("OPENAI_API_KEY") : "test"; +configurable string serviceUrl = isLiveServer ? "https://api.openai.com/v1" : "http://localhost:9090"; +configurable string apiKey = isLiveServer ? token : ""; + +final ConnectionConfig config = { + auth: { + token: apiKey + } +}; +final Client openAIFinetunes = check new Client(config, serviceUrl); + +const string fileName = "sample.jsonl"; +const byte[] fileContent = [123, 34, 109, 101, 115, 115, 97, 103, 101, 115, 34, 58, 32, 91, 123, 34, 114, 111, 108, 101, 34, 58, 32, 34, 117, 115, 101, 114, 34, 44, 32, 34, 99, 111, 110, 116, 101, 110, 116, 34, 58, 32, 34, 87, 104, 97, 116, 32, 105, 115, 32, 116, 104, 101, 32, 99, 97, 112, 105, 116, 97, 108, 32, 111, 102, 32, 70, 114, 97, 110, 99, 101, 63, 34, 125, 44, 32, 123, 34, 114, 111, 108, 101, 34, 58, 32, 34, 97, 115, 115, 105, 115, 116, 97, 110, 116, 34, 44, 32, 34, 99, 111, 110, 116, 101, 110, 116, 34, 58, 32, 34, 84, 104, 101, 32, 99, 97, 112, 105, 116, 97, 108, 32, 111, 102, 32, 70, 114, 97, 110, 99, 101, 32, 105, 115, 32, 80, 97, 114, 105, 115, 46, 34, 125, 93, 125, 13, 10, 123, 34, 109, 101, 115, 115, 97, 103, 101, 115, 34, 58, 32, 91, 123, 34, 114, 111, 108, 101, 34, 58, 32, 34, 117, 115, 101, 114, 34, 44, 32, 34, 99, 111, 110, 116, 101, 110, 116, 34, 58, 32, 34, 87, 104, 97, 116, 32, 105, 115, 32, 116, 104, 101, 32, 112, 114, 105, 109, 97, 114, 121, 32, 102, 117, 110, 99, 116, 105, 111, 110, 32, 111, 102, 32, 116, 104, 101, 32, 104, 101, 97, 114, 116, 63, 34, 125, 44, 32, 123, 34, 114, 111, 108, 101, 34, 58, 32, 34, 97, 115, 115, 105, 115, 116, 97, 110, 116, 34, 44, 32, 34, 99, 111, 110, 116, 101, 110, 116, 34, 58, 32, 34, 84, 104, 101, 32, 112, 114, 105, 109, 97, 114, 121, 32, 102, 117, 110, 99, 116, 
105, 111, 110, 32, 111, 102, 32, 116, 104, 101, 32, 104, 101, 97, 114, 116, 32, 105, 115, 32, 116, 111, 32, 112, 117, 109, 112, 32, 98, 108, 111, 111, 100, 32, 116, 104, 114, 111, 117, 103, 104, 111, 117, 116, 32, 116, 104, 101, 32, 98, 111, 100, 121, 46, 34, 125, 93, 125, 13, 10]; +string modelId = "gpt-3.5-turbo"; +string fileId = ""; +string jobId = ""; + +@test:Config { + groups: ["models", "live_tests", "mock_tests"] +} +function testListModels() returns error? { + ListModelsResponse modelsResponse = check openAIFinetunes->/models.get(); + test:assertEquals(modelsResponse.'object, "list", "Object type mismatched"); + test:assertTrue(modelsResponse.hasKey("data"), "Response does not have the key 'data'"); +} + +@test:Config { + dependsOn: [testListModels], + groups: ["models", "live_tests", "mock_tests"] +} +function testRetrieveModel() returns error? { + Model modelResponse = check openAIFinetunes->/models/[modelId].get(); + test:assertEquals(modelResponse.id, modelId, "Model id mismatched"); + test:assertTrue(modelResponse.hasKey("object"), "Response does not have the key 'object'"); +} + +@test:Config { + dependsOn: [testCreateFineTuningJob, testListModels, testRetrieveModel, testListFineTuningJobCheckpoints, testListFineTuningEvents], + enable: isLiveServer ? false : true, // Enable this test only for mock server. + groups: ["models", "mock_tests"] +} +function testDeleteModel() returns error? { + DeleteModelResponse modelResponseDelete = check openAIFinetunes->/models/[modelId].delete(); + test:assertEquals(modelResponseDelete.id, modelId, "Model id mismatched"); + test:assertTrue(modelResponseDelete.hasKey("object"), "Response does not have the key 'object'"); +} + +@test:Config { + groups: ["files", "live_tests", "mock_tests"] +} +function testListFiles() returns error? 
{ + ListFilesResponse filesResponse = check openAIFinetunes->/files.get(); + test:assertEquals(filesResponse.'object, "list", "Object type mismatched"); + test:assertTrue(filesResponse.hasKey("data"), "Response does not have the key 'data'"); +} + +@test:Config { + dependsOn: [testListFiles], + groups: ["files", "live_tests", "mock_tests"] +} +function testCreateFile() returns error? { + CreateFileRequest fileRequest = { + file: {fileContent, fileName}, + purpose: "fine-tune" + }; + + OpenAIFile fileResponse = check openAIFinetunes->/files.post(fileRequest); + fileId = fileResponse.id; + test:assertEquals(fileResponse.purpose, "fine-tune", "Purpose mismatched"); + test:assertTrue(fileResponse.id !is "", "File id is empty"); +} + +@test:Config { + dependsOn: [testCreateFile], + groups: ["files", "live_tests", "mock_tests"] +} +function testRetrieveFile() returns error? { + OpenAIFile fileResponse = check openAIFinetunes->/files/[fileId].get(); + test:assertEquals(fileResponse.id, fileId, "File id mismatched"); + test:assertTrue(fileResponse.hasKey("object"), "Response does not have the key 'object'"); +} + +@test:Config { + dependsOn: [testCreateFile], + groups: ["files", "live_tests", "mock_tests"] +} +function testDownloadFile() returns error? { + byte[] fileContentDownload = check openAIFinetunes->/files/[fileId]/content.get(); + test:assertFalse(fileContentDownload.length() <= 0, "File content is empty"); +} + +@test:Config { + dependsOn: [testCreateFile, testRetrieveFile, testDownloadFile, testCreateFineTuningJob], + groups: ["files", "live_tests", "mock_tests"] +} +function testDeleteFile() returns error? 
{ + DeleteFileResponse fileResponseDelete = check openAIFinetunes->/files/[fileId].delete(); + test:assertEquals(fileResponseDelete.id, fileId, "File id mismatched"); + test:assertTrue(fileResponseDelete.hasKey("object"), "Response does not have the key 'object'"); +} + +@test:Config { + groups: ["fine-tuning", "live_tests", "mock_tests"] +} +function testListPaginatedFineTuningJobs() returns error? { + ListPaginatedFineTuningJobsResponse jobsResponse = check openAIFinetunes->/fine_tuning/jobs.get(); + test:assertEquals(jobsResponse.'object, "list", "Object type mismatched"); + test:assertTrue(jobsResponse.hasKey("data"), "Response does not have the key 'data'"); +} + +@test:Config { + dependsOn: [testListModels, testCreateFile], + groups: ["fine-tuning", "live_tests", "mock_tests"] +} +function testCreateFineTuningJob() returns error? { + CreateFineTuningJobRequest fineTuneRequest = { + model: modelId, + training_file: fileId + }; + + FineTuningJob fineTuneResponse = check openAIFinetunes->/fine_tuning/jobs.post(fineTuneRequest); + jobId = fineTuneResponse.id; + test:assertTrue(fineTuneResponse.hasKey("object"), "Response does not have the key 'object'"); + test:assertTrue(fineTuneResponse.hasKey("id"), "Response does not have the key 'id'"); +} + +@test:Config { + dependsOn: [testCreateFineTuningJob], + groups: ["fine-tuning", "live_tests", "mock_tests"] +} +function testRetrieveFineTuningJob() returns error? { + FineTuningJob jobResponse = check openAIFinetunes->/fine_tuning/jobs/[jobId].get(); + test:assertEquals(jobResponse.id, jobId, "Job id mismatched"); + test:assertEquals(jobResponse.'object, "fine_tuning.job", "Response does not have the key 'object'"); +} + +@test:Config { + dependsOn: [testCreateFineTuningJob], + groups: ["fine-tuning", "live_tests", "mock_tests"] +} +function testListFineTuningEvents() returns error? 
{ + ListFineTuningJobEventsResponse eventsResponse = check openAIFinetunes->/fine_tuning/jobs/[jobId]/events.get(); + test:assertEquals(eventsResponse.'object, "list", "Object type mismatched"); + test:assertTrue(eventsResponse.hasKey("data"), "Response does not have the key 'data'"); +} + +@test:Config { + dependsOn: [testCreateFineTuningJob], + groups: ["fine-tuning", "live_tests", "mock_tests"] +} +function testListFineTuningJobCheckpoints() returns error? { + ListFineTuningJobCheckpointsResponse checkpointsResponse = check openAIFinetunes->/fine_tuning/jobs/[jobId]/checkpoints.get(); + test:assertEquals(checkpointsResponse.'object, "list", "Object type mismatched"); + test:assertTrue(checkpointsResponse.hasKey("data"), "Response does not have the key 'data'"); +} + +@test:Config { + dependsOn: [testCreateFineTuningJob], + enable: isLiveServer ? false : true, // Enable this test only for mock server. + groups: ["fine-tuning", "mock_tests"] +} +function testCancelFineTuningJob() returns error? 
{ + FineTuningJob jobResponse = check openAIFinetunes->/fine_tuning/jobs/[jobId]/cancel.post(); + test:assertEquals(jobResponse.id, jobId, "Job id mismatched"); + test:assertTrue(jobResponse.hasKey("object"), "Response does not have the key 'object'"); +} diff --git a/docs/setup/resources/sample.jsonl b/docs/setup/resources/sample.jsonl new file mode 100644 index 0000000..c1a9c70 --- /dev/null +++ b/docs/setup/resources/sample.jsonl @@ -0,0 +1,10 @@ +{"messages": [{"role": "user", "content": "What is the capital of France?"}, {"role": "assistant", "content": "The capital of France is Paris."}]} +{"messages": [{"role": "user", "content": "What is the primary function of the heart?"}, {"role": "assistant", "content": "The primary function of the heart is to pump blood throughout the body."}]} +{"messages": [{"role": "user", "content": "What is photosynthesis?"}, {"role": "assistant", "content": "Photosynthesis is the process by which green plants and some other organisms convert sunlight into chemical energy stored in the form of glucose."}]} +{"messages": [{"role": "user", "content": "Who wrote the play 'Romeo and Juliet'?"}, {"role": "assistant", "content": "William Shakespeare wrote the play 'Romeo and Juliet'."}]} +{"messages": [{"role": "user", "content": "Which element has the atomic number 1?"}, {"role": "assistant", "content": "Hydrogen has the atomic number 1."}]} +{"messages": [{"role": "user", "content": "What is the largest planet in our solar system?"}, {"role": "assistant", "content": "Jupiter is the largest planet in our solar system."}]} +{"messages": [{"role": "user", "content": "What is the freezing point of water in Celsius?"}, {"role": "assistant", "content": "The freezing point of water in Celsius is 0 degrees."}]} +{"messages": [{"role": "user", "content": "What is the square root of 144?"}, {"role": "assistant", "content": "The square root of 144 is 12."}]} +{"messages": [{"role": "user", "content": "Who is the author of 'To Kill a 
Mockingbird'?"}, {"role": "assistant", "content": "The author of 'To Kill a Mockingbird' is Harper Lee."}]} +{"messages": [{"role": "user", "content": "What is the smallest unit of life?"}, {"role": "assistant", "content": "The smallest unit of life is the cell."}]}