From 8ed00fe8752c8d7220193c8c71672306786ed741 Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sat, 10 Feb 2024 16:03:10 +0530 Subject: [PATCH 1/6] track `llmReqId` --- src/anthropic.js | 1 + src/cohere.js | 51 ++++++++++++++++-------------------------------- src/openai.js | 14 +++++++++++-- 3 files changed, 30 insertions(+), 36 deletions(-) diff --git a/src/anthropic.js b/src/anthropic.js index ce13280..21d2bda 100644 --- a/src/anthropic.js +++ b/src/anthropic.js @@ -41,6 +41,7 @@ export default function initAnthropic({ llm, dokuUrl, apiKey, environment, appli const duration = (end - start) / 1000; const data = { + llmReqId: response.id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', diff --git a/src/cohere.js b/src/cohere.js index 105561f..617bac4 100644 --- a/src/cohere.js +++ b/src/cohere.js @@ -1,29 +1,5 @@ import {sendData} from './helpers.js'; -/** - * Counts the number of tokens in the given text. - * - * @param {string} text - The input text. - * @return {number} - The calculated number of tokens. - * - * @jsondoc - * { - * "description": "Counts the number of tokens in the given text", - * "params": [{"name": "text", "type": "string", "description": "Text"}], - * "returns": {"type": "number", "description": "Number of tokens."} - * } - */ -function countTokens(text) { - const tokensPerWord = 2.5; - - // Split the text into words - const words = text.split(/\s+/); - - // Calculate the number of tokens - const numTokens = Math.round(words.length * tokensPerWord); - - return numTokens; -} /** * Initializes Cohere functionality with performance tracking and data logging. 
* @@ -72,13 +48,14 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat for (const generation of response.generations) { const data = { + llmReqId: generation.id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', endpoint: 'cohere.generate', skipResp: skipResp, - completionTokens: countTokens(generation.text), - promptTokens: countTokens(prompt), + completionTokens: response.meta["billedUnits"]["outputTokens"], + promptTokens: response.meta["billedUnits"]["inputTokens"], requestDuration: duration, model: model, prompt: prompt, @@ -89,7 +66,8 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat if (!params.hasOwnProperty('stream') || params.stream !== true) { data.finishReason = generation.finish_reason; } - await sendData(data, dokuUrl, apiKey); + // Send the tracked data to Doku. + await sendData(data, dokuUrl, apiKey); } return response; @@ -131,6 +109,7 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat const prompt = params.message; const data = { + llmReqId: response.response_id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -139,9 +118,9 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat requestDuration: duration, model: model, prompt: prompt, - promptTokens: response.meta["billed_units"]["output_tokens"], - completionTokens: response.meta["billed_units"]["input_tokens"], - totalTokens: response.token_count["billed_tokens"], + promptTokens: response.meta["billedUnits"]["outputTokens"], + completionTokens: response.meta["billedUnits"]["inputTokens"], + totalTokens: response.token_count["billedUnits"], response: response.text, }; @@ -169,12 +148,15 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat data.response = "" for await (const message of response) { + if (message.eventType === "stream-end") { + data.llmReqId = 
message.response.response_id; + data.promptTokens = message.response.meta.billed_units["input_tokens"]; + data.completionTokens = message.response.meta.billed_units["output_tokens"]; + } data.response += message.eventType === "text-generation" ? message.text : ""; // Pass the message along so it's not consumed yield message; // this allows the message to flow back to the original caller } - data.promptTokens = countTokens(prompt) - data.completionTokens = countTokens(data.response) data.totalTokens = data.promptTokens + data.completionTokens const end = performance.now(); @@ -195,14 +177,15 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat const prompt = params.text; const data = { + llmReqId: response.id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', endpoint: 'cohere.summarize', skipResp: skipResp, requestDuration: duration, - completionTokens: response.meta["billed_units"]["output_tokens"], - promptTokens: response.meta["billed_units"]["input_tokens"], + completionTokens: response.meta["billedUnits"]["outputTokens"], + promptTokens: response.meta["billedUnits"]["inputTokens"], model: model, prompt: prompt, response: response.summary, diff --git a/src/openai.js b/src/openai.js index 89f90f4..d56ae8d 100644 --- a/src/openai.js +++ b/src/openai.js @@ -64,6 +64,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat dataResponse += content; passThroughStream.push(chunk); // Push chunk to the pass-through stream } + var responseId = chunk.id; } passThroughStream.push(null); // Signal end of the pass-through stream @@ -93,6 +94,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat // Prepare the data object for Doku const data = { + llmReqId: responseId, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -136,6 +138,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, 
environment, applicat } let prompt = formattedMessages.join("\n"); const data = { + llmReqId: response.id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -169,6 +172,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat data.promptTokens = response.usage.prompt_tokens; data.totalTokens = response.usage.total_tokens; } + await sendData(data, dokuUrl, apiKey); return response; @@ -198,6 +202,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat dataResponse += content; passThroughStream.push(chunk); // Push chunk to the pass-through stream } + var responseId = chunk.id; } passThroughStream.push(null); // Signal end of the pass-through stream @@ -206,6 +211,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat const duration = (end - start) / 1000; // Prepare the data object for Doku const data = { + llmReqId: responseId, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -229,6 +235,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat const duration = (end - start) / 1000; const data = { + llmReqId: response.id, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -308,7 +315,7 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat skipResp: skipResp, requestDuration: duration, model: params.model, - finetuneJobId: response.id, + llmReqId: response.id, finetuneJobStatus: response.status, }; @@ -331,9 +338,10 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat } const quality = params.quality ?? 
'standard'; - + var responseId = response.created; for (const item of response.data) { const data = { + llmReqId: responseId, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', @@ -365,8 +373,10 @@ export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicat if (params.response_format && params.response_format === 'b64_json') { imageFormat = 'b64_json'; } + var responseId = response.created; for (const item of response.data) { const data = { + llmReqId: responseId, environment: environment, applicationName: applicationName, sourceLanguage: 'Javascript', From 18676086baf18fbbeb267bae6d4ab2390c040eee Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sat, 10 Feb 2024 17:14:51 +0530 Subject: [PATCH 2/6] fix --- README.md | 8 ++++---- src/cohere.js | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 2eee023..99b6b1c 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ const openai = new OpenAI({ }); // Pass the above `openai` object along with your DOKU URL and API key and this will make sure that all OpenAI calls are automatically tracked. -DokuMetry.init({llm: openai, dokuURL: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) +DokuMetry.init({llm: openai, dokuUrl: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) async function main() { const chatCompletion = await openai.chat.completions.create({ @@ -73,7 +73,7 @@ const anthropic = new Anthropic({ }); // Pass the above `anthropic` object along with your DOKU URL and API key and this will make sure that all Anthropic calls are automatically tracked. 
-DokuMetry.init({llm: anthropic, dokuURL: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) +DokuMetry.init({llm: anthropic, dokuUrl: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) async function main() { const completion = await anthropic.completions.create({ @@ -97,7 +97,7 @@ const cohere = new CohereClient({ }); // Pass the above `cohere` object along with your DOKU URL and API key and this will make sure that all Cohere calls are automatically tracked. -DokuMetry.init({llm: cohere, dokuURL: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) +DokuMetry.init({llm: cohere, dokuUrl: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN"}) (async () => { const prediction = await cohere.generate({ @@ -114,7 +114,7 @@ DokuMetry.init({llm: cohere, dokuURL: "YOUR_DOKU_URL", apiKey: "YOUR_DOKU_TOKEN" | Parameter | Description | Required | |-------------------|-----------------------------------------------------------|---------------| | llm | Language Learning Model (LLM) Object to track | Yes | -| dokuURL | URL of your Doku Instance | Yes | +| dokuUrl | URL of your Doku Instance | Yes | | apiKey | Your Doku API key | Yes | | environment | Custom environment tag to include in your metrics | Optional | | applicationName | Custom application name tag for your metrics | Optional | diff --git a/src/cohere.js b/src/cohere.js index 617bac4..2b3c990 100644 --- a/src/cohere.js +++ b/src/cohere.js @@ -91,7 +91,7 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat requestDuration: duration, model: model, prompt: prompt, - promptTokens: response.meta["billed_units"]["input_tokens"], + promptTokens: response.meta["billedUnits"]["inputTokens"], }; await sendData(data, dokuUrl, apiKey); @@ -118,9 +118,9 @@ export default function initCohere({ llm, dokuUrl, apiKey, environment, applicat requestDuration: duration, model: model, prompt: prompt, - promptTokens: response.meta["billedUnits"]["outputTokens"], - completionTokens: response.meta["billedUnits"]["inputTokens"], - 
totalTokens: response.token_count["billedUnits"], + promptTokens: response.meta["billed_units"]["input_tokens"], + completionTokens: response.meta["billed_units"]["output_tokens"], + totalTokens: response.token_count["billed_tokens"], response: response.text, }; From 60a365527fc877cdbfecfb0cf1a75c7933ab8d96 Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sun, 11 Feb 2024 12:15:25 +0530 Subject: [PATCH 3/6] update test for cohere --- tests/cohere.test.mjs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/cohere.test.mjs b/tests/cohere.test.mjs index 2947e64..eceefb5 100644 --- a/tests/cohere.test.mjs +++ b/tests/cohere.test.mjs @@ -6,12 +6,9 @@ describe('Cohere Test', () => { const cohere = new CohereClient({ apiKey: process.env.COHERE_API_TOKEN, }); - - before(async () => { - DokuMetry.init({llm: cohere, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); - }); it('should return a response with "created" field', async () => { + DokuMetry.init({llm: cohere, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const text = 'Ice cream is a sweetened frozen food eaten as a snack or dessert. 
' + 'It may be made from milk or cream and is flavoured with a sweetener, ' + @@ -48,6 +45,7 @@ describe('Cohere Test', () => { }).timeout(10000); it('should return a response with prompt as "Doku"', async () => { + DokuMetry.init({llm: cohere, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); try { const generate = await cohere.generate({ prompt: 'Doku', @@ -64,6 +62,7 @@ describe('Cohere Test', () => { }).timeout(10000); it('should return a response with object as "embed"', async () => { + DokuMetry.init({llm: cohere, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); try { const embeddings = await cohere.embed({ texts: ['This is a test'], @@ -79,6 +78,7 @@ describe('Cohere Test', () => { }).timeout(20000); it('should return a response with object as "chat"', async () => { + DokuMetry.init({llm: cohere, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); try { const chatResponse = await cohere.chat({ message: 'Say this is a test', From bfe9903ad1da3ad705d5d99c49c797a277bbfb5e Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sun, 11 Feb 2024 12:16:35 +0530 Subject: [PATCH 4/6] update openai tests --- tests/openai.test.mjs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/openai.test.mjs b/tests/openai.test.mjs index bf48bf2..9a84988 100644 --- a/tests/openai.test.mjs +++ b/tests/openai.test.mjs @@ -10,10 +10,10 @@ describe('OpenAI Test', () => { openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY, }); - await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); }); it('should return a response 
with object as "chat.completion"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const chatCompletion = await openai.chat.completions.create({ messages: [{role: 'user', content: 'Say this is a test'}], model: 'gpt-3.5-turbo', @@ -23,6 +23,7 @@ describe('OpenAI Test', () => { }); it('should return a response with object as "text_completion"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const completion = await openai.completions.create({ model: 'gpt-3.5-turbo-instruct', prompt: 'Say this is a test.', @@ -33,6 +34,7 @@ describe('OpenAI Test', () => { }); it('should return a response with object as "embedding"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const embeddings = await openai.embeddings.create({ model: 'text-embedding-ada-002', input: 'The quick brown fox jumped over the lazy dog', @@ -43,6 +45,7 @@ describe('OpenAI Test', () => { }); it('should return a response with object as "fine_tuning.job"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); try { const fineTuningJob = await openai.fineTuning.jobs.create({ training_file: 'file-m36cc45komO83VJKAY1qVgeP', @@ -59,6 +62,7 @@ describe('OpenAI Test', () => { }).timeout(10000); it('should return a response with "created" field', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", 
applicationName: "dokumetry-node-test", skipResp: false}); const imageGeneration = await openai.images.generate({ model: 'dall-e-2', prompt: 'Generate an image of a cat.', @@ -68,6 +72,7 @@ describe('OpenAI Test', () => { }).timeout(30000); it('should return a response with "created" field', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const imageVariation = await openai.images.createVariation({ image: fs.createReadStream('tests/test-image-for-openai.png'), }); @@ -76,6 +81,7 @@ describe('OpenAI Test', () => { }).timeout(30000); it('should return a response with url as "https://api.openai.com/v1/audio/speech"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const audioSpeech = await openai.audio.speech.create({ model: 'tts-1', voice: 'alloy', From a135d812dd31f025e6c5fb0e744917dd85e2cb45 Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sun, 11 Feb 2024 12:20:42 +0530 Subject: [PATCH 5/6] remove await --- tests/openai.test.mjs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/openai.test.mjs b/tests/openai.test.mjs index 9a84988..896a30b 100644 --- a/tests/openai.test.mjs +++ b/tests/openai.test.mjs @@ -81,7 +81,7 @@ describe('OpenAI Test', () => { }).timeout(30000); it('should return a response with url as "https://api.openai.com/v1/audio/speech"', async () => { - await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: 
false}); const audioSpeech = await openai.audio.speech.create({ model: 'tts-1', voice: 'alloy', From c80c919ae6b986004272b0115923928fad007bb4 Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sun, 11 Feb 2024 12:25:52 +0530 Subject: [PATCH 6/6] add timeout --- tests/openai.test.mjs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/openai.test.mjs b/tests/openai.test.mjs index 896a30b..8e8623b 100644 --- a/tests/openai.test.mjs +++ b/tests/openai.test.mjs @@ -87,7 +87,6 @@ describe('OpenAI Test', () => { voice: 'alloy', input: 'Today is a wonderful day to build something people love!', }); - expect(audioSpeech.url).to.equal('https://api.openai.com/v1/audio/speech'); - }); + }).timeout(30000); });