-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathcostTracker.js
77 lines (74 loc) · 3.04 KB
/
costTracker.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/**
* @fileoverview Cost tracking module for LangRoute.
*
* This module handles all cost-related calculations and tracking for LLM API requests.
* It works in conjunction with the token counter to accurately calculate costs
* based on both input and output tokens, using model-specific pricing from the database.
*
* Features:
* - Per-model cost calculation
* - Separate tracking of input and output costs
* - User cost accumulation
* - Database-driven pricing configuration
*
* @module costTracker
* @requires ./tokenCounter
* @requires ./database
*/
// costTracker.js
const { countTokens } = require('./tokenCounter');
const db = require('./database'); // Import Sequelize models
/**
 * Calculates the total cost of an LLM API request based on token usage.
 * Uses model-specific pricing from the database to calculate costs separately
 * for input and output tokens.
 *
 * @async
 * @param {string} modelName - The name of the LLM model (e.g., 'gpt-3.5-turbo', 'mistral-small')
 * @param {number} inputTokens - Number of tokens in the input/prompt
 * @param {number} outputTokens - Number of tokens in the model's response
 * @returns {Promise<{inputTokens: number, outputTokens: number, inputCost: number, outputCost: number, totalCost: number}>}
 *   Cost breakdown; all cost fields are in USD.
 * @throws {Error} If the model configuration is not found in the database
 */
async function calculateCost(modelName, inputTokens, outputTokens) {
  // Pricing rows are keyed by the complete model name, so an exact match is required.
  const modelConfig = await db.LLMModel.findOne({ where: { name: modelName } });
  if (!modelConfig) {
    throw new Error(`Model ${modelName} not found in database.`);
  }
  // Pricing is stored per 1k tokens; scale the actual token counts accordingly.
  const inputCost = (inputTokens / 1000) * modelConfig.inputCostPer1k;
  const outputCost = (outputTokens / 1000) * modelConfig.outputCostPer1k;
  return {
    inputTokens,
    outputTokens,
    inputCost,
    outputCost,
    totalCost: inputCost + outputCost,
  };
}
/**
 * Adds the cost of a completed request to a user's running total and
 * persists the new total. Called after each successful API request so
 * that per-user spending stays accurate.
 *
 * @async
 * @param {Object} user - The user object from the database
 * @param {number} user.totalCost - The user's accumulated spend so far
 * @param {number} cost - Cost (in USD) of the request just completed
 * @returns {Promise<void>}
 */
async function updateUserCost(user, cost) {
  const accumulated = user.totalCost + cost;
  user.totalCost = accumulated;
  await user.save();
}
module.exports = { calculateCost, updateUserCost};