From 1f325fbbd6e024062639ef04e39f8a76f9e7d99e Mon Sep 17 00:00:00 2001
From: Jorge Cortes
Date: Wed, 26 Feb 2025 16:16:36 -0500
Subject: [PATCH] [Components] kindo - new action

---
 components/kindo/actions/chat/chat.mjs | 120 +++++++++++++++++++++++++
 components/kindo/common/utils.mjs      |  52 +++++++++++
 components/kindo/kindo.app.mjs         |  32 +++++-
 components/kindo/package.json          |   7 +-
 pnpm-lock.yaml                         |   6 +-
 5 files changed, 209 insertions(+), 8 deletions(-)
 create mode 100644 components/kindo/actions/chat/chat.mjs
 create mode 100644 components/kindo/common/utils.mjs

diff --git a/components/kindo/actions/chat/chat.mjs b/components/kindo/actions/chat/chat.mjs
new file mode 100644
index 0000000000000..9e6ccf296a737
--- /dev/null
+++ b/components/kindo/actions/chat/chat.mjs
@@ -0,0 +1,120 @@
+import app from "../../kindo.app.mjs";
+import utils from "../../common/utils.mjs";
+
+export default {
+  key: "kindo-chat",
+  name: "Chat",
+  description: "Creates a model response for the given chat conversation using Kindo's API. [See the documentation](https://app.kindo.ai/settings/api) for more information.",
+  version: "0.0.1",
+  type: "action",
+  props: {
+    app,
+    model: {
+      type: "string",
+      label: "Model",
+      description: "The model name from Kindo's available models",
+    },
+    messages: {
+      type: "string[]",
+      label: "Messages",
+      description: "A list of messages comprising the conversation so far. Depending on the [model](https://app.kindo.ai/settings/api) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). [See the documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) for more information. Eg. `[{\"role\": \"user\", \"content\": \"Hello, world!\"}]`",
+    },
+    maxTokens: {
+      type: "integer",
+      label: "Max Tokens",
+      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
+      optional: true,
+    },
+    temperature: {
+      type: "string",
+      label: "Temperature",
+      description: "**Optional**. What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try `0.9` for more creative applications, and `0` (argmax sampling) for ones with a well-defined answer.",
+      optional: true,
+    },
+    topP: {
+      type: "string",
+      label: "Top P",
+      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
+      optional: true,
+    },
+    n: {
+      type: "integer",
+      label: "N",
+      description: "How many completions to generate for each prompt",
+      optional: true,
+    },
+    stop: {
+      type: "string[]",
+      label: "Stop",
+      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
+      optional: true,
+    },
+    presencePenalty: {
+      type: "string",
+      label: "Presence Penalty",
+      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
+      optional: true,
+    },
+    frequencyPenalty: {
+      type: "string",
+      label: "Frequency Penalty",
+      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
+      optional: true,
+    },
+    additionalParameters: {
+      type: "object",
+      label: "Additional Parameters",
+      description: "Additional parameters to pass to the API.",
+      optional: true,
+    },
+  },
+  methods: {
+    chat(args = {}) {
+      return this.app.post({
+        path: "/chat/completions",
+        ...args,
+      });
+    },
+  },
+  async run({ $ }) {
+    const {
+      chat,
+      model,
+      messages,
+      maxTokens,
+      temperature,
+      topP,
+      n,
+      stop,
+      presencePenalty,
+      frequencyPenalty,
+      additionalParameters,
+    } = this;
+
+    const response = await chat({
+      $,
+      data: {
+        model,
+        messages: utils.parseArray(messages),
+        max_tokens: maxTokens,
+        ...(temperature && {
+          temperature: +temperature,
+        }),
+        ...(topP && {
+          top_p: +topP,
+        }),
+        n,
+        stop,
+        ...(presencePenalty && {
+          presence_penalty: +presencePenalty,
+        }),
+        ...(frequencyPenalty && {
+          frequency_penalty: +frequencyPenalty,
+        }),
+        ...additionalParameters,
+      },
+    });
+    $.export("$summary", "Successfully created model response");
+    return response;
+  },
+};
diff --git a/components/kindo/common/utils.mjs b/components/kindo/common/utils.mjs
new file mode 100644
index 0000000000000..0aeff6e32c43a
--- /dev/null
+++ b/components/kindo/common/utils.mjs
@@ -0,0 +1,52 @@
+import { ConfigurationError } from "@pipedream/platform";
+
+const parseJson = (input) => {
+  const parse = (value) => {
+    if (typeof(value) === "string") {
+      try {
+        return parseJson(JSON.parse(value));
+      } catch (e) {
+        return value;
+      }
+    } else if (typeof(value) === "object" && value !== null) {
+      return Object.entries(value)
+        .reduce((acc, [
+          key,
+          val,
+        ]) => Object.assign(acc, {
+          [key]: parse(val),
+        }), {});
+    }
+    return value;
+  };
+
+  return parse(input);
+};
+
+function parseArray(value) {
+  try {
+    if (!value) {
+      return [];
+    }
+
+    if (Array.isArray(value)) {
+      return value;
+    }
+
+    const parsedValue = JSON.parse(value);
+
+    if (!Array.isArray(parsedValue)) {
+      throw new Error("Not an array");
+    }
+
+    return parsedValue;
+
+  } catch (e) {
+    throw new ConfigurationError("Make sure the custom expression contains a valid array object");
+  }
+}
+
+export default {
+  parseJson,
+  parseArray: (value) => parseArray(value)?.map(parseJson),
+};
diff --git a/components/kindo/kindo.app.mjs b/components/kindo/kindo.app.mjs
index 29130c0b10d5b..19e0a0b435788 100644
--- a/components/kindo/kindo.app.mjs
+++ b/components/kindo/kindo.app.mjs
@@ -1,11 +1,33 @@
+import { axios } from "@pipedream/platform";
+
 export default {
   type: "app",
   app: "kindo",
-  propDefinitions: {},
   methods: {
-    // this.$auth contains connected account data
-    authKeys() {
-      console.log(Object.keys(this.$auth));
+    getUrl(path) {
+      return `https://llm.kindo.ai/v1${path}`;
+    },
+    getHeaders(headers) {
+      return {
+        ...headers,
+        "content-type": "application/json",
+        "api-key": this.$auth.api_key,
+      };
+    },
+    makeRequest({
+      $ = this, path, headers, ...args
+    } = {}) {
+      return axios($, {
+        ...args,
+        url: this.getUrl(path),
+        headers: this.getHeaders(headers),
+      });
+    },
+    post(args = {}) {
+      return this.makeRequest({
+        method: "POST",
+        ...args,
+      });
     },
   },
-};
\ No newline at end of file
+};
diff --git a/components/kindo/package.json b/components/kindo/package.json
index 7a4c94b4b8ce6..0fc47446f5641 100644
--- a/components/kindo/package.json
+++ b/components/kindo/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@pipedream/kindo",
-  "version": "0.0.1",
+  "version": "0.1.0",
   "description": "Pipedream Kindo Components",
   "main": "kindo.app.mjs",
   "keywords": [
@@ -11,5 +11,8 @@
   "author": "Pipedream (https://pipedream.com/)",
   "publishConfig": {
     "access": "public"
+  },
+  "dependencies": {
+    "@pipedream/platform": "^3.0.3"
   }
-}
\ No newline at end of file
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 2ce4a44e437dc..4a29808b1ef75 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -6688,7 +6688,11 @@ importers:
         specifier: ^1.5.1
         version: 1.6.6
 
-  components/kindo: {}
+  components/kindo:
+    dependencies:
+      '@pipedream/platform':
+        specifier: ^3.0.3
+        version: 3.0.3
 
   components/kingsumo:
     dependencies: