[Components] kindo - new action
jcortes committed Feb 27, 2025
1 parent 79be8f3 commit 24430f9
Showing 5 changed files with 215 additions and 20 deletions.
120 changes: 120 additions & 0 deletions components/kindo/actions/chat/chat.mjs
@@ -0,0 +1,120 @@
import app from "../../kindo.app.mjs";
import utils from "../../common/utils.mjs";

export default {
  key: "kindo-chat",
  name: "Chat",
  description: "Creates a model response for the given chat conversation using Kindo's API. [See the documentation](https://app.kindo.ai/settings/api) for more information.",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    model: {
      type: "string",
      label: "Model",
      description: "The model name from Kindo's available models",
    },
    messages: {
      type: "string[]",
      label: "Messages",
      description: "A list of messages comprising the conversation so far. Depending on the [model](https://app.kindo.ai/settings/api) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). [See the documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) for more information. Eg. `[{\"role\": \"user\", \"content\": \"Hello, world!\"}]`",
    },
    maxTokens: {
      type: "integer",
      label: "Max Tokens",
      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
      optional: true,
    },
    temperature: {
      type: "string",
      label: "Temperature",
      description: "**Optional**. What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks. Try `0.9` for more creative applications, and `0` (argmax sampling) for ones with a well-defined answer.",
      optional: true,
    },
    topP: {
      type: "string",
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
      optional: true,
    },
    n: {
      type: "integer",
      label: "N",
      description: "How many completions to generate for each prompt.",
      optional: true,
    },
    stop: {
      type: "string[]",
      label: "Stop",
      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
      optional: true,
    },
    presencePenalty: {
      type: "string",
      label: "Presence Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      optional: true,
    },
    frequencyPenalty: {
      type: "string",
      label: "Frequency Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      optional: true,
    },
    additionalParameters: {
      type: "object",
      label: "Additional Parameters",
      description: "Additional parameters to pass to the API.",
      optional: true,
    },
  },
  methods: {
    chat(args = {}) {
      return this.app.post({
        path: "/chat/completions",
        ...args,
      });
    },
  },
  async run({ $ }) {
    const {
      chat,
      model,
      messages,
      maxTokens,
      temperature,
      topP,
      n,
      stop,
      presencePenalty,
      frequencyPenalty,
      additionalParameters,
    } = this;

    const response = await chat({
      $,
      data: {
        model,
        messages: utils.parseArray(messages),
        max_tokens: maxTokens,
        ...(temperature && {
          temperature: +temperature,
        }),
        ...(topP && {
          top_p: +topP,
        }),
        n,
        stop,
        ...(presencePenalty && {
          presence_penalty: +presencePenalty,
        }),
        ...(frequencyPenalty && {
          frequency_penalty: +frequencyPenalty,
        }),
        ...additionalParameters,
      },
    });
    $.export("$summary", "Successfully created model response");
    return response;
  },
};
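For reference, a minimal sketch of the request body this action builds for Kindo's chat completions endpoint, assuming a plain text conversation. All values below are illustrative (the model name in particular is a placeholder, not one confirmed by this commit); string props such as Temperature and Top P are coerced to numbers with the unary `+` before being sent.

// Illustrative payload for POST https://llm.kindo.ai/v1/chat/completions
const exampleBody = {
  model: "azure/gpt-4o", // placeholder; use a model listed in Kindo's settings
  messages: [
    {
      role: "user",
      content: "Hello, world!",
    },
  ],
  max_tokens: 256,
  temperature: 0.9,
  top_p: 0.1,
  n: 1,
  stop: [
    "\n\n",
  ],
};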
52 changes: 52 additions & 0 deletions components/kindo/common/utils.mjs
@@ -0,0 +1,52 @@
import { ConfigurationError } from "@pipedream/platform";

const parseJson = (input) => {
  const parse = (value) => {
    if (typeof(value) === "string") {
      try {
        return parseJson(JSON.parse(value));
      } catch (e) {
        return value;
      }
    } else if (Array.isArray(value)) {
      // Preserve arrays (eg. multimodal message content) instead of
      // collapsing them into index-keyed objects.
      return value.map(parse);
    } else if (typeof(value) === "object" && value !== null) {
      return Object.entries(value)
        .reduce((acc, [
          key,
          val,
        ]) => Object.assign(acc, {
          [key]: parse(val),
        }), {});
    }
    return value;
  };

  return parse(input);
};

function parseArray(value) {
  try {
    if (!value) {
      return [];
    }

    if (Array.isArray(value)) {
      return value;
    }

    const parsedValue = JSON.parse(value);

    if (!Array.isArray(parsedValue)) {
      throw new Error("Not an array");
    }

    return parsedValue;

  } catch (e) {
    throw new ConfigurationError("Make sure the custom expression contains a valid JSON array");
  }
}

export default {
  parseJson,
  parseArray: (value) => parseArray(value)?.map(parseJson),
};
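A quick hedged illustration of how these helpers behave for the Messages prop above; the inputs are made up for the example and the import path assumes the caller sits next to this file.

import utils from "./utils.mjs";

// A JSON array passed as a single string is parsed into an array of objects.
utils.parseArray("[{\"role\": \"user\", \"content\": \"Hi\"}]");
// => [ { role: "user", content: "Hi" } ]

// Elements that are themselves JSON strings are deep-parsed by parseJson.
utils.parseArray([
  "{\"role\": \"system\", \"content\": \"You are terse.\"}",
]);
// => [ { role: "system", content: "You are terse." } ]

// Anything that is not (and does not parse to) an array throws a ConfigurationError.
utils.parseArray("{\"role\": \"user\"}"); // throws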
32 changes: 27 additions & 5 deletions components/kindo/kindo.app.mjs
@@ -1,11 +1,33 @@
+import { axios } from "@pipedream/platform";
+
 export default {
   type: "app",
   app: "kindo",
   propDefinitions: {},
   methods: {
-    // this.$auth contains connected account data
-    authKeys() {
-      console.log(Object.keys(this.$auth));
+    getUrl(path) {
+      return `https://llm.kindo.ai/v1${path}`;
+    },
+    getHeaders(headers) {
+      return {
+        ...headers,
+        "content-type": "application/json",
+        "api-key": this.$auth.api_key,
+      };
+    },
+    makeRequest({
+      $ = this, path, headers, ...args
+    } = {}) {
+      return axios($, {
+        ...args,
+        url: this.getUrl(path),
+        headers: this.getHeaders(headers),
+      });
+    },
+    post(args = {}) {
+      return this.makeRequest({
+        method: "POST",
+        ...args,
+      });
     },
   },
-};
+};
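Roughly, a call like `this.app.post({ $, path: "/chat/completions", data })` from the chat action resolves into the following axios request. This is only a sketch of what the helpers above assemble; the API key value is a placeholder standing in for the connected account's `this.$auth.api_key`.

import { axios } from "@pipedream/platform";

// Sketch of the request makeRequest() builds for the chat action.
const postChatCompletion = ($, data) =>
  axios($, {
    method: "POST",
    url: "https://llm.kindo.ai/v1/chat/completions", // getUrl("/chat/completions")
    headers: {
      "content-type": "application/json",
      "api-key": "<kindo-api-key>", // from this.$auth.api_key
    },
    data, // the chat completion payload built by the action
  });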
7 changes: 5 additions & 2 deletions components/kindo/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@pipedream/kindo",
-  "version": "0.0.1",
+  "version": "0.1.0",
   "description": "Pipedream Kindo Components",
   "main": "kindo.app.mjs",
   "keywords": [
@@ -11,5 +11,8 @@
   "author": "Pipedream <[email protected]> (https://pipedream.com/)",
   "publishConfig": {
     "access": "public"
+  },
+  "dependencies": {
+    "@pipedream/platform": "^3.0.3"
   }
-}
+}
24 changes: 11 additions & 13 deletions pnpm-lock.yaml

Some generated files are not rendered by default.

0 comments on commit 24430f9
