Skip to content

Commit 1f325fb

Browse files
committed
[Components] kindo - new action
1 parent b347438 commit 1f325fb

File tree

5 files changed

+209
-8
lines changed

5 files changed

+209
-8
lines changed
Lines changed: 120 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,120 @@
1+
import app from "../../kindo.app.mjs";
2+
import utils from "../../common/utils.mjs";
3+
4+
export default {
  key: "kindo-chat",
  name: "Chat",
  description: "Creates a model response for the given chat conversation using Kindo's API. [See the documentation](https://app.kindo.ai/settings/api) for more information.",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    model: {
      type: "string",
      label: "Model",
      description: "The model name from Kindo's available models",
    },
    messages: {
      type: "string[]",
      label: "Messages",
      // NOTE: the example is JSON — each element must be an object with `role` and `content`.
      description: "A list of messages comprising the conversation so far. Depending on the [model](https://app.kindo.ai/settings/api) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). [See the documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) for more information. Eg. `[{\"role\": \"user\", \"content\": \"Hello, world!\"}]`",
    },
    maxTokens: {
      type: "integer",
      label: "Max Tokens",
      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
      optional: true,
    },
    temperature: {
      type: "string",
      label: "Temperature",
      description: "**Optional**. What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try `0.9` for more creative applications, and `0` (argmax sampling) for ones with a well-defined answer.",
      optional: true,
    },
    topP: {
      type: "string",
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
      optional: true,
    },
    n: {
      type: "integer",
      label: "N",
      description: "How many completions to generate for each prompt",
      optional: true,
    },
    stop: {
      type: "string[]",
      label: "Stop",
      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
      optional: true,
    },
    presencePenalty: {
      type: "string",
      label: "Presence Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      optional: true,
    },
    frequencyPenalty: {
      type: "string",
      label: "Frequency Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      optional: true,
    },
    additionalParameters: {
      type: "object",
      label: "Additional Parameters",
      description: "Additional parameters to pass to the API.",
      optional: true,
    },
  },
  methods: {
    // Thin wrapper over the app-level POST helper, targeting the chat completions endpoint.
    chat(args = {}) {
      return this.app.post({
        path: "/chat/completions",
        ...args,
      });
    },
  },
  async run({ $ }) {
    const {
      chat,
      model,
      messages,
      maxTokens,
      temperature,
      topP,
      n,
      stop,
      presencePenalty,
      frequencyPenalty,
      additionalParameters,
    } = this;

    // Numeric-string props (temperature, topP, penalties) are forwarded only
    // when set and coerced to numbers; unset optional values (maxTokens, n,
    // stop) are `undefined` and dropped during JSON serialization.
    const response = await chat({
      $,
      data: {
        model,
        messages: utils.parseArray(messages),
        max_tokens: maxTokens,
        ...(temperature && {
          temperature: +temperature,
        }),
        ...(topP && {
          top_p: +topP,
        }),
        n,
        stop,
        ...(presencePenalty && {
          presence_penalty: +presencePenalty,
        }),
        ...(frequencyPenalty && {
          frequency_penalty: +frequencyPenalty,
        }),
        // Spread last so callers may override any of the above.
        ...additionalParameters,
      },
    });
    $.export("$summary", "Successfully created model response");
    return response;
  },
};

components/kindo/common/utils.mjs

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import { ConfigurationError } from "@pipedream/platform";
2+
3+
/**
 * Recursively parses JSON-encoded strings nested anywhere inside `input`.
 *
 * Strings that parse as JSON are replaced by their parsed value (recursively);
 * strings that do not parse are returned unchanged. Object values are walked
 * key by key. Anything else (numbers, booleans, null) passes through as-is.
 *
 * Fix: arrays are now preserved. Previously `typeof [] === "object"` sent
 * arrays through the `Object.entries().reduce` branch, which converted them
 * into index-keyed plain objects (e.g. `[1, 2]` -> `{ 0: 1, 1: 2 }`).
 *
 * @param {*} input - value to normalize
 * @returns {*} the value with all nested JSON strings parsed
 */
const parseJson = (input) => {
  const parse = (value) => {
    if (typeof(value) === "string") {
      try {
        return parseJson(JSON.parse(value));
      } catch (e) {
        // Not valid JSON — keep the raw string.
        return value;
      }
    } else if (Array.isArray(value)) {
      // Walk elements without destroying the array shape.
      return value.map(parse);
    } else if (typeof(value) === "object" && value !== null) {
      return Object.entries(value)
        .reduce((acc, [
          key,
          val,
        ]) => Object.assign(acc, {
          [key]: parse(val),
        }), {});
    }
    return value;
  };

  return parse(input);
};
25+
26+
/**
 * Normalizes a prop value into an array.
 *
 * Falsy input yields an empty array; an actual array is returned untouched;
 * a string is JSON-parsed and must decode to an array.
 *
 * @param {*} value - raw prop value (array, JSON string, or empty)
 * @returns {Array} the normalized array
 * @throws {ConfigurationError} when the value cannot be interpreted as an array
 */
function parseArray(value) {
  const invalid = () => new ConfigurationError("Make sure the custom expression contains a valid array object");

  if (!value) {
    return [];
  }

  if (Array.isArray(value)) {
    return value;
  }

  let decoded;
  try {
    decoded = JSON.parse(value);
  } catch (err) {
    throw invalid();
  }

  if (!Array.isArray(decoded)) {
    throw invalid();
  }

  return decoded;
}
48+
49+
// Public surface of the utils module: JSON normalization helpers shared by
// the kindo components.
export default {
  parseJson,
  // Coerce the raw prop value into an array, then JSON-normalize each element.
  parseArray: (value) => {
    const items = parseArray(value);
    return items?.map(parseJson);
  },
};

components/kindo/kindo.app.mjs

Lines changed: 27 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,33 @@
1+
import { axios } from "@pipedream/platform";
2+
13
export default {
24
type: "app",
35
app: "kindo",
4-
propDefinitions: {},
56
methods: {
6-
// this.$auth contains connected account data
7-
authKeys() {
8-
console.log(Object.keys(this.$auth));
7+
getUrl(path) {
8+
return `https://llm.kindo.ai/v1${path}`;
9+
},
10+
getHeaders(headers) {
11+
return {
12+
...headers,
13+
"content-type": "application/json",
14+
"api-key": this.$auth.api_key,
15+
};
16+
},
17+
makeRequest({
18+
$ = this, path, headers, ...args
19+
} = {}) {
20+
return axios($, {
21+
...args,
22+
url: this.getUrl(path),
23+
headers: this.getHeaders(headers),
24+
});
25+
},
26+
post(args = {}) {
27+
return this.makeRequest({
28+
method: "POST",
29+
...args,
30+
});
931
},
1032
},
11-
};
33+
};

components/kindo/package.json

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@pipedream/kindo",
3-
"version": "0.0.1",
3+
"version": "0.1.0",
44
"description": "Pipedream Kindo Components",
55
"main": "kindo.app.mjs",
66
"keywords": [
@@ -11,5 +11,8 @@
1111
"author": "Pipedream <[email protected]> (https://pipedream.com/)",
1212
"publishConfig": {
1313
"access": "public"
14+
},
15+
"dependencies": {
16+
"@pipedream/platform": "^3.0.3"
1417
}
15-
}
18+
}

pnpm-lock.yaml

Lines changed: 5 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)