import app from "../../kindo.app.mjs";
import utils from "../../common/utils.mjs";

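// Forwards an OpenAI-style chat completion request to Kindo's chat completions API.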
export default {
  key: "kindo-chat",
  name: "Chat",
  description: "Creates a model response for the given chat conversation using Kindo's API. [See the documentation](https://app.kindo.ai/settings/api) for more information.",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    model: {
      type: "string",
      label: "Model",
      description: "The name of the model to use, from Kindo's list of available models.",
    },
    messages: {
      type: "string[]",
      label: "Messages",
      description: "A list of messages comprising the conversation so far. Depending on the [model](https://app.kindo.ai/settings/api) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). [See the documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) for more information. E.g. `[{\"role\": \"user\", \"content\": \"Hello, world!\"}]`",
    },
    maxTokens: {
      type: "integer",
      label: "Max Tokens",
      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
      optional: true,
    },
    temperature: {
      type: "string",
      label: "Temperature",
      description: "What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks. Try `0.9` for more creative applications, and `0` (argmax sampling) for ones with a well-defined answer.",
      optional: true,
    },
    topP: {
      type: "string",
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So `0.1` means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
      optional: true,
    },
    n: {
      type: "integer",
      label: "N",
      description: "How many completions to generate for each prompt.",
      optional: true,
    },
    stop: {
      type: "string[]",
      label: "Stop",
      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
      optional: true,
    },
    presencePenalty: {
      type: "string",
      label: "Presence Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      optional: true,
    },
    frequencyPenalty: {
      type: "string",
      label: "Frequency Penalty",
      description: "Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      optional: true,
    },
    additionalParameters: {
      type: "object",
      label: "Additional Parameters",
      description: "Additional parameters to pass to the API.",
      optional: true,
    },
  },
  methods: {
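    // Thin wrapper around the shared app client: posts the payload to Kindo's
    // chat completions endpoint (OpenAI-style request body).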
    chat(args = {}) {
      return this.app.post({
        path: "/chat/completions",
        ...args,
      });
    },
  },
  async run({ $ }) {
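    // Pull the prop values and the chat() method off the component.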
    const {
      chat,
      model,
      messages,
      maxTokens,
      temperature,
      topP,
      n,
      stop,
      presencePenalty,
      frequencyPenalty,
      additionalParameters,
    } = this;

    const response = await chat({
      $,
      data: {
        model,
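        // The messages prop is an array of JSON strings; utils.parseArray is
        // expected to turn each entry into a message object.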
        messages: utils.parseArray(messages),
        max_tokens: maxTokens,
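        // Numeric options are collected as strings, so coerce them with unary +
        // and only include them when a value was provided.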
        ...(temperature && {
          temperature: +temperature,
        }),
        ...(topP && {
          top_p: +topP,
        }),
        n,
        stop,
        ...(presencePenalty && {
          presence_penalty: +presencePenalty,
        }),
        ...(frequencyPenalty && {
          frequency_penalty: +frequencyPenalty,
        }),
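        // Spread last so callers can pass (or override) any other supported API parameters.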
        ...additionalParameters,
      },
    });
    $.export("$summary", "Successfully created model response");
    return response;
  },
};