import app from "../../kindo.app.mjs";
import utils from "../../common/utils.mjs";

export default {
  key: "kindo-chat",
  name: "Chat",
  description: "Creates a model response for the given chat conversation using Kindo's API. [See the documentation](https://app.kindo.ai/settings/api) for more information.",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    model: {
      type: "string",
      label: "Model",
      description: "The model name from Kindo's available models",
    },
    messages: {
      type: "string[]",
      label: "Messages",
      description: "A list of messages comprising the conversation so far. Depending on the [model](https://app.kindo.ai/settings/api) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). [See the documentation](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages) for more information. E.g. `[{\"role\": \"user\", \"content\": \"Hello, world!\"}]`",
    },
    maxTokens: {
      label: "Max Tokens",
      description: "The maximum number of [tokens](https://beta.openai.com/tokenizer) to generate in the completion.",
      type: "integer",
      optional: true,
    },
    temperature: {
      label: "Temperature",
      description: "**Optional**. What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.",
      type: "string",
      optional: true,
    },
    topP: {
      label: "Top P",
      description: "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.",
      type: "string",
      optional: true,
    },
    n: {
      label: "N",
      description: "How many completions to generate for each prompt",
      type: "integer",
      optional: true,
    },
    stop: {
      label: "Stop",
      description: "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.",
      type: "string[]",
      optional: true,
    },
    presencePenalty: {
      label: "Presence Penalty",
      description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      type: "string",
      optional: true,
    },
    frequencyPenalty: {
      label: "Frequency Penalty",
      description: "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      type: "string",
      optional: true,
    },
  },
  methods: {
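    // Sends the request body to Kindo's /chat/completions endpoint via the app file's post helper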
    chat(args = {}) {
      return this.app.post({
        path: "/chat/completions",
        ...args,
      });
    },
  },
  async run({ $ }) {
    const {
      chat,
      model,
      messages,
      maxTokens,
      temperature,
      topP,
      n,
      stop,
      presencePenalty,
      frequencyPenalty,
    } = this;

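    // Map the camelCase props onto the snake_case parameters expected in the chat completions request body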
    const response = await chat({
      $,
      data: {
        model,
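        // Messages arrive as JSON strings from the string[] prop; parseArray is assumed to convert them into message objects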
        messages: utils.parseArray(messages),
        max_tokens: maxTokens,
        temperature,
        top_p: topP,
        n,
        stop,
        presence_penalty: presencePenalty,
        frequency_penalty: frequencyPenalty,
      },
    });
    $.export("$summary", "Successfully created model response");
    return response;
  },
};