|
16 | 16 | [taoensso.telemere :as t])) |
17 | 17 |
|
;; Base URL for Google's OpenAI-compatible Generative Language API (v1beta).
(def google-generative-api-url "https://generativelanguage.googleapis.com/v1beta/openai")
;; Fully-qualified chat-completions endpoint, derived from the base API URL.
(def google-completions-url
  (format "%s/chat/completions" google-generative-api-url))
19 | 20 |
|
20 | 21 | (comment |
21 | 22 | ;; Get list of valid models |
|
43 | 44 | "gemma-3-12b-it" "gemini-2.0-flash" "gemini-2.5-flash-preview-04-17" "gemini-2.0-flash-001" "gemini-1.5-flash-001" "gemma-3-1b-it" "gemini-2.5-pro-preview-05-06" |
44 | 45 | "imagen-3.0-generate-002" "gemini-2.0-flash-exp"])) |
45 | 46 |
|
46 | | -(def GoogleLLMConfigSchema |
47 | | - [:map |
48 | | - {:description "Google LLM configuration"} |
49 | | - [:llm/model model-schema] |
50 | | - [:google/api-key {:optional true} [:string |
51 | | - {:description "Google API key" |
52 | | - :secret true |
53 | | - :error/message "Invalid Google"}]]]) |
;; Malli schema for the configuration map accepted by the Google LLM process.
;; :llm/model declares :gemini-2.0-flash as its :default property; :google/api-key
;; is a required string marked :secret (so it can be redacted in logs/errors).
(def GoogleLLMConfigSchema [:map
                            {:description "Google LLM configuration"}
                            [:llm/model {:default :gemini-2.0-flash} model-schema]
                            [:google/api-key [:string
                                              {:description "Google API key"
                                               :secret true
                                               ;; NOTE(review): message looks truncated ("Invalid Google") —
                                               ;; possibly meant "Invalid Google API key". Left unchanged:
                                               ;; this is a runtime string.
                                               :error/message "Invalid Google"}]]])
54 | 54 |
|
55 | | -(defn google-llm-process |
;; Rich-comment (REPL scratch) block: example of a one-shot, non-streaming chat
;; completion against Google's OpenAI-compatible endpoint. Never evaluated at
;; load time. NOTE(review): assumes `secret` resolves credentials from config —
;; confirm against its definition.
(comment

  (:body (uai/normal-chat-completion {:api-key (secret [:google :api-key])
                                      :model :gemini-2.0-flash
                                      :messages [{:role "system"
                                                  :content "You are a voice agent operating via phone. Be
                       concise in your answers. The input you receive comes from a
                       speech-to-text (transcription) system that isn't always
                       efficient and may send unclear text. Ask for
                       clarification when you're unsure what the person said."}
                                                 {:role "user" :content "Do you hear me?"}]
                                      :completions-url google-completions-url}))
  ,)
| 68 | + |
| 69 | +(defn google-llm-process-fn |
56 | 70 | ([] |
57 | 71 | {:ins {:in "Channel for incoming context aggregations"} |
58 | 72 | :outs {:out "Channel where streaming responses will go"} |
|
73 | 87 | (assert (or (frame/llm-context? frame) |
74 | 88 | (frame/control-interrupt-start? frame)) "Invalid frame sent to LLM. Only llm-context or interrupt-start") |
75 | 89 | (let [context (:frame/data frame) |
76 | | - stream-ch (request/stream-chat-completion {:model model |
77 | | - :api-key api-key |
78 | | - :messages (:messages context) |
79 | | - :tools (mapv u/->tool-fn (:tools context))})] |
| 90 | + stream-ch (uai/stream-chat-completion {:model model |
| 91 | + :api-key api-key |
| 92 | + :messages (:messages context) |
| 93 | + :tools (mapv u/->tool-fn (:tools context)) |
| 94 | + :completions-url google-completions-url})] |
80 | 95 | (uai/handle-completion-request! stream-ch llm-read)) |
81 | 96 |
|
82 | 97 | (recur))) |
|
93 | 108 | (frame/llm-context? msg) |
94 | 109 | [state {:llm-write [msg] |
95 | 110 | :out [(frame/llm-full-response-start true)]}])))) |
| 111 | + |
;; Reified flow process built from google-llm-process-fn, ready to be wired into
;; a flow graph. NOTE(review): assumes `flow` aliases clojure.core.async.flow —
;; confirm via the ns form (not visible in this chunk).
(def google-llm-process (flow/process google-llm-process-fn))
0 commit comments