|
defmodule HyperLLM.Chat do
  @moduledoc """
  HyperLLM.Chat is a single interface for interacting with LLM providers.

  The interface uses the OpenAI chat completion API:
  https://platform.openai.com/docs/api-reference/chat
  """

  @doc """
  Requests a chat completion for `messages` against `model`.

  The model may be given either as a `"provider/model"` string (resolved via
  `HyperLLM.Model.new/1`) or as an already-built `HyperLLM.Model` struct.
  Options in `opts` override the model's stored config; the `:model` option is
  always set to the resolved model name before the provider is called.

  ## Example

      HyperLLM.Chat.completion("openai/gpt-4o-mini", [%{role: :user, content: "Hello"}], [])
      #=> {:ok,
      #     %{
      #       "id" => "chatcmpl-123",
      #       "object" => "chat.completion",
      #       "created" => 1677652288,
      #       "model" => "gpt-4o-mini",
      #       "system_fingerprint" => "fp_44709d6fcb",
      #       "choices" => [
      #         %{
      #           "index" => 0,
      #           "message" => %{
      #             "role" => "assistant",
      #             "content" => "\\n\\nHello there, how may I assist you today?"
      #           },
      #           "logprobs" => nil,
      #           "finish_reason" => "stop"
      #         }
      #       ],
      #       "service_tier" => "default",
      #       "usage" => %{
      #         "prompt_tokens" => 9,
      #         "completion_tokens" => 12,
      #         "total_tokens" => 21,
      #         "completion_tokens_details" => %{
      #           "reasoning_tokens" => 0,
      #           "accepted_prediction_tokens" => 0,
      #           "rejected_prediction_tokens" => 0
      #         }
      #       }
      #     }}
  """
  # NOTE(review): the previous spec declared {:ok, binary()}, but the documented
  # return is a decoded response map — widened to term() pending confirmation of
  # the provider contract.
  @spec completion(String.t(), list(), keyword()) :: {:ok, term()} | {:error, term()}
  def completion(model_name, messages, opts) when is_binary(model_name) do
    # Resolve the "provider/model" string into a model struct, then delegate.
    completion(HyperLLM.Model.new(model: model_name), messages, opts)
  end

  @spec completion(HyperLLM.Model.t(), list(), keyword()) ::
          {:ok, term()} | {:error, term()}
  def completion(%HyperLLM.Model{} = model, messages, opts) do
    # Caller-supplied opts take precedence over the model's stored config,
    # but :model is always pinned to the resolved model name.
    opts =
      model.config
      |> Keyword.merge(opts)
      |> Keyword.put(:model, model.model)

    model.provider.completion(messages, opts)
  end
end
0 commit comments