Skip to content

Commit 6f3befe

Browse files
committed
feat(conversation): extract chat logic into conversation module
- Introduced `HyperLLM.Conversation` to handle threaded conversations. - Refactored `HyperLLM.Chat` to focus only on direct API calls. - Renamed `HyperLLM.Models` to `HyperLLM.Model`, which is now used for model configuration. - Standardized provider responses to the OpenAI format. BREAKING CHANGE: `HyperLLM.Chat` no longer manages conversation threads; use `HyperLLM.Conversation` instead.
1 parent c89af09 commit 6f3befe

File tree

13 files changed

+403
-260
lines changed

13 files changed

+403
-260
lines changed

.iex.exs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
alias HyperLLM.{Chat}
1+
alias HyperLLM.{Conversation, Chat, Model, Tool}

lib/chat.ex

Lines changed: 39 additions & 150 deletions
Original file line numberDiff line numberDiff line change
@@ -1,166 +1,55 @@
11
defmodule HyperLLM.Chat do
  @moduledoc """
  HyperLLM.Chat is a single interface for interacting with LLM providers.

  The interface uses the OpenAI chat completion API:
  https://platform.openai.com/docs/api-reference/chat

      iex> HyperLLM.Chat.completion("openai/gpt-4o-mini", [%{role: :user, content: "Hello"}], [])
      {:ok, %{
        "id" => "chatcmpl-123",
        "object" => "chat.completion",
        "created" => 1677652288,
        "model" => "gpt-4o-mini",
        "system_fingerprint" => "fp_44709d6fcb",
        "choices" => [%{
          "index" => 0,
          "message" => %{
            "role" => "assistant",
            "content" => "\n\nHello there, how may I assist you today?"
          },
          "logprobs" => nil,
          "finish_reason" => "stop"
        }],
        "service_tier" => "default",
        "usage" => %{
          "prompt_tokens" => 9,
          "completion_tokens" => 12,
          "total_tokens" => 21,
          "completion_tokens_details" => %{
            "reasoning_tokens" => 0,
            "accepted_prediction_tokens" => 0,
            "rejected_prediction_tokens" => 0
          }
        }
      }}
  """

  @doc """
  Requests a chat completion for `messages`.

  Accepts either a model name string (e.g. `"openai/gpt-4o-mini"`), which is
  resolved via `HyperLLM.Model.new/1`, or an already-built `HyperLLM.Model`
  struct. Returns `{:ok, response}` with the provider response in OpenAI
  format, or `{:error, reason}` on failure.
  """
  @spec completion(String.t(), list(map()), Keyword.t()) :: {:ok, map()} | {:error, term()}
  def completion(model_name, messages, opts) when is_binary(model_name) do
    # Resolve the "provider/model" string to a model struct, then delegate.
    model = HyperLLM.Model.new(model: model_name)
    completion(model, messages, opts)
  end

  @spec completion(HyperLLM.Model.t(), list(map()), Keyword.t()) ::
          {:ok, map()} | {:error, term()}
  def completion(%HyperLLM.Model{} = model, messages, opts) do
    # Caller opts override the model's stored config, but the resolved model
    # name always wins so the provider receives the canonical identifier.
    opts =
      model.config
      |> Keyword.merge(opts)
      |> Keyword.put(:model, model.model)

    model.provider.completion(messages, opts)
  end
end

lib/conversation.ex

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
defmodule HyperLLM.Conversation do
  @derive Jason.Encoder

  @moduledoc """
  HyperLLM.Conversation handles the lifecycle of a conversation, including
  starting, appending messages, and running the conversation.

  ## Example

  A LiveView that sends messages to the conversation and updates it with the
  response.

      defmodule ChatLive do
        use Phoenix.LiveView

        def mount(params, session, socket) do
          {:ok,
           socket
           |> assign(conv: HyperLLM.Conversation.start(model: "openai/gpt-4o-mini"))}
        end

        def handle_event("send_message", %{"message" => message}, socket) do
          conv = HyperLLM.Conversation.append(socket.assigns.conv, message)

          send(self(), :chat_completion)

          {:noreply, socket |> assign(conv: conv)}
        end

        def handle_info(:chat_completion, socket) do
          with {:ok, conv} <- HyperLLM.Conversation.run(socket.assigns.conv) do
            {:noreply, socket |> assign(conv: conv)}
          end
        end
      end
  """

  @type t :: %__MODULE__{}

  # Options forwarded to `HyperLLM.Model.new/1`, e.g. `model: "openai/gpt-4o-mini"`.
  @type model_config :: Keyword.t()

  @enforce_keys [:thread, :model]
  defstruct [:thread, :model]

  @doc """
  Start a new conversation.

  ## Example

      iex> HyperLLM.Conversation.start(model: "openai/gpt-4o-mini")
      %HyperLLM.Conversation{
        thread: [],
        model: %HyperLLM.Model{
          provider: HyperLLM.Provider.OpenAI,
          model: "gpt-4o-mini",
          config: []
        }
      }
  """
  @spec start(model_config()) :: t()
  def start(model_config \\ []) when is_list(model_config) do
    %__MODULE__{
      thread: [],
      model: HyperLLM.Model.new(model_config)
    }
  end

  @doc """
  Append a message to the conversation.

  Accepts a role and content, a single message map, a plain binary (treated
  as a `:user` message), or a list of any of these.

  ## Example

      iex> HyperLLM.Conversation.start(model: "openai/gpt-4o-mini") |> HyperLLM.Conversation.append(:user, "Hello")
      %HyperLLM.Conversation{
        thread: [%{role: :user, content: "Hello"}],
        model: %HyperLLM.Model{
          provider: HyperLLM.Provider.OpenAI,
          model: "gpt-4o-mini",
          config: []
        }
      }
  """
  @spec append(t(), atom(), binary()) :: t()
  def append(%__MODULE__{} = conv, role, content) when is_atom(role) do
    append(conv, %{role: role, content: content})
  end

  @spec append(t(), list()) :: t()
  def append(%__MODULE__{} = conv, messages) when is_list(messages) do
    Enum.reduce(messages, conv, &append(&2, &1))
  end

  @spec append(t(), String.t()) :: t()
  def append(%__MODULE__{} = conv, message) when is_binary(message) do
    append(conv, %{role: :user, content: message})
  end

  @spec append(t(), map()) :: t()
  def append(%__MODULE__{} = conv, message) when is_map(message) do
    %{conv | thread: conv.thread ++ [message]}
  end

  @doc """
  Run the conversation to get a response.

  Returns `{:ok, conv}` with the assistant message appended to the thread,
  or `{:error, reason}` when the provider call fails or the response carries
  no usable choice.

  ## Example

      iex> HyperLLM.Conversation.start(model: "openai/gpt-4o-mini") |> HyperLLM.Conversation.append(:user, "Hello") |> HyperLLM.Conversation.run()
      {:ok, %HyperLLM.Conversation{
        thread: [%{role: :user, content: "Hello"}, %{role: :assistant, content: "Hello, how can I help you today?"}],
        model: "gpt-4o-mini"
      }}
  """
  @spec run(t()) :: {:ok, t()} | {:error, term()}
  def run(%__MODULE__{} = conv) do
    with {:ok, response} <- HyperLLM.Chat.completion(conv.model, conv.thread, []),
         # Pattern-match instead of hd/1 so an empty choices list cannot raise.
         [choice | _rest] <- response["choices"],
         %{"role" => role, "content" => content} <- choice["message"],
         role_atom when not is_nil(role_atom) <- to_role(role) do
      message = %{role: role_atom, content: content}
      {:ok, %{conv | thread: conv.thread ++ [message]}}
    else
      {:error, reason} -> {:error, reason}
      _unexpected -> {:error, :invalid_response}
    end
  end

  @doc """
  Same as `run/1` but returns the conversation directly, raising on error.
  """
  @spec run!(t()) :: t()
  def run!(%__MODULE__{} = conv) do
    case run(conv) do
      {:ok, conv} ->
        conv

      {:error, reason} when is_binary(reason) ->
        raise reason

      {:error, reason} ->
        # `raise` only accepts binaries/exceptions; wrap other reasons.
        raise "conversation run failed: #{inspect(reason)}"
    end
  end

  # Map known OpenAI role strings to atoms without String.to_atom/1, which
  # would let a misbehaving provider exhaust the atom table. Unknown roles
  # yield nil, which `run/1` turns into {:error, :invalid_response}.
  defp to_role("system"), do: :system
  defp to_role("developer"), do: :developer
  defp to_role("user"), do: :user
  defp to_role("assistant"), do: :assistant
  defp to_role("tool"), do: :tool
  defp to_role("function"), do: :function
  defp to_role(_other), do: nil
end

0 commit comments

Comments
 (0)