
Commit 449f12f

provider instead of model (#26)
* provider instead of model
* fix format
* fix
* bump version
* no bedrock when not ExAws
* refacto
1 parent ebd94d1 commit 449f12f
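
The user-facing change is a rename of the settings keys and provider modules: `model`/`model_opts` become `provider`/`provider_opts`, and `LlmComposer.Models.*` becomes `LlmComposer.Providers.*`. A before/after sketch of caller settings, taken from the README hunks below (the field values are just the README's own examples):

```elixir
# before this commit
settings = %LlmComposer.Settings{
  model: LlmComposer.Models.OpenAI,
  model_opts: [model: "gpt-4o-mini"],
  system_prompt: "You are a helpful assistant."
}

# after this commit
settings = %LlmComposer.Settings{
  provider: LlmComposer.Providers.OpenAI,
  provider_opts: [model: "gpt-4o-mini"],
  system_prompt: "You are a helpful assistant."
}
```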

11 files changed (+144, -139 lines)


README.md

Lines changed: 12 additions & 12 deletions
@@ -27,8 +27,8 @@ Application.put_env(:llm_composer, :openai_key, "<your api key>")
 defmodule MyChat do
 
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.OpenAI,
-    model_opts: [model: "gpt-4o-mini"],
+    provider: LlmComposer.Providers.OpenAI,
+    provider_opts: [model: "gpt-4o-mini"],
     system_prompt: "You are a helpful assistant."
   }
 
@@ -68,8 +68,8 @@ Application.put_env(:llm_composer, :openai_key, "<your api key>")
 defmodule MyCustomChat do
 
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.OpenAI,
-    model_opts: [model: "gpt-4o-mini"],
+    provider: LlmComposer.Providers.OpenAI,
+    provider_opts: [model: "gpt-4o-mini"],
     system_prompt: "You are an assistant specialized in history.",
     auto_exec_functions: false,
     functions: []
@@ -117,8 +117,8 @@ Make sure to start the Ollama server first.
 defmodule MyChat do
 
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.Ollama,
-    model_opts: [model: "llama3.1"],
+    provider: LlmComposer.Providers.Ollama,
+    provider_opts: [model: "llama3.1"],
     system_prompt: "You are a helpful assistant."
   }
 
@@ -169,9 +169,9 @@ Application.put_env(:llm_composer, :open_router_key, "<your openrouter api key>"
 
 defmodule MyOpenRouterChat do
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.OpenRouter,
+    provider: LlmComposer.Providers.OpenRouter,
     # Use any model available on OpenRouter
-    model_opts: [
+    provider_opts: [
       model: "anthropic/claude-3-sonnet",
       models: ["openai/gpt-4o", "fallback-model2"],
       provider_routing: %{
@@ -223,9 +223,9 @@ config :ex_aws,
 
 defmodule MyBedrockChat do
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.Bedrock,
+    provider: LlmComposer.Providers.Bedrock,
     # Use any model available Bedrock model
-    model_opts: [model: "eu.amazon.nova-lite-v1:0"],
+    provider_opts: [model: "eu.amazon.nova-lite-v1:0"],
     system_prompt: "You are an expert in Quantum Field Theory."
   }
 
@@ -258,8 +258,8 @@ Application.put_env(:llm_composer, :openai_key, "<your api key>")
 defmodule MyChat do
 
   @settings %LlmComposer.Settings{
-    model: LlmComposer.Models.OpenAI,
-    model_opts: [model: "gpt-4o-mini"],
+    provider: LlmComposer.Providers.OpenAI,
+    provider_opts: [model: "gpt-4o-mini"],
     system_prompt: "You are a helpful math assistant that assists with calculations.",
     auto_exec_functions: true,
     functions: [
functions: [

lib/llm_composer.ex

Lines changed: 8 additions & 5 deletions
@@ -11,8 +11,8 @@ defmodule LlmComposer do
   ```elixir
   # Define the settings for your LlmComposer instance
   settings = %LlmComposer.Settings{
-    model: LlmComposer.Models.OpenAI,
-    model_opts: [model: "gpt-4o-mini"],
+    provider: LlmComposer.Providers.OpenAI,
+    provider_opts: [model: "gpt-4o-mini"],
     system_prompt: "You are a helpful assistant.",
     user_prompt_prefix: "",
     auto_exec_functions: false,
@@ -84,11 +84,14 @@ defmodule LlmComposer do
   def run_completion(settings, messages, previous_response \\ nil) do
     system_msg = Message.new(:system, settings.system_prompt)
 
-    model_opts =
-      Keyword.merge(settings.model_opts, functions: settings.functions, api_key: settings.api_key)
+    provider_opts =
+      Keyword.merge(settings.provider_opts,
+        functions: settings.functions,
+        api_key: settings.api_key
+      )
 
     messages
-    |> settings.model.run(system_msg, model_opts)
+    |> settings.provider.run(system_msg, provider_opts)
     |> then(fn
       {:ok, res} ->
         # set previous response all the time
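
Note that `Keyword.merge/2` favours its second argument, so the `functions` and `api_key` taken from the settings struct override any entries with the same keys inside `provider_opts`. A quick illustration with made-up values:

```elixir
iex> Keyword.merge(
...>   [model: "gpt-4o-mini", api_key: "key-from-provider-opts"],
...>   functions: [],
...>   api_key: "key-from-settings"
...> )
[model: "gpt-4o-mini", functions: [], api_key: "key-from-settings"]
```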

lib/llm_composer/models/bedrock.ex

Lines changed: 0 additions & 92 deletions
This file was deleted.
Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
-defmodule LlmComposer.Model do
+defmodule LlmComposer.Provider do
   @moduledoc """
   Behaviour definition for LLM models.
   """
@@ -9,5 +9,5 @@ defmodule LlmComposer.Model do
   @callback run([Message.t()], Message.t() | nil, keyword()) ::
               {:ok, LlmResponse.t()} | {:error, term()}
 
-  @callback model_id() :: atom
+  @callback name() :: atom
 end
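
With the behaviour renamed from `LlmComposer.Model` to `LlmComposer.Provider` and `model_id/0` replaced by `name/0`, a custom provider now implements the two callbacks above. A minimal stub, only to sketch the new callback shapes (the module name and the `:not_implemented` return are illustrative, not part of the library):

```elixir
defmodule MyApp.Providers.Stub do
  @behaviour LlmComposer.Provider

  @impl LlmComposer.Provider
  def name, do: :stub

  @impl LlmComposer.Provider
  def run(_messages, _system_message, _opts) do
    # A real provider would call its backing API here and pipe the result
    # through LlmComposer.LlmResponse.new(name()), as the built-in providers
    # in this commit do.
    {:error, :not_implemented}
  end
end
```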
Lines changed: 94 additions & 0 deletions
@@ -0,0 +1,94 @@
+if Code.ensure_loaded?(ExAws) do
+  defmodule LlmComposer.Providers.Bedrock do
+    @moduledoc """
+    Model implementation for Amazon Bedrock.
+
+    Handles chat completion requests through Amazon Bedrock Converse API. Any
+    Bedrock compatible model can be used. To specify any model-specific options for
+    the request, you can pass them in the `request_params` option and they will
+    be merged into the base request that is prepared.
+    """
+    @behaviour LlmComposer.Provider
+
+    alias LlmComposer.LlmResponse
+    alias LlmComposer.Message
+    alias LlmComposer.Providers.Utils
+
+    @impl LlmComposer.Provider
+    def name, do: :bedrock
+
+    @impl LlmComposer.Provider
+    @doc """
+    Reference: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html
+    """
+    def run(messages, system_message, opts) do
+      model = Keyword.get(opts, :model)
+
+      if model do
+        messages
+        |> build_request(system_message, opts)
+        |> send_request(model)
+        |> handle_response()
+        |> LlmResponse.new(name())
+      else
+        {:error, :model_not_provided}
+      end
+    end
+
+    @spec build_request(list(Message.t()), Message.t(), keyword()) :: map()
+    defp build_request(messages, system_message, opts) do
+      base_request = %{
+        "messages" => Enum.map(messages, &format_message/1),
+        "system" => [format_message(system_message)]
+      }
+
+      req_params = Keyword.get(opts, :request_params, %{})
+
+      base_request
+      |> Map.merge(req_params)
+      |> Utils.cleanup_body()
+    end
+
+    @spec send_request(map(), String.t()) :: {:ok, map()} | {:error, term()}
+    defp send_request(payload, model) do
+      operation = %ExAws.Operation.JSON{
+        data: payload,
+        headers: [{"Content-Type", "application/json"}],
+        http_method: :post,
+        path: "/model/#{model}/converse",
+        service: :"bedrock-runtime"
+      }
+
+      config = [service_override: :bedrock]
+      ExAws.request(operation, config)
+    end
+
+    @spec format_message(Message.t()) :: map()
+    defp format_message(%Message{type: :system, content: content}) do
+      %{"text" => content}
+    end
+
+    defp format_message(%Message{type: role, content: content}) when is_binary(content) do
+      %{"role" => Atom.to_string(role), "content" => [%{"text" => content}]}
+    end
+
+    defp format_message(%Message{type: role, content: content}) do
+      %{"role" => Atom.to_string(role), "content" => content}
+    end
+
+    @spec handle_response({:ok, map()} | {:error, map()}) :: {:ok, map()} | {:error, term}
+    defp handle_response({:ok, %{"output" => %{"message" => _message}} = response}) do
+      {:ok,
+       %{
+         response: response,
+         actions: [],
+         input_tokens: get_in(response, ["usage", "inputTokens"]),
+         output_tokens: get_in(response, ["usage", "outputTokens"])
+       }}
+    end
+
+    defp handle_response({:error, resp}) do
+      {:error, resp}
+    end
+  end
+end
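
Because `build_request/3` does a plain `Map.merge/2` of `request_params` into the base request, extra Converse fields can be passed straight through the provider options. A hedged usage sketch (the `inferenceConfig` map is an example of an additional Converse request field, not something this module defines):

```elixir
settings = %LlmComposer.Settings{
  provider: LlmComposer.Providers.Bedrock,
  provider_opts: [
    model: "eu.amazon.nova-lite-v1:0",
    # merged verbatim into the Converse request body by build_request/3
    request_params: %{"inferenceConfig" => %{"temperature" => 0.2}}
  ],
  system_prompt: "You are an expert in Quantum Field Theory."
}
```

As the commit message notes ("no bedrock when not ExAws"), the whole module is wrapped in `Code.ensure_loaded?(ExAws)`, so it only compiles when `ExAws` is available.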
Lines changed: 7 additions & 7 deletions
@@ -1,15 +1,15 @@
-defmodule LlmComposer.Models.Ollama do
+defmodule LlmComposer.Providers.Ollama do
   @moduledoc """
   Model implementation for Ollama
 
   Basically it calls the Ollama server api for getting the chat responses.
   """
-  @behaviour LlmComposer.Model
+  @behaviour LlmComposer.Provider
 
   use Tesla
 
   alias LlmComposer.LlmResponse
-  alias LlmComposer.Models.Utils
+  alias LlmComposer.Providers.Utils
 
   @uri Application.compile_env(:llm_composer, :ollama_uri, "http://localhost:11434")
 
@@ -28,10 +28,10 @@ defmodule LlmComposer.Models.Ollama do
     end
   )
 
-  @impl LlmComposer.Model
-  def model_id, do: :ollama
+  @impl LlmComposer.Provider
+  def name, do: :ollama
 
-  @impl LlmComposer.Model
+  @impl LlmComposer.Provider
   @doc """
   Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
   """
@@ -43,7 +43,7 @@ defmodule LlmComposer.Models.Ollama do
       |> build_request(system_message, model, opts)
      |> then(&post("/api/chat", &1))
       |> handle_response()
-      |> LlmResponse.new(model_id())
+      |> LlmResponse.new(name())
     else
       {:error, :model_not_provided}
     end
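
The Ollama endpoint is still resolved at compile time via `Application.compile_env(:llm_composer, :ollama_uri, ...)`, defaulting to `http://localhost:11434`, so pointing at another server remains a config-level change. A sketch assuming a standard `config/config.exs` (the host name is illustrative):

```elixir
# config/config.exs
import Config

config :llm_composer, ollama_uri: "http://ollama.internal:11434"
```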
Lines changed: 7 additions & 7 deletions
@@ -1,16 +1,16 @@
-defmodule LlmComposer.Models.OpenAI do
+defmodule LlmComposer.Providers.OpenAI do
   @moduledoc """
   Model implementation for OpenAI
 
   Basically it calls the OpenAI api for getting the chat responses.
   """
-  @behaviour LlmComposer.Model
+  @behaviour LlmComposer.Provider
 
   use Tesla
 
   alias LlmComposer.Errors.MissingKeyError
   alias LlmComposer.LlmResponse
-  alias LlmComposer.Models.Utils
+  alias LlmComposer.Providers.Utils
 
   @default_timeout 50_000
 
@@ -36,10 +36,10 @@ defmodule LlmComposer.Models.OpenAI do
     timeout: Application.get_env(:llm_composer, :timeout) || @default_timeout
   )
 
-  @impl LlmComposer.Model
-  def model_id, do: :open_ai
+  @impl LlmComposer.Provider
+  def name, do: :open_ai
 
-  @impl LlmComposer.Model
+  @impl LlmComposer.Provider
   @doc """
   Reference: https://platform.openai.com/docs/api-reference/chat/create
   """
@@ -56,7 +56,7 @@ defmodule LlmComposer.Models.OpenAI do
      |> build_request(system_message, model, opts)
      |> then(&post("/chat/completions", &1, headers: headers))
      |> handle_response()
-      |> LlmResponse.new(model_id())
+      |> LlmResponse.new(name())
    else
      {:error, :model_not_provided}
    end
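
The OpenAI request timeout is unchanged by this commit: it reads `:timeout` from the `:llm_composer` app env and falls back to `@default_timeout` (50_000 ms). A sketch of overriding it, assuming standard application config:

```elixir
# config/config.exs
import Config

config :llm_composer, timeout: 120_000
```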
