
Commit 2486e0e

DEV: Extract configs to a yml file and allow local config (#1142)
1 parent 08377ba commit 2486e0e

File tree

4 files changed (+97, -82 lines)


.gitignore

Lines changed: 1 addition & 0 deletions

@@ -4,3 +4,4 @@ node_modules
 .env
 evals/log
 evals/cases
+config/eval-llms.local.yml

README.md

Lines changed: 2 additions & 1 deletion

@@ -6,7 +6,8 @@ For more information, please see: https://meta.discourse.org/t/discourse-ai/2592

 ### Evals

-The directory `evals` contains AI evals for the Discourse AI plugin.
+The directory `evals` contains AI evals for the Discourse AI plugin.
+You may create a local config by copying `config/eval-llms.yml` to `config/eval-llms.local.yml` and modifying the values.

 To run them use:

config/eval-llms.yml

Lines changed: 60 additions & 0 deletions

@@ -0,0 +1,60 @@
+llms:
+  gpt-4o:
+    display_name: GPT-4o
+    name: gpt-4o
+    tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer
+    api_key_env: OPENAI_API_KEY
+    provider: open_ai
+    url: https://api.openai.com/v1/chat/completions
+    max_prompt_tokens: 131072
+    vision_enabled: true
+
+  gpt-4o-mini:
+    display_name: GPT-4o-mini
+    name: gpt-4o-mini
+    tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer
+    api_key_env: OPENAI_API_KEY
+    provider: open_ai
+    url: https://api.openai.com/v1/chat/completions
+    max_prompt_tokens: 131072
+    vision_enabled: true
+
+  claude-3.5-haiku:
+    display_name: Claude 3.5 Haiku
+    name: claude-3-5-haiku-latest
+    tokenizer: DiscourseAi::Tokenizer::AnthropicTokenizer
+    api_key_env: ANTHROPIC_API_KEY
+    provider: anthropic
+    url: https://api.anthropic.com/v1/messages
+    max_prompt_tokens: 200000
+    vision_enabled: false
+
+  claude-3.5-sonnet:
+    display_name: Claude 3.5 Sonnet
+    name: claude-3-5-sonnet-latest
+    tokenizer: DiscourseAi::Tokenizer::AnthropicTokenizer
+    api_key_env: ANTHROPIC_API_KEY
+    provider: anthropic
+    url: https://api.anthropic.com/v1/messages
+    max_prompt_tokens: 200000
+    vision_enabled: true
+
+  gemini-2.0-flash:
+    display_name: Gemini 2.0 Flash
+    name: gemini-2-0-flash
+    tokenizer: DiscourseAi::Tokenizer::GeminiTokenizer
+    api_key_env: GEMINI_API_KEY
+    provider: google
+    url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash
+    max_prompt_tokens: 1000000
+    vision_enabled: true
+
+  gemini-2.0-pro-exp:
+    display_name: Gemini 2.0 pro
+    name: gemini-2-0-pro-exp
+    tokenizer: DiscourseAi::Tokenizer::GeminiTokenizer
+    api_key_env: GEMINI_API_KEY
+    provider: google
+    url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-pro-exp
+    max_prompt_tokens: 1000000
+    vision_enabled: true
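
As the README change above notes, these entries can be overridden or extended by a `config/eval-llms.local.yml` file. Because the loader merges the two files per top-level model name (a shallow merge, see `evals/lib/llm.rb` below), an overriding entry must repeat all of its fields, not only the ones that change. A hypothetical local file that points gpt-4o at a self-hosted proxy could look like this (the URL and literal key below are made up for illustration):

llms:
  gpt-4o:
    display_name: GPT-4o
    name: gpt-4o
    tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer
    api_key: sk-example-not-a-real-key              # a literal api_key is accepted instead of api_key_env
    provider: open_ai
    url: http://localhost:4000/v1/chat/completions  # hypothetical local proxy endpoint
    max_prompt_tokens: 131072
    vision_enabled: true

New model names added here are picked up as well, since `choose` and `print` operate on the merged hash.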

evals/lib/llm.rb

Lines changed: 34 additions & 81 deletions

@@ -1,71 +1,23 @@
 # frozen_string_literal: true

 class DiscourseAi::Evals::Llm
-  CONFIGS = {
-    "gpt-4o" => {
-      display_name: "GPT-4o",
-      name: "gpt-4o",
-      tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer",
-      api_key_env: "OPENAI_API_KEY",
-      provider: "open_ai",
-      url: "https://api.openai.com/v1/chat/completions",
-      max_prompt_tokens: 131_072,
-      vision_enabled: true,
-    },
-    "gpt-4o-mini" => {
-      display_name: "GPT-4o-mini",
-      name: "gpt-4o-mini",
-      tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer",
-      api_key_env: "OPENAI_API_KEY",
-      provider: "open_ai",
-      url: "https://api.openai.com/v1/chat/completions",
-      max_prompt_tokens: 131_072,
-      vision_enabled: true,
-    },
-    "claude-3.5-haiku" => {
-      display_name: "Claude 3.5 Haiku",
-      name: "claude-3-5-haiku-latest",
-      tokenizer: "DiscourseAi::Tokenizer::AnthropicTokenizer",
-      api_key_env: "ANTHROPIC_API_KEY",
-      provider: "anthropic",
-      url: "https://api.anthropic.com/v1/messages",
-      max_prompt_tokens: 200_000,
-      vision_enabled: false,
-    },
-    "claude-3.5-sonnet" => {
-      display_name: "Claude 3.5 Sonnet",
-      name: "claude-3-5-sonnet-latest",
-      tokenizer: "DiscourseAi::Tokenizer::AnthropicTokenizer",
-      api_key_env: "ANTHROPIC_API_KEY",
-      provider: "anthropic",
-      url: "https://api.anthropic.com/v1/messages",
-      max_prompt_tokens: 200_000,
-      vision_enabled: true,
-    },
-    "gemini-2.0-flash" => {
-      display_name: "Gemini 2.0 Flash",
-      name: "gemini-2-0-flash",
-      tokenizer: "DiscourseAi::Tokenizer::GeminiTokenizer",
-      api_key_env: "GEMINI_API_KEY",
-      provider: "google",
-      url: "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash",
-      max_prompt_tokens: 1_000_000,
-      vision_enabled: true,
-    },
-    "gemini-2.0-pro-exp" => {
-      display_name: "Gemini 2.0 pro",
-      name: "gemini-2-0-pro-exp",
-      tokenizer: "DiscourseAi::Tokenizer::GeminiTokenizer",
-      api_key_env: "GEMINI_API_KEY",
-      provider: "google",
-      url: "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-pro-exp",
-      max_prompt_tokens: 1_000_000,
-      vision_enabled: true,
-    },
-  }
+  def self.configs
+    return @configs if @configs
+
+    yaml_path = File.join(File.dirname(__FILE__), "../../config/eval-llms.yml")
+    local_yaml_path = File.join(File.dirname(__FILE__), "../../config/eval-llms.local.yml")
+
+    configs = YAML.load_file(yaml_path)["llms"] || {}
+    if File.exist?(local_yaml_path)
+      local_configs = YAML.load_file(local_yaml_path)["llms"] || {}
+      configs = configs.merge(local_configs)
+    end
+
+    @configs = configs
+  end

   def self.print
-    CONFIGS
+    configs
       .keys
       .map do |config_name|
         begin

@@ -79,38 +31,39 @@ def self.print
   end

   def self.choose(config_name)
-    if CONFIGS[config_name].nil?
-      CONFIGS
+    return [] unless configs
+    if !config_name || !configs[config_name]
+      configs
         .keys
-        .map do |config_name|
+        .map do |name|
           begin
-            new(config_name)
-          rescue => e
-            puts "Error initializing #{config_name}: #{e}"
+            new(name)
+          rescue StandardError
             nil
           end
         end
         .compact
-    elsif !CONFIGS.include?(config_name)
-      raise "Invalid llm"
     else
       [new(config_name)]
     end
   end

-  attr_reader :llm_model
-  attr_reader :llm_proxy
-  attr_reader :config_name
+  attr_reader :llm_model, :llm_proxy, :config_name

   def initialize(config_name)
-    config = CONFIGS[config_name].dup
-    api_key_env = config.delete(:api_key_env)
-    if !ENV[api_key_env]
-      raise "Missing API key for #{config_name}, should be set via #{api_key_env}"
+    config = self.class.configs[config_name].dup
+    if config["api_key_env"]
+      api_key_env = config.delete("api_key_env")
+      unless ENV[api_key_env]
+        raise "Missing API key for #{config_name}, should be set via #{api_key_env}"
+      end
+      config[:api_key] = ENV[api_key_env]
+    elsif config["api_key"]
+      config[:api_key] = config.delete("api_key")
+    else
+      raise "No API key or API key env var configured for #{config_name}"
     end
-
-    config[:api_key] = ENV[api_key_env]
-    @llm_model = LlmModel.new(config)
+    @llm_model = LlmModel.new(config.symbolize_keys)
     @llm_proxy = DiscourseAi::Completions::Llm.proxy(@llm_model)
     @config_name = config_name
   end
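
For orientation, here is a short, hypothetical usage sketch of the refactored class. It is not part of the commit, and it assumes the plugin environment is loaded and the API keys named by api_key_env are exported:

# Keys of the merged config: the base file plus any local overrides
# (the merge in Llm.configs is per model name).
DiscourseAi::Evals::Llm.configs.keys
# => ["gpt-4o", "gpt-4o-mini", "claude-3.5-haiku", ...]

# A known name returns a single wrapped model.
llm = DiscourseAi::Evals::Llm.choose("gpt-4o").first
llm.config_name   # => "gpt-4o"
llm.llm_proxy     # proxy built via DiscourseAi::Completions::Llm.proxy

# nil (or an unrecognised name) falls back to every config that can be
# initialized; entries whose API keys are missing raise and are skipped.
DiscourseAi::Evals::Llm.choose(nil).map(&:config_name)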
