This repository was archived by the owner on Jul 22, 2025. It is now read-only.

Commit bc0cf3d

DEV: Add spec
1 parent edf7c9f commit bc0cf3d

File tree

spec/fabricators/llm_model_fabricator.rb
spec/requests/admin/ai_usage_controller_spec.rb

2 files changed: +37 -1 lines changed


spec/fabricators/llm_model_fabricator.rb

Lines changed: 3 additions & 0 deletions
@@ -8,6 +8,9 @@
   api_key "123"
   url "https://api.openai.com/v1/chat/completions"
   max_prompt_tokens 131_072
+  input_cost 10
+  cached_input_cost 2.5
+  output_cost 40
 end
 
 Fabricator(:anthropic_model, from: :llm_model) do

spec/requests/admin/ai_usage_controller_spec.rb

Lines changed: 34 additions & 1 deletion
@@ -5,6 +5,7 @@
 RSpec.describe DiscourseAi::Admin::AiUsageController do
   fab!(:admin)
   fab!(:user)
+  fab!(:llm_model)
   let(:usage_report_path) { "/admin/plugins/discourse-ai/ai-usage-report.json" }
 
   before { SiteSetting.discourse_ai_enabled = true }
@@ -35,6 +36,18 @@
     )
   end
 
+  fab!(:log3) do
+    AiApiAuditLog.create!(
+      provider_id: 1,
+      feature_name: "ai_helper",
+      language_model: llm_model.name,
+      request_tokens: 300,
+      response_tokens: 150,
+      cached_tokens: 50,
+      created_at: 3.days.ago,
+    )
+  end
+
   it "returns correct data structure" do
     get usage_report_path
 
@@ -55,7 +68,7 @@
     }
 
     json = response.parsed_body
-    expect(json["summary"]["total_tokens"]).to eq(450) # sum of all tokens
+    expect(json["summary"]["total_tokens"]).to eq(900) # sum of all tokens
   end
 
   it "filters by feature" do
@@ -79,6 +92,26 @@
     expect(models.first["total_tokens"]).to eq(300)
   end
 
+  it "shows an estimated cost" do
+    get usage_report_path, params: { model: llm_model.name }
+
+    json = response.parsed_body
+    summary = json["summary"]
+    feature = json["features"].find { |f| f["feature_name"] == "ai_helper" }
+
+    expected_input_spending = llm_model.input_cost * log3.request_tokens / 1_000_000.0
+    expected_cached_input_spending =
+      llm_model.cached_input_cost * log3.cached_tokens / 1_000_000.0
+    expected_output_spending = llm_model.output_cost * log3.response_tokens / 1_000_000.0
+    expected_total_spending =
+      expected_input_spending + expected_cached_input_spending + expected_output_spending
+
+    expect(feature["input_spending"]).to eq(expected_input_spending.to_s)
+    expect(feature["output_spending"]).to eq(expected_output_spending.to_s)
+    expect(feature["cached_input_spending"]).to eq(expected_cached_input_spending.to_s)
+    expect(summary["total_spending"]).to eq(expected_total_spending.round(2))
+  end
+
   it "handles different period groupings" do
     get usage_report_path, params: { period: "hour" }
     expect(response.status).to eq(200)
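For reference, the cost assertion above reduces to a per-million-token price multiplied by a token count. Below is a minimal worked sketch of that arithmetic, using the fabricator's cost values (input_cost 10, cached_input_cost 2.5, output_cost 40) and log3's token counts, and assuming, as the spec does, that only log3 matches the model filter:

# Costs are treated as prices per 1,000,000 tokens, matching the spec's 1_000_000.0 divisor.
input_spending        = 10 * 300 / 1_000_000.0  # => 0.003
cached_input_spending = 2.5 * 50 / 1_000_000.0  # => 0.000125
output_spending       = 40 * 150 / 1_000_000.0  # => 0.006
total_spending = input_spending + cached_input_spending + output_spending # => 0.009125
total_spending.round(2) # => 0.01, the value the spec expects in summary["total_spending"]

Note that the spec compares the per-feature spendings against the string form of the unrounded floats, while only the summary total is rounded to two decimals.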
