
Commit edf7c9f

DEV: updates
1 parent 3a53ae1 commit edf7c9f

File tree

4 files changed (+93, -26 lines)

assets/javascripts/discourse/components/ai-llm-editor-form.gjs

Lines changed: 3 additions & 0 deletions
@@ -46,6 +46,9 @@ export default class AiLlmEditorForm extends Component {
       name: modelInfo.name,
       provider: info.provider,
       provider_params: this.computeProviderParams(info.provider),
+      input_cost: modelInfo.input_cost,
+      output_cost: modelInfo.output_cost,
+      cached_input_cost: modelInfo.cached_input_cost,
     };
   }
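
For context, a minimal sketch of the object this hunk builds when a preset is applied. The preset values are taken from the lib/completions/llm.rb changes below; the variable names and the provider shown are illustrative, not the component's actual code:

// illustrative only: shape of the form data once a preset's cost fields are copied over
const modelInfo = {
  name: "gpt-4.1-mini",
  tokens: 800_000,
  display_name: "GPT-4.1 Mini",
  input_cost: 0.4,
  cached_input_cost: 0.1,
  output_cost: 1.6,
};

const formData = {
  name: modelInfo.name,
  provider: "open_ai",
  provider_params: {}, // this.computeProviderParams(info.provider) in the component
  input_cost: modelInfo.input_cost,
  output_cost: modelInfo.output_cost,
  cached_input_cost: modelInfo.cached_input_cost,
};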

assets/javascripts/discourse/components/ai-usage.gjs

Lines changed: 24 additions & 18 deletions
@@ -2,9 +2,10 @@ import Component from "@glimmer/component";
 import { tracked } from "@glimmer/tracking";
 import { fn, hash } from "@ember/helper";
 import { action } from "@ember/object";
+import didInsert from "@ember/render-modifiers/modifiers/did-insert";
+import didUpdate from "@ember/render-modifiers/modifiers/did-update";
 import { LinkTo } from "@ember/routing";
 import { service } from "@ember/service";
-import { modifier } from "ember-modifier";
 import { eq, gt, lt } from "truth-helpers";
 import ConditionalLoadingSpinner from "discourse/components/conditional-loading-spinner";
 import DButton from "discourse/components/d-button";
@@ -34,22 +35,6 @@ export default class AiUsage extends Component {
   @tracked isCustomDateActive = false;
   @tracked loadingData = true;

-  // TODO: currently doing dollar, but how should we handle other currencies?
-  addCurrency = modifier((element) => {
-    element.querySelectorAll(".d-stat-tile__label").forEach((label) => {
-      if (
-        label.innerText.trim() === i18n("discourse_ai.usage.total_spending")
-      ) {
-        const valueElement = label
-          .closest(".d-stat-tile")
-          ?.querySelector(".d-stat-tile__value");
-        if (valueElement) {
-          valueElement.innerText = `$${valueElement.innerText}`;
-        }
-      }
-    });
-  });
-
   constructor() {
     super(...arguments);
     this.fetchData();
@@ -91,6 +76,22 @@ export default class AiUsage extends Component {
     this.onFilterChange();
   }

+  @action
+  addCurrencyChar(element) {
+    element.querySelectorAll(".d-stat-tile__label").forEach((label) => {
+      if (
+        label.innerText.trim() === i18n("discourse_ai.usage.total_spending")
+      ) {
+        const valueElement = label
+          .closest(".d-stat-tile")
+          ?.querySelector(".d-stat-tile__value");
+        if (valueElement) {
+          valueElement.innerText = `$${valueElement.innerText}`;
+        }
+      }
+    });
+  }
+
   @bind
   takeUsers(start, end) {
     return this.data.users.slice(start, end);
@@ -403,7 +404,12 @@ export default class AiUsage extends Component {
           class="ai-usage__summary"
         >
           <:content>
-            <DStatTiles {{this.addCurrency}} as |tiles|>
+            <DStatTiles
+              {{didInsert this.addCurrencyChar this.metrics}}
+              {{didUpdate this.addCurrencyChar this.metrics}}
+              as |tiles|
+            >
+
             {{#each this.metrics as |metric|}}
               <tiles.Tile
                 class="bar"

config/locales/client.en.yml

Lines changed: 1 addition & 1 deletion
@@ -250,7 +250,7 @@ en:
         request_tokens: "Tokens used when the LLM tries to understand what you are saying"
         response_tokens: "Tokens used when the LLM responds to your prompt"
         cached_tokens: "Previously processed request tokens that the LLM reuses to optimize performance and cost"
-        total_spending: "Cumulative cost of all tokens used by the LLMs"
+        total_spending: "Cumulative cost of all tokens used by the LLMs based on specified cost metrics added to LLM configuration settings"
         periods:
           last_day: "Last 24 hours"
           last_week: "Last week"

lib/completions/llm.rb

Lines changed: 65 additions & 7 deletions
@@ -30,9 +30,26 @@ def presets
           name: "claude-3-7-sonnet",
           tokens: 200_000,
           display_name: "Claude 3.7 Sonnet",
+          input_cost: 3,
+          cached_input_cost: 0.30,
+          output_cost: 15,
+        },
+        {
+          name: "claude-3-5-haiku",
+          tokens: 200_000,
+          display_name: "Claude 3.5 Haiku",
+          input_cost: 0.80,
+          cached_input_cost: 0.08,
+          output_cost: 4,
+        },
+        {
+          name: "claude-3-opus",
+          tokens: 200_000,
+          display_name: "Claude 3 Opus",
+          input_cost: 15,
+          cached_input_cost: 1.50,
+          output_cost: 75,
         },
-        { name: "claude-3-5-haiku", tokens: 200_000, display_name: "Claude 3.5 Haiku" },
-        { name: "claude-3-opus", tokens: 200_000, display_name: "Claude 3 Opus" },
       ],
       tokenizer: DiscourseAi::Tokenizer::AnthropicTokenizer,
       endpoint: "https://api.anthropic.com/v1/messages",
@@ -61,6 +78,8 @@ def presets
           endpoint:
             "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-lite",
           display_name: "Gemini 2.0 Flash Lite",
+          input_cost: 0.075,
+          output_cost: 0.30,
         },
       ],
       tokenizer: DiscourseAi::Tokenizer::GeminiTokenizer,
@@ -69,11 +88,46 @@ def presets
     {
       id: "open_ai",
       models: [
-        { name: "o3-mini", tokens: 200_000, display_name: "o3 Mini" },
-        { name: "o1", tokens: 200_000, display_name: "o1" },
-        { name: "gpt-4.1", tokens: 800_000, display_name: "GPT-4.1" },
-        { name: "gpt-4.1-mini", tokens: 800_000, display_name: "GPT-4.1 Mini" },
-        { name: "gpt-4.1-nano", tokens: 800_000, display_name: "GPT-4.1 Nano" },
+        {
+          name: "o3-mini",
+          tokens: 200_000,
+          display_name: "o3 Mini",
+          input_cost: 1.10,
+          cached_input_cost: 0.55,
+          output_cost: 4.40,
+        },
+        {
+          name: "o1",
+          tokens: 200_000,
+          display_name: "o1",
+          input_cost: 15,
+          cached_input_cost: 7.50,
+          output_cost: 60,
+        },
+        {
+          name: "gpt-4.1",
+          tokens: 800_000,
+          display_name: "GPT-4.1",
+          input_cost: 2,
+          cached_input_cost: 0.5,
+          output_cost: 8,
+        },
+        {
+          name: "gpt-4.1-mini",
+          tokens: 800_000,
+          display_name: "GPT-4.1 Mini",
+          input_cost: 0.40,
+          cached_input_cost: 0.10,
+          output_cost: 1.60,
+        },
+        {
+          name: "gpt-4.1-nano",
+          tokens: 800_000,
+          display_name: "GPT-4.1 Nano",
+          input_cost: 0.10,
+          cached_input_cost: 0.025,
+          output_cost: 0.40,
+        },
       ],
       tokenizer: DiscourseAi::Tokenizer::OpenAiTokenizer,
       endpoint: "https://api.openai.com/v1/chat/completions",
@@ -86,11 +140,15 @@ def presets
           name: "Meta-Llama-3.3-70B-Instruct",
           tokens: 131_072,
           display_name: "Llama 3.3 70B",
+          input_cost: 0.60,
+          output_cost: 1.20,
         },
         {
           name: "Meta-Llama-3.1-8B-Instruct",
           tokens: 16_384,
           display_name: "Llama 3.1 8B",
+          input_cost: 0.1,
+          output_cost: 0.20,
         },
       ],
       tokenizer: DiscourseAi::Tokenizer::Llama3Tokenizer,
