9 changes: 7 additions & 2 deletions server/utils/AiProviders/openRouter/index.js
@@ -238,7 +238,7 @@ class OpenRouterLLM {
];
}

- async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ async getChatCompletion(messages = null, { temperature = 0.7, user = null }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`OpenRouter chat: ${this.model} is not valid for chat completion!`
@@ -253,6 +253,9 @@ class OpenRouterLLM {
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
+ // Add user tracking if user information is available
+ // This enables OpenRouter's user tracking features for multi-user systems
+ ...(user ? { user } : {}),
})
.catch((e) => {
throw new Error(e.message);
@@ -294,8 +297,10 @@
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
+ // Add user tracking if user information is available
+ // This enables OpenRouter's user tracking features for multi-user systems
+ ...(user ? { user } : {}),
}),
messages
// We have to manually count the tokens
// OpenRouter has a ton of providers and they all can return slightly differently
// some return chunk.usage on STOP, some do it after stop, its inconsistent.
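
For readers skimming the diff: the conditional spread above only attaches a `user` key to the OpenRouter request body when a value is actually supplied, so existing callers that never pass one send exactly the same payload as before. A minimal sketch of that pattern, using a hypothetical `buildPayload` helper and example values that are not part of this PR:

```js
// Hypothetical illustration of the conditional-spread pattern from the diff above.
// `buildPayload`, the model name, and the example identifier are made up for this sketch.
function buildPayload(messages, { temperature = 0.7, user = null } = {}) {
  return {
    model: "openrouter/auto", // placeholder model name
    messages,
    temperature,
    include_reasoning: true,
    // Only include the `user` key when a value was provided.
    ...(user ? { user } : {}),
  };
}

console.log(buildPayload([{ role: "user", content: "Hi" }]));
// -> payload has no `user` key
console.log(buildPayload([{ role: "user", content: "Hi" }], { user: "workspace-user-123" }));
// -> payload includes user: "workspace-user-123"
```

Because spreading an empty object is a no-op, single-user or anonymous requests are unchanged.
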
3 changes: 3 additions & 0 deletions server/utils/chats/apiChatHandler.js
@@ -307,6 +307,7 @@ async function chatSync({
const { textResponse, metrics: performanceMetrics } =
await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
+ user,
});

if (!textResponse) {
@@ -649,6 +650,7 @@ async function streamChat({
const { textResponse, metrics: performanceMetrics } =
await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
+ user,
});
completeText = textResponse;
metrics = performanceMetrics;
@@ -664,6 +666,7 @@
} else {
const stream = await LLMConnector.streamGetChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
+ user,
});
completeText = await LLMConnector.handleStream(response, stream, { uuid });
metrics = stream.metrics;
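
Both branches of the API chat handler now forward the same `user` value into the connector options. A rough sketch of the resulting call shape; the connector, workspace, and user objects here are stand-ins for whatever the real handlers receive, not definitions from this PR:

```js
// Illustrative only: how a caller might thread `user` through to the connector.
async function answerWithTracking(LLMConnector, workspace, messages, user) {
  // `user` could be e.g. { id, username } or a plain identifier, depending on the caller.
  const { textResponse, metrics } = await LLMConnector.getChatCompletion(messages, {
    temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
    user, // forwarded so providers that support it (such as OpenRouter) can tag the request
  });
  return { textResponse, metrics };
}
```
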
2 changes: 2 additions & 0 deletions server/utils/chats/stream.js
@@ -248,6 +248,7 @@ async function streamChatWithWorkspace(
const { textResponse, metrics: performanceMetrics } =
await LLMConnector.getChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
+ user,
});

completeText = textResponse;
@@ -264,6 +265,7 @@
} else {
const stream = await LLMConnector.streamGetChatCompletion(messages, {
temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
+ user,
});
completeText = await LLMConnector.handleStream(response, stream, {
uuid,
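
OpenAI-compatible chat APIs generally document `user` as a string identifier for the end user. If the value threaded through these handlers is a richer user record, a provider could normalize it before sending; a hypothetical helper along these lines (not part of this PR) is one way to do that:

```js
// Hypothetical normalizer, not part of this PR: reduce a user record to a
// stable string identifier before attaching it to the provider payload.
function toUserIdentifier(user = null) {
  if (!user) return null;
  if (typeof user === "string") return user;
  // Prefer a stable id; fall back to username when no id is present.
  return String(user.id ?? user.username ?? "") || null;
}

// toUserIdentifier({ id: 42, username: "alice" }) -> "42"
// toUserIdentifier("alice")                       -> "alice"
// toUserIdentifier(null)                          -> null
```
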