
Commit c6dbfd1

🔧 Simplify token limit handling across providers
Remove direct token limit parameter handling in provider configurations.

This change streamlines how token limits are managed across different LLM providers by:

- Removing token_limit injection in ProviderConfig to LLMProviderConfig conversion
- Adding explicit token_limit parameter filtering in OpenAI provider
- Removing special Claude 3.7 token limit handling logic

These changes make token limit handling more consistent and maintainable while preserving the functionality through provider-specific implementations.
1 parent 77e6861

3 files changed (+5, -10 lines)

src/config.rs

Lines changed: 1 addition & 6 deletions
@@ -247,12 +247,7 @@ impl ProviderConfig {
 
     /// Convert to `LLMProviderConfig`
     pub fn to_llm_provider_config(&self) -> LLMProviderConfig {
-        let mut additional_params = self.additional_params.clone();
-
-        // Add token limit to additional params if set
-        if let Some(limit) = self.token_limit {
-            additional_params.insert("token_limit".to_string(), limit.to_string());
-        }
+        let additional_params = self.additional_params.clone();
 
         LLMProviderConfig {
             api_key: self.api_key.clone(),
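For context, here is a minimal, self-contained sketch of the conversion after this change. The struct definitions are reduced to the fields visible in this hunk (the real ProviderConfig and LLMProviderConfig carry more fields), and the field types and the small main are illustrative assumptions; the point is that token_limit stays on ProviderConfig and is no longer copied into additional_params.

use std::collections::HashMap;

// Reduced, illustrative versions of the real config types; the actual structs
// carry more fields than shown here.
struct ProviderConfig {
    api_key: String,
    token_limit: Option<u32>, // kept for provider-specific use, no longer forwarded
    additional_params: HashMap<String, String>,
}

struct LLMProviderConfig {
    api_key: String,
    additional_params: HashMap<String, String>,
}

impl ProviderConfig {
    /// Convert to `LLMProviderConfig` without injecting `token_limit`
    /// into `additional_params`.
    fn to_llm_provider_config(&self) -> LLMProviderConfig {
        let additional_params = self.additional_params.clone();

        LLMProviderConfig {
            api_key: self.api_key.clone(),
            additional_params,
        }
    }
}

fn main() {
    let config = ProviderConfig {
        api_key: "example-key".to_string(),
        token_limit: Some(4096),
        additional_params: HashMap::new(),
    };
    let llm_config = config.to_llm_provider_config();
    // token_limit is no longer injected into the forwarded parameters.
    assert!(!llm_config.additional_params.contains_key("token_limit"));
    println!("forwarded params: {:?}", llm_config.additional_params);
}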

src/llm_providers/claude.rs

Lines changed: 0 additions & 4 deletions
@@ -35,10 +35,6 @@ impl LLMProvider for ClaudeProvider {
 
         // Add additional parameters from the configuration
         for (key, value) in &self.config.additional_params {
-            // Skip token_limit parameter for Claude 3.7 models
-            if key == "token_limit" && self.config.model.contains("claude-3-7") {
-                continue;
-            }
             request_body[key] = serde_json::Value::String(value.clone());
         }
 

src/llm_providers/openai.rs

Lines changed: 4 additions & 0 deletions
@@ -34,6 +34,10 @@ impl LLMProvider for OpenAIProvider {
 
         // Add additional parameters from the configuration
         for (key, value) in &self.config.additional_params {
+            // Skip token_limit parameter as it's for internal use only
+            if key == "token_limit" {
+                continue;
+            }
             request_body[key] = serde_json::Value::String(value.clone());
         }
 
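To show the added OpenAI-side filter in isolation, here is a minimal, runnable sketch of the same pattern: the token_limit entry is skipped because it is for internal use only, and every other additional parameter is forwarded into the JSON request body as a string. The reduced LLMProviderConfig struct, the apply_additional_params helper name, and the example values ("gpt-4o", "4096", "0.2") are illustrative assumptions, not code from the repository.

use std::collections::HashMap;
use serde_json::json;

// Reduced, illustrative config type; the real LLMProviderConfig has more fields.
struct LLMProviderConfig {
    additional_params: HashMap<String, String>,
}

// Hypothetical free function mirroring the filtering the OpenAI provider now
// performs inline while building its request body.
fn apply_additional_params(config: &LLMProviderConfig, request_body: &mut serde_json::Value) {
    for (key, value) in &config.additional_params {
        // Skip token_limit: it is for internal use only and must not reach the API.
        if key == "token_limit" {
            continue;
        }
        request_body[key] = serde_json::Value::String(value.clone());
    }
}

fn main() {
    let mut params = HashMap::new();
    params.insert("token_limit".to_string(), "4096".to_string());
    params.insert("temperature".to_string(), "0.2".to_string());

    let config = LLMProviderConfig { additional_params: params };
    let mut body = json!({ "model": "gpt-4o", "messages": [] });

    apply_additional_params(&config, &mut body);

    // token_limit was filtered out; temperature was forwarded as a string.
    assert!(body.get("token_limit").is_none());
    assert_eq!(body["temperature"], "0.2");
    println!("{body}");
}

Note that after this commit the Claude provider forwards additional_params without any filter; that is presumably safe because the ProviderConfig conversion above no longer injects token_limit into that map in the first place.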

0 commit comments
