Commit 65532e5
fix: fix type hint
1 parent 0a1c1a2 commit 65532e5

File tree

2 files changed: +127 −1 lines changed

litellm/llms/bedrock/chat/converse_transformation.py

Lines changed: 1 addition & 1 deletion
@@ -805,7 +805,7 @@ def _process_tools_and_beta(
         model: str,
         headers: Optional[dict],
         additional_request_params: dict,
-    ) -> tuple[List[ToolBlock], list]:
+    ) -> Tuple[List[ToolBlock], list]:
         """Process tools and collect anthropic_beta values."""
         bedrock_tools: List[ToolBlock] = []
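A note on the one-line change above (the commit message only says "fix type hint", so the rationale is an inference): subscripting the builtin tuple in an annotation, as in tuple[List[ToolBlock], list], is only valid from Python 3.9 onward (PEP 585), whereas typing.Tuple also works on older interpreters. A minimal, self-contained sketch of the difference, using placeholder functions rather than the actual _process_tools_and_beta method:

from typing import List, Tuple

# On Python 3.8, evaluating a builtin-generic annotation such as
#     def broken() -> tuple[List[int], list]: ...
# raises "TypeError: 'type' object is not subscriptable", because PEP 585
# generics only exist from Python 3.9 on. The typing alias below is accepted
# on Python 3.8 as well:
def fixed() -> Tuple[List[int], list]:
    return ([1, 2, 3], [])

print(fixed())  # ([1, 2, 3], [])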

litellm/model_prices_and_context_window_backup.json

Lines changed: 126 additions & 0 deletions
@@ -20943,6 +20943,132 @@
         "mode": "embedding",
         "output_cost_per_token": 0.0
     },
+    "wandb/openai/gpt-oss-120b": {
+        "max_tokens": 131072,
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "input_cost_per_token": 0.015,
+        "output_cost_per_token": 0.06,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/openai/gpt-oss-20b": {
+        "max_tokens": 131072,
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "input_cost_per_token": 0.005,
+        "output_cost_per_token": 0.02,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/zai-org/GLM-4.5": {
+        "max_tokens": 131072,
+        "max_input_tokens": 131072,
+        "max_output_tokens": 131072,
+        "input_cost_per_token": 0.055,
+        "output_cost_per_token": 0.2,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/Qwen/Qwen3-235B-A22B-Instruct-2507": {
+        "max_tokens": 262144,
+        "max_input_tokens": 262144,
+        "max_output_tokens": 262144,
+        "input_cost_per_token": 0.01,
+        "output_cost_per_token": 0.01,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/Qwen/Qwen3-Coder-480B-A35B-Instruct": {
+        "max_tokens": 262144,
+        "max_input_tokens": 262144,
+        "max_output_tokens": 262144,
+        "input_cost_per_token": 0.1,
+        "output_cost_per_token": 0.15,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/Qwen/Qwen3-235B-A22B-Thinking-2507": {
+        "max_tokens": 262144,
+        "max_input_tokens": 262144,
+        "max_output_tokens": 262144,
+        "input_cost_per_token": 0.01,
+        "output_cost_per_token": 0.01,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/moonshotai/Kimi-K2-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.135,
+        "output_cost_per_token": 0.4,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/meta-llama/Llama-3.1-8B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.022,
+        "output_cost_per_token": 0.022,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/deepseek-ai/DeepSeek-V3.1": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.055,
+        "output_cost_per_token": 0.165,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/deepseek-ai/DeepSeek-R1-0528": {
+        "max_tokens": 161000,
+        "max_input_tokens": 161000,
+        "max_output_tokens": 161000,
+        "input_cost_per_token": 0.135,
+        "output_cost_per_token": 0.54,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/deepseek-ai/DeepSeek-V3-0324": {
+        "max_tokens": 161000,
+        "max_input_tokens": 161000,
+        "max_output_tokens": 161000,
+        "input_cost_per_token": 0.114,
+        "output_cost_per_token": 0.275,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/meta-llama/Llama-3.3-70B-Instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.071,
+        "output_cost_per_token": 0.071,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/meta-llama/Llama-4-Scout-17B-16E-Instruct": {
+        "max_tokens": 64000,
+        "max_input_tokens": 64000,
+        "max_output_tokens": 64000,
+        "input_cost_per_token": 0.017,
+        "output_cost_per_token": 0.066,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
+    "wandb/microsoft/Phi-4-mini-instruct": {
+        "max_tokens": 128000,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 128000,
+        "input_cost_per_token": 0.008,
+        "output_cost_per_token": 0.035,
+        "litellm_provider": "wandb",
+        "mode": "chat"
+    },
     "watsonx/ibm/granite-3-8b-instruct": {
         "input_cost_per_token": 0.0002,
         "litellm_provider": "watsonx",
