Skip to content

Commit d37be48

Browse files
committed
test: llama-3.3-70b-versatile
1 parent 94cac6f commit d37be48

File tree

3 files changed

+4
-4
lines changed

3 files changed

+4
-4
lines changed

tests/litellm_utils_tests/test_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1054,7 +1054,7 @@ def test_parse_content_for_reasoning(content, expected_reasoning, expected_content)
         ("gemini/gemini-1.5-pro", True),
         ("predibase/llama3-8b-instruct", True),
         ("gpt-3.5-turbo", False),
-        ("groq/llama3-70b-8192", True),
+        ("groq/llama-3.3-70b-versatile", True),
     ],
 )
 def test_supports_response_schema(model, expected_bool):

tests/local_testing/test_completion_cost.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -565,7 +565,7 @@ def test_groq_response_cost_tracking(is_streaming):

     response_cost = litellm.response_cost_calculator(
         response_object=response,
-        model="groq/llama3-70b-8192",
+        model="groq/llama-3.3-70b-versatile",
         custom_llm_provider="groq",
         call_type=CallTypes.acompletion.value,
         optional_params={},

tests/test_litellm/test_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -979,8 +979,8 @@ def reset_mock_cache(self):
         # Groq models (mixed support)
         ("groq/gemma-7b-it", "litellm_proxy/groq/gemma-7b-it", True),
         (
-            "groq/llama3-70b-8192",
-            "litellm_proxy/groq/llama3-70b-8192",
+            "groq/llama-3.3-70b-versatile",
+            "litellm_proxy/groq/llama-3.3-70b-versatile",
             False,
         ),  # This model doesn't support function calling
         # Cohere models (generally don't support function calling)

Comments (0)