Skip to content
This repository was archived by the owner on Dec 20, 2025. It is now read-only.

Commit 6641890

Browse files
committed
fix: update Gemini tests to use available models
- Replace non-existent `gemini-2.5-pro-exp-03-25` with `gemini-2.0-flash-exp`.
- Update context caching tests to use `gemini-1.5-flash-002`, which supports `createCachedContent`.
- Add a check for empty chunks in the streaming test to avoid nil errors.
1 parent 56c8985 commit 6641890

File tree

5 files changed

+58
-5
lines changed

5 files changed

+58
-5
lines changed

check_cache_support.exs

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
{:ok, _} = Application.ensure_all_started(:ex_llm)
2+
3+
IO.puts("\nChecking Gemini models that support createCachedContent...")
4+
case ExLLM.list_models(:gemini) do
5+
{:ok, models} ->
6+
models
7+
|> Enum.filter(fn model ->
8+
# Check if model supports createCachedContent
9+
case model do
10+
%{capabilities: %{supported_generation_methods: methods}} when is_list(methods) ->
11+
"createCachedContent" in methods
12+
%{supported_generation_methods: methods} when is_list(methods) ->
13+
"createCachedContent" in methods
14+
_ ->
15+
false
16+
end
17+
end)
18+
|> Enum.each(fn model ->
19+
IO.puts(" ✅ #{model.id} supports createCachedContent")
20+
end)
21+
{:error, error} ->
22+
IO.puts("Error: #{inspect(error)}")
23+
end

list_gemini_models.exs

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
{:ok, _} = Application.ensure_all_started(:ex_llm)
2+
3+
IO.puts("\nListing Gemini models...")
4+
case ExLLM.list_models(:gemini) do
5+
{:ok, models} ->
6+
models
7+
|> Enum.sort_by(& &1.id)
8+
|> Enum.each(fn model ->
9+
IO.puts(" - #{model.id}")
10+
end)
11+
{:error, error} ->
12+
IO.puts("Error: #{inspect(error)}")
13+
end

test/ex_llm/providers/gemini_public_api_test.exs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ defmodule ExLLM.Providers.GeminiPublicAPITest do
5757
]
5858

5959
case ExLLM.chat(:gemini, messages,
60-
model: "gemini-2.5-pro-exp-03-25",
60+
model: "gemini-2.0-flash-exp",
6161
safety_settings: safety_settings,
6262
max_tokens: 100
6363
) do
@@ -81,12 +81,13 @@ defmodule ExLLM.Providers.GeminiPublicAPITest do
8181
end
8282

8383
case ExLLM.stream(:gemini, messages, collector,
84-
model: "gemini-2.5-pro-exp-03-25",
84+
model: "gemini-2.0-flash-exp",
8585
max_tokens: 20,
8686
timeout: 10_000
8787
) do
8888
:ok ->
8989
chunks = collect_stream_chunks([], 1000)
90+
assert length(chunks) > 0, "No chunks received from Gemini streaming"
9091
last_chunk = List.last(chunks)
9192
# Gemini uses different finish reasons
9293
assert last_chunk.finish_reason in ["STOP", "MAX_TOKENS", "SAFETY", nil]
@@ -104,7 +105,7 @@ defmodule ExLLM.Providers.GeminiPublicAPITest do
104105
%{role: "user", content: "How are you?"}
105106
]
106107

107-
case ExLLM.chat(:gemini, messages, model: "gemini-2.5-pro-exp-03-25", max_tokens: 50) do
108+
case ExLLM.chat(:gemini, messages, model: "gemini-2.0-flash-exp", max_tokens: 50) do
108109
{:ok, response} ->
109110
assert is_binary(response.content)
110111
assert response.metadata.provider == :gemini

test/integration/context_caching_comprehensive_test.exs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ defmodule ExLLM.Integration.ContextCachingComprehensiveTest do
2222
content = create_test_content(content_text)
2323

2424
%{
25-
model: Keyword.get(opts, :model, "gemini-1.5-flash"),
25+
model: Keyword.get(opts, :model, "gemini-1.5-flash-002"),
2626
contents: [content],
2727
ttl: Keyword.get(opts, :ttl, "3600s"),
2828
display_name: Keyword.get(opts, :display_name)
@@ -40,7 +40,7 @@ defmodule ExLLM.Integration.ContextCachingComprehensiveTest do
4040
{:ok, cached} ->
4141
assert %CachedContent{} = cached
4242
assert cached.name =~ ~r/^cachedContents\//
43-
assert cached.model == "models/gemini-1.5-flash"
43+
assert cached.model == "models/gemini-1.5-flash-002"
4444
assert cached.usage_metadata.total_token_count > 0
4545

4646
# Cleanup

test_api_keys.exs

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
# Start the application first
2+
{:ok, _} = Application.ensure_all_started(:ex_llm)
3+
4+
# Test API keys directly
5+
providers = [:anthropic, :openai, :gemini, :groq]
6+
7+
for provider <- providers do
8+
IO.puts("\nTesting #{provider}...")
9+
10+
case ExLLM.chat(provider, [%{role: "user", content: "Say hello"}], max_tokens: 10) do
11+
{:ok, response} ->
12+
IO.puts("✅ #{provider}: #{response.content}")
13+
{:error, error} ->
14+
IO.puts("❌ #{provider}: #{inspect(error)}")
15+
end
16+
end

0 commit comments

Comments
 (0)