
Commit e207eba

FIX: Don't dig on nil when checking for the gemini schema (#1356)
1 parent: 53905f6

2 files changed: 25 additions & 1 deletion

lib/completions/endpoints/gemini.rb

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def prepare_payload(prompt, model_params, dialect)
           payload[:generationConfig].merge!(model_params.except(:response_format))

           # https://ai.google.dev/api/generate-content#generationconfig
-          gemini_schema = model_params[:response_format].dig(:json_schema, :schema)
+          gemini_schema = model_params.dig(:response_format, :json_schema, :schema)

           if gemini_schema.present?
             payload[:generationConfig][:responseSchema] = gemini_schema.except(
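Why this one-line change matters: in Ruby, calling dig on nil raises NoMethodError, whereas Hash#dig returns nil as soon as any key along the path is missing. A minimal standalone sketch of the two behaviors (the hash literal below is illustrative, not taken from the plugin):

# Illustrative params hash: temperature is set, but there is no :response_format key.
model_params = { temperature: 0.2 }

# Old code path: [] returns nil, and nil does not respond to #dig.
begin
  model_params[:response_format].dig(:json_schema, :schema)
rescue NoMethodError => e
  puts e.class # => NoMethodError
end

# New code path: Hash#dig walks the full key path and short-circuits to nil.
schema = model_params.dig(:response_format, :json_schema, :schema)
puts schema.inspect # => nil, so the "if gemini_schema.present?" guard is simply skipped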

spec/lib/completions/endpoints/gemini_spec.rb

Lines changed: 24 additions & 0 deletions
@@ -576,4 +576,28 @@ def tool_response
       expect(parsed.dig(:generationConfig, :responseMimeType)).to eq("application/json")
     end
   end
+
+  it "includes model params in the request" do
+    response = <<~TEXT
+      data: {"candidates": [{"content": {"parts": [{"text": "Hello"}],"role": "model"}}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": "! This is a simple response"}],"role": "model"},"safetyRatings": [{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"}]}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": ""}],"role": "model"},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 399,"candidatesTokenCount": 191,"totalTokenCount": 590},"modelVersion": "gemini-1.5-pro-002"}
+
+    TEXT
+
+    llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+    url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
+
+    output = []
+
+    stub_request(:post, url).with(
+      body: hash_including(generationConfig: { temperature: 0.2 }),
+    ).to_return(status: 200, body: response)
+
+    llm.generate("Hello", user: user, temperature: 0.2) { |partial| output << partial }
+
+    expect(output).to eq(["Hello", "! This is a simple response"])
+  end
 end
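Note that the stubbed request includes temperature: 0.2 but no response_format, so the spec exercises exactly the path that used to break: previously model_params[:response_format] evaluated to nil and the chained .dig(:json_schema, :schema) raised NoMethodError before the request was made. With the fix, the dig returns nil, the responseSchema branch is skipped, and the streamed parts come back as expected.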
