2 changes: 1 addition & 1 deletion lib/completions/endpoints/gemini.rb

@@ -88,7 +88,7 @@ def prepare_payload(prompt, model_params, dialect)
         payload[:generationConfig].merge!(model_params.except(:response_format))

         # https://ai.google.dev/api/generate-content#generationconfig
-        gemini_schema = model_params[:response_format].dig(:json_schema, :schema)
+        gemini_schema = model_params.dig(:response_format, :json_schema, :schema)

         if gemini_schema.present?
           payload[:generationConfig][:responseSchema] = gemini_schema.except(
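Why the change matters: with the old receiver, any request that passes model params but no :response_format (exactly what the new spec below exercises) calls #dig on nil. A minimal sketch of the Ruby semantics involved, using a hypothetical params hash:

    params = { temperature: 0.2 } # no :response_format key

    # Old form: params[:response_format] evaluates to nil, and NilClass
    # does not implement #dig, so this raises NoMethodError.
    params[:response_format].dig(:json_schema, :schema)

    # Fixed form: Hash#dig walks the key path and returns nil as soon as
    # any key along the way is missing, so an absent response_format is
    # handled gracefully.
    params.dig(:response_format, :json_schema, :schema) # => nil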
24 changes: 24 additions & 0 deletions spec/lib/completions/endpoints/gemini_spec.rb

@@ -576,4 +576,28 @@ def tool_response
       expect(parsed.dig(:generationConfig, :responseMimeType)).to eq("application/json")
     end
   end
+
+  it "includes model params in the request" do
+    response = <<~TEXT
+      data: {"candidates": [{"content": {"parts": [{"text": "Hello"}],"role": "model"}}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": "! This is a simple response"}],"role": "model"},"safetyRatings": [{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"}]}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": ""}],"role": "model"},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 399,"candidatesTokenCount": 191,"totalTokenCount": 590},"modelVersion": "gemini-1.5-pro-002"}
+
+    TEXT
+
+    llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+    url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
+
+    output = []
+
+    stub_request(:post, url).with(
+      body: hash_including(generationConfig: { temperature: 0.2 }),
+    ).to_return(status: 200, body: response)
+
+    llm.generate("Hello", user: user, temperature: 0.2) { |partial| output << partial }
+
+    expect(output).to eq(["Hello", "! This is a simple response"])
+  end
 end
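Note on the spec: it drives llm.generate with temperature: 0.2 and no response_format, the exact shape that used to hit the nil receiver, so as far as the hunk above shows (there is no nil guard around the #dig call) the old code would have raised NoMethodError here instead of issuing the request. The WebMock stub uses hash_including, which matches the decoded JSON body partially, so the expectation asserts only that generationConfig carries the temperature and ignores any unrelated payload keys.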