From 7e7ca114338e7912133593026860d17574c98eff Mon Sep 17 00:00:00 2001
From: Roman Rizzi
Date: Wed, 21 May 2025 08:17:38 -0300
Subject: [PATCH] FIX: Don't dig on nil when checking for the gemini schema

---
 lib/completions/endpoints/gemini.rb           |  2 +-
 spec/lib/completions/endpoints/gemini_spec.rb | 24 +++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/lib/completions/endpoints/gemini.rb b/lib/completions/endpoints/gemini.rb
index 171074554..1db5b76fa 100644
--- a/lib/completions/endpoints/gemini.rb
+++ b/lib/completions/endpoints/gemini.rb
@@ -88,7 +88,7 @@ def prepare_payload(prompt, model_params, dialect)
         payload[:generationConfig].merge!(model_params.except(:response_format))
 
         # https://ai.google.dev/api/generate-content#generationconfig
-        gemini_schema = model_params[:response_format].dig(:json_schema, :schema)
+        gemini_schema = model_params.dig(:response_format, :json_schema, :schema)
 
         if gemini_schema.present?
           payload[:generationConfig][:responseSchema] = gemini_schema.except(
diff --git a/spec/lib/completions/endpoints/gemini_spec.rb b/spec/lib/completions/endpoints/gemini_spec.rb
index 429e81084..aea97bb89 100644
--- a/spec/lib/completions/endpoints/gemini_spec.rb
+++ b/spec/lib/completions/endpoints/gemini_spec.rb
@@ -576,4 +576,28 @@ def tool_response
       expect(parsed.dig(:generationConfig, :responseMimeType)).to eq("application/json")
     end
   end
+
+  it "includes model params in the request" do
+    response = <<~TEXT
+      data: {"candidates": [{"content": {"parts": [{"text": "Hello"}],"role": "model"}}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": "! This is a simple response"}],"role": "model"},"safetyRatings": [{"category": "HARM_CATEGORY_HATE_SPEECH","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_DANGEROUS_CONTENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_HARASSMENT","probability": "NEGLIGIBLE"},{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT","probability": "NEGLIGIBLE"}]}],"usageMetadata": {"promptTokenCount": 399,"totalTokenCount": 399},"modelVersion": "gemini-1.5-pro-002"}
+
+      data: {"candidates": [{"content": {"parts": [{"text": ""}],"role": "model"},"finishReason": "STOP"}],"usageMetadata": {"promptTokenCount": 399,"candidatesTokenCount": 191,"totalTokenCount": 590},"modelVersion": "gemini-1.5-pro-002"}
+
+    TEXT
+
+    llm = DiscourseAi::Completions::Llm.proxy("custom:#{model.id}")
+    url = "#{model.url}:streamGenerateContent?alt=sse&key=123"
+
+    output = []
+
+    stub_request(:post, url).with(
+      body: hash_including(generationConfig: { temperature: 0.2 }),
+    ).to_return(status: 200, body: response)
+
+    llm.generate("Hello", user: user, temperature: 0.2) { |partial| output << partial }
+
+    expect(output).to eq(["Hello", "! This is a simple response"])
+  end
 end
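
Note (not part of the patch): a minimal Ruby sketch of the behavior this fix addresses,
using a stand-in model_params hash with no :response_format key.

    model_params = { temperature: 0.2 }

    # Before the fix: [] returns nil for the missing :response_format key,
    # and calling .dig on nil raises.
    begin
      model_params[:response_format].dig(:json_schema, :schema)
    rescue NoMethodError => e
      puts e.message # => undefined method `dig' for nil
    end

    # After the fix: Hash#dig walks the whole key path and simply returns nil
    # when any key along the way is missing.
    gemini_schema = model_params.dig(:response_format, :json_schema, :schema)
    puts gemini_schema.inspect # => nil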