Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
import io.micrometer.observation.contextpropagation.ObservationThreadLocalAccessor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.ai.vertexai.gemini.api.VertexAiGeminiApi;
Copy link
Contributor

@dev-jonghoonpark dev-jonghoonpark Jul 2, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please move this line between lines 87 and 88.

...
import org.springframework.ai.tool.definition.ToolDefinition;
import org.springframework.ai.vertexai.gemini.api.VertexAiGeminiApi;
import org.springframework.ai.vertexai.gemini.common.VertexAiGeminiConstants;
...

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

Expand Down Expand Up @@ -580,8 +581,28 @@ protected List<Generation> responseCandidateToGeneration(Candidate candidate) {
int candidateIndex = candidate.getIndex();
FinishReason candidateFinishReason = candidate.getFinishReason();

// Convert from VertexAI protobuf to VertexAiGeminiApi DTOs
List<VertexAiGeminiApi.LogProbs.TopContent> topCandidates = candidate.getLogprobsResult()
.getTopCandidatesList()
.stream()
.filter(topCandidate -> !topCandidate.getCandidatesList().isEmpty())
.map(topCandidate -> new VertexAiGeminiApi.LogProbs.TopContent(topCandidate.getCandidatesList()
.stream()
.map(c -> new VertexAiGeminiApi.LogProbs.Content(c.getToken(), c.getLogProbability(), c.getTokenId()))
.toList()))
.toList();

List<VertexAiGeminiApi.LogProbs.Content> chosenCandidates = candidate.getLogprobsResult()
.getChosenCandidatesList()
.stream()
.map(c -> new VertexAiGeminiApi.LogProbs.Content(c.getToken(), c.getLogProbability(), c.getTokenId()))
.toList();

VertexAiGeminiApi.LogProbs logprobs = new VertexAiGeminiApi.LogProbs(candidate.getAvgLogprobs(), topCandidates,
chosenCandidates);

Map<String, Object> messageMetadata = Map.of("candidateIndex", candidateIndex, "finishReason",
candidateFinishReason);
candidateFinishReason, "logprobs", logprobs);

ChatGenerationMetadata chatGenerationMetadata = ChatGenerationMetadata.builder()
.finishReason(candidateFinishReason.name())
Expand Down Expand Up @@ -737,6 +758,10 @@ private GenerationConfig toGenerationConfig(VertexAiGeminiChatOptions options) {
if (options.getPresencePenalty() != null) {
generationConfigBuilder.setPresencePenalty(options.getPresencePenalty().floatValue());
}
if (options.getLogprobs() != null) {
generationConfigBuilder.setLogprobs(options.getLogprobs());
}
generationConfigBuilder.setResponseLogprobs(options.getResponseLogprobs());

return generationConfigBuilder.build();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,20 @@ public class VertexAiGeminiChatOptions implements ToolCallingChatOptions {
*/
private @JsonProperty("temperature") Double temperature;

/**
* Optional. Enable returning the log probabilities of the top candidate tokens at each generation step.
* The model's chosen token might not be the same as the top candidate token at each step.
* Specify the number of candidates to return by using an integer value in the range of 1-20.
* Should not be set unless responseLogprobs is set to true.
*/
private @JsonProperty("logprobs") Integer logprobs;

/**
* Optional. If true, returns the log probabilities of the tokens that were chosen by the model at each step.
* By default, this parameter is set to false.
*/
private @JsonProperty("responseLogprobs") boolean responseLogprobs;

/**
* Optional. If specified, nucleus sampling will be used.
*/
Expand Down Expand Up @@ -162,6 +176,8 @@ public static VertexAiGeminiChatOptions fromOptions(VertexAiGeminiChatOptions fr
options.setSafetySettings(fromOptions.getSafetySettings());
options.setInternalToolExecutionEnabled(fromOptions.getInternalToolExecutionEnabled());
options.setToolContext(fromOptions.getToolContext());
options.setLogprobs(fromOptions.getLogprobs());
options.setResponseLogprobs(fromOptions.getResponseLogprobs());
return options;
}

Expand All @@ -183,6 +199,10 @@ public void setTemperature(Double temperature) {
this.temperature = temperature;
}

/**
 * Sets whether the model should return the log probabilities of the tokens it
 * chose at each generation step.
 * @param responseLogprobs true to include log probabilities in the response
 */
public void setResponseLogprobs(boolean responseLogprobs) {
	this.responseLogprobs = responseLogprobs;
}

@Override
public Double getTopP() {
return this.topP;
Expand Down Expand Up @@ -326,6 +346,18 @@ public void setToolContext(Map<String, Object> toolContext) {
this.toolContext = toolContext;
}

/**
 * Returns the configured number of top candidate tokens to report log
 * probabilities for, or {@code null} if not set.
 */
public Integer getLogprobs() {
	// 'this.' qualifier added for consistency with the other accessors in this class.
	return this.logprobs;
}

/**
 * Sets the number of top candidate tokens (documented range 1-20) whose log
 * probabilities are returned at each generation step.
 * @param logprobs the number of top candidates, or null to leave unset
 */
public void setLogprobs(Integer logprobs) {
	this.logprobs = logprobs;
}

/**
 * Returns whether log probabilities of the chosen tokens are requested.
 * NOTE(review): JavaBeans convention would name this {@code isResponseLogprobs};
 * the existing name is kept so callers are not broken.
 */
public boolean getResponseLogprobs() {
	// 'this.' qualifier added for consistency with the other accessors in this class.
	return this.responseLogprobs;
}

@Override
public boolean equals(Object o) {
if (this == o) {
Expand All @@ -346,15 +378,17 @@ public boolean equals(Object o) {
&& Objects.equals(this.toolNames, that.toolNames)
&& Objects.equals(this.safetySettings, that.safetySettings)
&& Objects.equals(this.internalToolExecutionEnabled, that.internalToolExecutionEnabled)
&& Objects.equals(this.toolContext, that.toolContext);
&& Objects.equals(this.toolContext, that.toolContext) && Objects.equals(this.logprobs, that.logprobs)
&& Objects.equals(this.responseLogprobs, that.responseLogprobs);
}

@Override
public int hashCode() {
return Objects.hash(this.stopSequences, this.temperature, this.topP, this.topK, this.candidateCount,
this.frequencyPenalty, this.presencePenalty, this.maxOutputTokens, this.model, this.responseMimeType,
this.toolCallbacks, this.toolNames, this.googleSearchRetrieval, this.safetySettings,
this.internalToolExecutionEnabled, this.toolContext);
this.internalToolExecutionEnabled, this.toolContext, this.toolContext, this.logprobs,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you please confirm whether the use of this.toolContext twice is intentional?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch! It's not - just removed the extra one.

this.responseLogprobs);
}

@Override
Expand All @@ -365,7 +399,8 @@ public String toString() {
+ this.candidateCount + ", maxOutputTokens=" + this.maxOutputTokens + ", model='" + this.model + '\''
+ ", responseMimeType='" + this.responseMimeType + '\'' + ", toolCallbacks=" + this.toolCallbacks
+ ", toolNames=" + this.toolNames + ", googleSearchRetrieval=" + this.googleSearchRetrieval
+ ", safetySettings=" + this.safetySettings + '}';
+ ", safetySettings=" + this.safetySettings + ", logProbs=" + this.logprobs + ", responseLogprobs="
+ this.responseLogprobs + '}';
}

@Override
Expand Down Expand Up @@ -488,6 +523,16 @@ public Builder toolContext(Map<String, Object> toolContext) {
return this;
}

/**
 * Sets the number of top candidate tokens (documented range 1-20) whose log
 * probabilities are returned at each generation step. Per the field's Javadoc,
 * should not be set unless responseLogprobs is true — presumably the backend
 * rejects it otherwise; TODO confirm.
 * @param logprobs the number of top candidates, or null to leave unset
 * @return this builder
 */
public Builder logprobs(Integer logprobs) {
	this.options.setLogprobs(logprobs);
	return this;
}

/**
 * Sets whether the model should return the log probabilities of the chosen
 * tokens. A {@code null} argument is ignored, keeping the default
 * ({@code false}); previously a null would throw a
 * {@link NullPointerException} when auto-unboxed into the primitive setter.
 * @param responseLogprobs whether to return log probabilities, may be null
 * @return this builder
 */
public Builder responseLogprobs(Boolean responseLogprobs) {
	if (responseLogprobs != null) {
		this.options.setResponseLogprobs(responseLogprobs);
	}
	return this;
}

/**
 * Returns the configured {@link VertexAiGeminiChatOptions}.
 * Note: returns the builder's internal instance directly (no defensive copy),
 * so further builder calls mutate the returned object.
 */
public VertexAiGeminiChatOptions build() {
	return this.options;
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
package org.springframework.ai.vertexai.gemini.api;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add a license header.


import java.util.List;

/**
 * Lightweight DTOs for the Vertex AI Gemini API. These decouple callers that read
 * generation metadata (e.g. the {@code "logprobs"} entry) from the underlying
 * Vertex AI protobuf types.
 *
 * <p>NOTE(review): this new public type still needs the project's standard license
 * header at the top of the file, as flagged in review.
 */
public class VertexAiGeminiApi {

	/**
	 * Log-probability information for a generated candidate.
	 *
	 * @param avgLogprobs average log probability across the candidate's tokens
	 * @param topCandidates the top candidate tokens considered at each generation step
	 * @param chosenCandidates the tokens actually chosen by the model at each step
	 */
	public record LogProbs(Double avgLogprobs, List<TopContent> topCandidates,
			List<LogProbs.Content> chosenCandidates) {

		/**
		 * A single token together with its log probability.
		 *
		 * @param token the token text
		 * @param logprob the token's log probability
		 * @param id the token id
		 */
		public record Content(String token, Float logprob, Integer id) {
		}

		/**
		 * The set of candidate tokens considered at one generation step.
		 *
		 * @param candidates the candidate tokens for this step
		 */
		public record TopContent(List<Content> candidates) {
		}
	}

}
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,8 @@ public void createRequestWithGenerationConfigOptions() {
.stopSequences(List.of("stop1", "stop2"))
.candidateCount(1)
.responseMimeType("application/json")
.responseLogprobs(true)
.logprobs(2)
.build())
.build();

Expand All @@ -280,6 +282,8 @@ public void createRequestWithGenerationConfigOptions() {
assertThat(request.model().getGenerationConfig().getStopSequences(0)).isEqualTo("stop1");
assertThat(request.model().getGenerationConfig().getStopSequences(1)).isEqualTo("stop2");
assertThat(request.model().getGenerationConfig().getResponseMimeType()).isEqualTo("application/json");
assertThat(request.model().getGenerationConfig().getLogprobs()).isEqualTo(2);
assertThat(request.model().getGenerationConfig().getResponseLogprobs()).isEqualTo(true);
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
import org.springframework.ai.model.tool.ToolCallingManager;
import org.springframework.ai.tool.annotation.Tool;
import org.springframework.ai.vertexai.gemini.VertexAiGeminiChatModel.ChatModel;
import org.springframework.ai.vertexai.gemini.api.VertexAiGeminiApi;
import org.springframework.ai.vertexai.gemini.common.VertexAiGeminiSafetySetting;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
Expand Down Expand Up @@ -226,6 +227,26 @@ void textStream() {
assertThat(generationTextFromStream).isNotEmpty();
}

@Test
void logprobs() {
	// Ask for the single top candidate token's log probability at each step.
	var options = VertexAiGeminiChatOptions.builder().logprobs(1).responseLogprobs(true).build();

	var response = this.chatModel.call(new Prompt("Explain Bulgaria? Answer in 10 paragraphs.", options));
	var metadata = response.getResult().getOutput().getMetadata();
	var result = (VertexAiGeminiApi.LogProbs) metadata.get("logprobs");

	assertThat(result).isNotNull();
	assertThat(result.avgLogprobs()).isNotZero();
	assertThat(result.topCandidates()).isNotEmpty();
	assertThat(result.chosenCandidates()).isNotEmpty();
}

@Test
void beanStreamOutputConverterRecords() {

Expand Down