Skip to content

Commit 15a1b9a

Browse files
authored
[Firebase AI] Remove LiveGenConfig CandidateCount (#1314)
1 parent d34ff02 commit 15a1b9a

File tree

2 files changed

+5
-9
lines changed

2 files changed

+5
-9
lines changed

docs/readme.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,11 @@ Support
109109

110110
Release Notes
111111
-------------
112+
### Upcoming
113+
- Changes
114+
- Firebase AI: Remove `LiveGenerationConfig.CandidateCount`, since the
115+
connection fails silently when it is set.
116+
112117
### 13.1.0
113118
- Changes
114119
- Firebase AI: Add support for Developer API backend to LiveSessions.

firebaseai/src/LiveGenerationConfig.cs

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,6 @@ public readonly struct LiveGenerationConfig {
6767
private readonly float? _temperature;
6868
private readonly float? _topP;
6969
private readonly float? _topK;
70-
private readonly int? _candidateCount;
7170
private readonly int? _maxOutputTokens;
7271
private readonly float? _presencePenalty;
7372
private readonly float? _frequencyPenalty;
@@ -119,11 +118,6 @@ public readonly struct LiveGenerationConfig {
119118
/// [Cloud documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig)
120119
/// for more details.</param>
121120
///
122-
/// <param name="candidateCount">The number of response variations to return; defaults to 1 if not set.
123-
/// Support for multiple candidates depends on the model; see the
124-
/// [Cloud documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig)
125-
/// for more details.</param>
126-
///
127121
/// <param name="maxOutputTokens">Maximum number of tokens that can be generated in the response.
128122
/// See the configure model parameters [documentation](https://firebase.google.com/docs/vertex-ai/model-parameters?platform=ios#max-output-tokens)
129123
/// for more details.</param>
@@ -166,7 +160,6 @@ public LiveGenerationConfig(
166160
float? temperature = null,
167161
float? topP = null,
168162
float? topK = null,
169-
int? candidateCount = null,
170163
int? maxOutputTokens = null,
171164
float? presencePenalty = null,
172165
float? frequencyPenalty = null) {
@@ -176,7 +169,6 @@ public LiveGenerationConfig(
176169
_temperature = temperature;
177170
_topP = topP;
178171
_topK = topK;
179-
_candidateCount = candidateCount;
180172
_maxOutputTokens = maxOutputTokens;
181173
_presencePenalty = presencePenalty;
182174
_frequencyPenalty = frequencyPenalty;
@@ -196,7 +188,6 @@ internal Dictionary<string, object> ToJson() {
196188
if (_temperature.HasValue) jsonDict["temperature"] = _temperature.Value;
197189
if (_topP.HasValue) jsonDict["topP"] = _topP.Value;
198190
if (_topK.HasValue) jsonDict["topK"] = _topK.Value;
199-
if (_candidateCount.HasValue) jsonDict["candidateCount"] = _candidateCount.Value;
200191
if (_maxOutputTokens.HasValue) jsonDict["maxOutputTokens"] = _maxOutputTokens.Value;
201192
if (_presencePenalty.HasValue) jsonDict["presencePenalty"] = _presencePenalty.Value;
202193
if (_frequencyPenalty.HasValue) jsonDict["frequencyPenalty"] = _frequencyPenalty.Value;

0 commit comments

Comments (0)