Skip to content

Commit 02025ce

Browse files
authored
[Vertex AI] Replace "enum" computed properties with static let (#13870)
1 parent fde2baf commit 02025ce

File tree

4 files changed

+40
-104
lines changed

4 files changed

+40
-104
lines changed

FirebaseVertexAI/Sources/FunctionCalling.swift

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -84,22 +84,16 @@ public struct FunctionCallingConfig {
8484
/// The default behavior for function calling.
8585
///
8686
/// The model calls functions to answer queries at its discretion.
87-
public static var auto: Mode {
88-
return self.init(kind: .auto)
89-
}
87+
public static let auto = Mode(kind: .auto)
9088

9189
/// The model always predicts a provided function call to answer every query.
92-
public static var any: Mode {
93-
return self.init(kind: .any)
94-
}
90+
public static let any = Mode(kind: .any)
9591

9692
/// The model will never predict a function call to answer a query.
9793
///
9894
/// > Note: This can also be achieved by not passing any ``FunctionDeclaration`` tools
9995
/// > when instantiating the model.
100-
public static var none: Mode {
101-
return self.init(kind: .none)
102-
}
96+
public static let none = Mode(kind: .none)
10397

10498
let rawValue: String
10599
}

FirebaseVertexAI/Sources/GenerateContentResponse.swift

Lines changed: 17 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -159,62 +159,43 @@ public struct FinishReason: DecodableProtoEnum, Hashable, Sendable {
159159
}
160160

161161
/// Natural stop point of the model or provided stop sequence.
162-
public static var stop: FinishReason {
163-
return self.init(kind: .stop)
164-
}
162+
public static let stop = FinishReason(kind: .stop)
165163

166164
/// The maximum number of tokens as specified in the request was reached.
167-
public static var maxTokens: FinishReason {
168-
return self.init(kind: .maxTokens)
169-
}
165+
public static let maxTokens = FinishReason(kind: .maxTokens)
170166

171167
/// The token generation was stopped because the response was flagged for safety reasons.
172168
///
173169
/// > NOTE: When streaming, the ``CandidateResponse/content`` will be empty if content filters
174170
/// > blocked the output.
175-
public static var safety: FinishReason {
176-
return self.init(kind: .safety)
177-
}
171+
public static let safety = FinishReason(kind: .safety)
178172

179173
/// The token generation was stopped because the response was flagged for unauthorized citations.
180-
public static var recitation: FinishReason {
181-
return self.init(kind: .recitation)
182-
}
174+
public static let recitation = FinishReason(kind: .recitation)
183175

184176
/// All other reasons that stopped token generation.
185-
public static var other: FinishReason {
186-
return self.init(kind: .other)
187-
}
177+
public static let other = FinishReason(kind: .other)
188178

189179
/// Token generation was stopped because the response contained forbidden terms.
190-
public static var blocklist: FinishReason {
191-
return self.init(kind: .blocklist)
192-
}
180+
public static let blocklist = FinishReason(kind: .blocklist)
193181

194182
/// Token generation was stopped because the response contained potentially prohibited content.
195-
public static var prohibitedContent: FinishReason {
196-
return self.init(kind: .prohibitedContent)
197-
}
183+
public static let prohibitedContent = FinishReason(kind: .prohibitedContent)
198184

199185
/// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
200-
public static var spii: FinishReason {
201-
return self.init(kind: .spii)
202-
}
186+
public static let spii = FinishReason(kind: .spii)
203187

204188
/// Token generation was stopped because the function call generated by the model was invalid.
205-
public static var malformedFunctionCall: FinishReason {
206-
return self.init(kind: .malformedFunctionCall)
207-
}
189+
public static let malformedFunctionCall = FinishReason(kind: .malformedFunctionCall)
208190

209191
/// Returns the raw string representation of the `FinishReason` value.
210192
///
211193
/// > Note: This value directly corresponds to the values in the [REST
212194
/// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#FinishReason).
213195
public let rawValue: String
214196

215-
var unrecognizedValueMessageCode: VertexLog.MessageCode {
216-
.generateContentResponseUnrecognizedFinishReason
217-
}
197+
static let unrecognizedValueMessageCode =
198+
VertexLog.MessageCode.generateContentResponseUnrecognizedFinishReason
218199
}
219200

220201
/// A metadata struct containing any feedback the model had on the prompt it was provided.
@@ -230,34 +211,25 @@ public struct PromptFeedback: Sendable {
230211
}
231212

232213
/// The prompt was blocked because it was deemed unsafe.
233-
public static var safety: BlockReason {
234-
return self.init(kind: .safety)
235-
}
214+
public static let safety = BlockReason(kind: .safety)
236215

237216
/// All other block reasons.
238-
public static var other: BlockReason {
239-
return self.init(kind: .other)
240-
}
217+
public static let other = BlockReason(kind: .other)
241218

242219
/// The prompt was blocked because it contained terms from the terminology blocklist.
243-
public static var blocklist: BlockReason {
244-
return self.init(kind: .blocklist)
245-
}
220+
public static let blocklist = BlockReason(kind: .blocklist)
246221

247222
/// The prompt was blocked due to prohibited content.
248-
public static var prohibitedContent: BlockReason {
249-
return self.init(kind: .prohibitedContent)
250-
}
223+
public static let prohibitedContent = BlockReason(kind: .prohibitedContent)
251224

252225
/// Returns the raw string representation of the `BlockReason` value.
253226
///
254227
/// > Note: This value directly corresponds to the values in the [REST
255228
/// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#BlockedReason).
256229
public let rawValue: String
257230

258-
var unrecognizedValueMessageCode: VertexLog.MessageCode {
259-
.generateContentResponseUnrecognizedBlockReason
260-
}
231+
static let unrecognizedValueMessageCode =
232+
VertexLog.MessageCode.generateContentResponseUnrecognizedBlockReason
261233
}
262234

263235
/// The reason a prompt was blocked, if it was blocked.

FirebaseVertexAI/Sources/Protocols/Internal/CodableProtoEnum.swift

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ protocol ProtoEnum {
4747
/// provided when conforming to this type.
4848
protocol DecodableProtoEnum: ProtoEnum, Decodable {
4949
/// Returns the ``VertexLog/MessageCode`` associated with unrecognized (unknown) enum values.
50-
var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
50+
static var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
5151

5252
/// Creates a new instance by decoding from the given decoder.
5353
///
@@ -91,7 +91,7 @@ extension DecodableProtoEnum {
9191

9292
if Kind(rawValue: rawValue) == nil {
9393
VertexLog.error(
94-
code: unrecognizedValueMessageCode,
94+
code: Self.unrecognizedValueMessageCode,
9595
"""
9696
Unrecognized \(Self.self) with value "\(rawValue)":
9797
- Check for updates to the SDK as support for "\(rawValue)" may have been added; see \

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 18 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -52,36 +52,27 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
5252
/// The probability is zero or close to zero.
5353
///
5454
/// For benign content, the probability across all categories will be this value.
55-
public static var negligible: HarmProbability {
56-
return self.init(kind: .negligible)
57-
}
55+
public static let negligible = HarmProbability(kind: .negligible)
5856

5957
/// The probability is small but non-zero.
60-
public static var low: HarmProbability {
61-
return self.init(kind: .low)
62-
}
58+
public static let low = HarmProbability(kind: .low)
6359

6460
/// The probability is moderate.
65-
public static var medium: HarmProbability {
66-
return self.init(kind: .medium)
67-
}
61+
public static let medium = HarmProbability(kind: .medium)
6862

6963
/// The probability is high.
7064
///
7165
/// The content described is very likely harmful.
72-
public static var high: HarmProbability {
73-
return self.init(kind: .high)
74-
}
66+
public static let high = HarmProbability(kind: .high)
7567

7668
/// Returns the raw string representation of the `HarmProbability` value.
7769
///
7870
/// > Note: This value directly corresponds to the values in the [REST
7971
/// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
8072
public let rawValue: String
8173

82-
var unrecognizedValueMessageCode: VertexLog.MessageCode {
83-
.generateContentResponseUnrecognizedHarmProbability
84-
}
74+
static let unrecognizedValueMessageCode =
75+
VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
8576
}
8677
}
8778

@@ -100,29 +91,19 @@ public struct SafetySetting {
10091
}
10192

10293
/// Content with `.negligible` will be allowed.
103-
public static var blockLowAndAbove: HarmBlockThreshold {
104-
return self.init(kind: .blockLowAndAbove)
105-
}
94+
public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
10695

10796
/// Content with `.negligible` and `.low` will be allowed.
108-
public static var blockMediumAndAbove: HarmBlockThreshold {
109-
return self.init(kind: .blockMediumAndAbove)
110-
}
97+
public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
11198

11299
/// Content with `.negligible`, `.low`, and `.medium` will be allowed.
113-
public static var blockOnlyHigh: HarmBlockThreshold {
114-
return self.init(kind: .blockOnlyHigh)
115-
}
100+
public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
116101

117102
/// All content will be allowed.
118-
public static var blockNone: HarmBlockThreshold {
119-
return self.init(kind: .blockNone)
120-
}
103+
public static let blockNone = HarmBlockThreshold(kind: .blockNone)
121104

122105
/// Turn off the safety filter.
123-
public static var off: HarmBlockThreshold {
124-
return self.init(kind: .off)
125-
}
106+
public static let off = HarmBlockThreshold(kind: .off)
126107

127108
let rawValue: String
128109
}
@@ -156,39 +137,28 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
156137
}
157138

158139
/// Harassment content.
159-
public static var harassment: HarmCategory {
160-
return self.init(kind: .harassment)
161-
}
140+
public static let harassment = HarmCategory(kind: .harassment)
162141

163142
/// Negative or harmful comments targeting identity and/or protected attributes.
164-
public static var hateSpeech: HarmCategory {
165-
return self.init(kind: .hateSpeech)
166-
}
143+
public static let hateSpeech = HarmCategory(kind: .hateSpeech)
167144

168145
/// Contains references to sexual acts or other lewd content.
169-
public static var sexuallyExplicit: HarmCategory {
170-
return self.init(kind: .sexuallyExplicit)
171-
}
146+
public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
172147

173148
/// Promotes or enables access to harmful goods, services, or activities.
174-
public static var dangerousContent: HarmCategory {
175-
return self.init(kind: .dangerousContent)
176-
}
149+
public static let dangerousContent = HarmCategory(kind: .dangerousContent)
177150

178151
/// Content that may be used to harm civic integrity.
179-
public static var civicIntegrity: HarmCategory {
180-
return self.init(kind: .civicIntegrity)
181-
}
152+
public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
182153

183154
/// Returns the raw string representation of the `HarmCategory` value.
184155
///
185156
/// > Note: This value directly corresponds to the values in the
186157
/// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
187158
public let rawValue: String
188159

189-
var unrecognizedValueMessageCode: VertexLog.MessageCode {
190-
.generateContentResponseUnrecognizedHarmCategory
191-
}
160+
static let unrecognizedValueMessageCode =
161+
VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
192162
}
193163

194164
// MARK: - Codable Conformances

0 commit comments

Comments (0)