@@ -52,36 +52,27 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     /// The probability is zero or close to zero.
     ///
     /// For benign content, the probability across all categories will be this value.
-    public static var negligible: HarmProbability {
-      return self.init(kind: .negligible)
-    }
+    public static let negligible = HarmProbability(kind: .negligible)
 
     /// The probability is small but non-zero.
-    public static var low: HarmProbability {
-      return self.init(kind: .low)
-    }
+    public static let low = HarmProbability(kind: .low)
 
     /// The probability is moderate.
-    public static var medium: HarmProbability {
-      return self.init(kind: .medium)
-    }
+    public static let medium = HarmProbability(kind: .medium)
 
     /// The probability is high.
     ///
     /// The content described is very likely harmful.
-    public static var high: HarmProbability {
-      return self.init(kind: .high)
-    }
+    public static let high = HarmProbability(kind: .high)
 
     /// Returns the raw string representation of the `HarmProbability` value.
     ///
     /// > Note: This value directly corresponds to the values in the [REST
     /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
     public let rawValue: String
 
-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedHarmProbability
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
 }
 
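Switching from computed `static var` properties to `static let` constants keeps call sites unchanged while creating each value once and sharing it. For illustration, a minimal sketch of how the constants compare at a call site; `probability` is an assumed property of `SafetyRating` that is outside this diff:

```swift
// Sketch only: `probability` is an assumed property of `SafetyRating`
// not shown in this diff.
func containsHighRiskContent(_ ratings: [SafetyRating]) -> Bool {
  // HarmProbability is Equatable, so the shared static values compare directly.
  ratings.contains { $0.probability == .high }
}
```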
@@ -100,29 +91,19 @@ public struct SafetySetting {
     }
 
     /// Content with `.negligible` will be allowed.
-    public static var blockLowAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockLowAndAbove)
-    }
+    public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
 
     /// Content with `.negligible` and `.low` will be allowed.
-    public static var blockMediumAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockMediumAndAbove)
-    }
+    public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
 
     /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
-    public static var blockOnlyHigh: HarmBlockThreshold {
-      return self.init(kind: .blockOnlyHigh)
-    }
+    public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
 
     /// All content will be allowed.
-    public static var blockNone: HarmBlockThreshold {
-      return self.init(kind: .blockNone)
-    }
+    public static let blockNone = HarmBlockThreshold(kind: .blockNone)
 
     /// Turn off the safety filter.
-    public static var off: HarmBlockThreshold {
-      return self.init(kind: .off)
-    }
+    public static let off = HarmBlockThreshold(kind: .off)
 
     let rawValue: String
   }
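For context, a hedged sketch of how these threshold constants are paired with categories when configuring safety settings. The `init(harmCategory:threshold:)` signature is an assumption based on the surrounding `SafetySetting` type, which is not shown in this diff:

```swift
// Sketch: pairing categories with block thresholds. The
// `init(harmCategory:threshold:)` initializer is assumed, not shown here.
let safetySettings = [
  SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
]
```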
@@ -156,39 +137,28 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
   }
 
   /// Harassment content.
-  public static var harassment: HarmCategory {
-    return self.init(kind: .harassment)
-  }
+  public static let harassment = HarmCategory(kind: .harassment)
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  public static var hateSpeech: HarmCategory {
-    return self.init(kind: .hateSpeech)
-  }
+  public static let hateSpeech = HarmCategory(kind: .hateSpeech)
 
   /// Contains references to sexual acts or other lewd content.
-  public static var sexuallyExplicit: HarmCategory {
-    return self.init(kind: .sexuallyExplicit)
-  }
+  public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  public static var dangerousContent: HarmCategory {
-    return self.init(kind: .dangerousContent)
-  }
+  public static let dangerousContent = HarmCategory(kind: .dangerousContent)
 
   /// Content that may be used to harm civic integrity.
-  public static var civicIntegrity: HarmCategory {
-    return self.init(kind: .civicIntegrity)
-  }
+  public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
 
   /// Returns the raw string representation of the `HarmCategory` value.
   ///
   /// > Note: This value directly corresponds to the values in the
   /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
   public let rawValue: String
 
-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedHarmCategory
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
 }
 
 // MARK: - Codable Conformances
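Since `HarmCategory` declares `Hashable` conformance, the shared constants also work cleanly as dictionary keys; a minimal sketch, where the lookup table is illustrative and not part of the SDK:

```swift
// Sketch: HarmCategory is Hashable, so the static constants can key a
// dictionary. The label table below is illustrative only.
let categoryLabels: [HarmCategory: String] = [
  .harassment: "Harassment",
  .hateSpeech: "Hate speech",
  .civicIntegrity: "Civic integrity",
]

// Unlisted or unrecognized categories fall back to the raw API string.
func label(for category: HarmCategory) -> String {
  categoryLabels[category] ?? category.rawValue
}
```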