@@ -52,36 +52,27 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
    /// The probability is zero or close to zero.
    ///
    /// For benign content, the probability across all categories will be this value.
-    public static var negligible: HarmProbability {
-      return self.init(kind: .negligible)
-    }
+    public static let negligible = HarmProbability(kind: .negligible)

    /// The probability is small but non-zero.
-    public static var low: HarmProbability {
-      return self.init(kind: .low)
-    }
+    public static let low = HarmProbability(kind: .low)

    /// The probability is moderate.
-    public static var medium: HarmProbability {
-      return self.init(kind: .medium)
-    }
+    public static let medium = HarmProbability(kind: .medium)

    /// The probability is high.
    ///
    /// The content described is very likely harmful.
-    public static var high: HarmProbability {
-      return self.init(kind: .high)
-    }
+    public static let high = HarmProbability(kind: .high)

    /// Returns the raw string representation of the `HarmProbability` value.
    ///
    /// > Note: This value directly corresponds to the values in the [REST
    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
    public let rawValue: String

-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedHarmProbability
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
  }
}
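For orientation, a minimal call-site sketch (hypothetical; not part of this commit). Since the probability levels are now stored `static let` constants instead of computed properties, every access returns the same shared instance, and `Equatable` comparisons behave exactly as before:

let probability: SafetyRating.HarmProbability = .negligible
if probability == .negligible {
  // rawValue carries the REST API string, e.g. "NEGLIGIBLE".
  print("Benign content; raw value: \(probability.rawValue)")
}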
@@ -100,29 +91,19 @@ public struct SafetySetting {
    }

    /// Content with `.negligible` will be allowed.
-    public static var blockLowAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockLowAndAbove)
-    }
+    public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)

    /// Content with `.negligible` and `.low` will be allowed.
-    public static var blockMediumAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockMediumAndAbove)
-    }
+    public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)

    /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
-    public static var blockOnlyHigh: HarmBlockThreshold {
-      return self.init(kind: .blockOnlyHigh)
-    }
+    public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)

    /// All content will be allowed.
-    public static var blockNone: HarmBlockThreshold {
-      return self.init(kind: .blockNone)
-    }
+    public static let blockNone = HarmBlockThreshold(kind: .blockNone)

    /// Turn off the safety filter.
-    public static var off: HarmBlockThreshold {
-      return self.init(kind: .off)
-    }
+    public static let off = HarmBlockThreshold(kind: .off)

    let rawValue: String
  }
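Another hypothetical usage sketch, assuming the `SafetySetting(harmCategory:threshold:)` initializer defined elsewhere in this file. Because the thresholds are now shared constants, building a list of settings no longer constructs a fresh `HarmBlockThreshold` on each property access:

let safetySettings = [
  SafetySetting(harmCategory: .harassment, threshold: .blockMediumAndAbove),
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
]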
@@ -156,39 +137,28 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
  }

  /// Harassment content.
-  public static var harassment: HarmCategory {
-    return self.init(kind: .harassment)
-  }
+  public static let harassment = HarmCategory(kind: .harassment)

  /// Negative or harmful comments targeting identity and/or protected attributes.
-  public static var hateSpeech: HarmCategory {
-    return self.init(kind: .hateSpeech)
-  }
+  public static let hateSpeech = HarmCategory(kind: .hateSpeech)

  /// Contains references to sexual acts or other lewd content.
-  public static var sexuallyExplicit: HarmCategory {
-    return self.init(kind: .sexuallyExplicit)
-  }
+  public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)

  /// Promotes or enables access to harmful goods, services, or activities.
-  public static var dangerousContent: HarmCategory {
-    return self.init(kind: .dangerousContent)
-  }
+  public static let dangerousContent = HarmCategory(kind: .dangerousContent)

  /// Content that may be used to harm civic integrity.
-  public static var civicIntegrity: HarmCategory {
-    return self.init(kind: .civicIntegrity)
-  }
+  public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)

  /// Returns the raw string representation of the `HarmCategory` value.
  ///
  /// > Note: This value directly corresponds to the values in the
  /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
  public let rawValue: String

-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedHarmCategory
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
}

// MARK: - Codable Conformances
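The refactoring pattern applied throughout this commit, shown in isolation (illustrative sketch only; `Level` is a made-up type). A computed `static var` re-evaluates its body and constructs a new value on every access, while a `static let` is initialized lazily exactly once and then reused by all callers:

struct Level: Equatable, Sendable {
  let rawValue: String

  // Before: computed property; builds a new Level each time it is read.
  // static var low: Level { Level(rawValue: "LOW") }

  // After: stored constant; initialized once, shared thereafter.
  static let low = Level(rawValue: "LOW")
}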