@@ -97,21 +97,65 @@ public struct SafetySetting {
 }
 
 /// Categories describing the potential harm a piece of content may pose.
-public enum HarmCategory: String, Sendable {
-  /// Unknown. A new server value that isn't recognized by the SDK.
-  case unknown = "HARM_CATEGORY_UNKNOWN"
+public struct HarmCategory: Sendable, Equatable, Hashable {
+  enum Kind: String {
+    case harassment = "HARM_CATEGORY_HARASSMENT"
+    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+    case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
+  }
 
   /// Harassment content.
-  case harassment = "HARM_CATEGORY_HARASSMENT"
+  public static var harassment: HarmCategory {
+    return self.init(kind: .harassment)
+  }
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+  public static var hateSpeech: HarmCategory {
+    return self.init(kind: .hateSpeech)
+  }
 
   /// Contains references to sexual acts or other lewd content.
-  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+  public static var sexuallyExplicit: HarmCategory {
+    return self.init(kind: .sexuallyExplicit)
+  }
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+  public static var dangerousContent: HarmCategory {
+    return self.init(kind: .dangerousContent)
+  }
+
+  /// Content that may be used to harm civic integrity.
+  public static var civicIntegrity: HarmCategory {
+    return self.init(kind: .civicIntegrity)
+  }
+
+  /// Returns the raw string representation of the `HarmCategory` value.
+  ///
+  /// > Note: This value directly corresponds to the values in the
+  /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
+  public let rawValue: String
+
+  init(kind: Kind) {
+    rawValue = kind.rawValue
+  }
+
+  init(rawValue: String) {
+    if Kind(rawValue: rawValue) == nil {
+      VertexLog.error(
+        code: .generateContentResponseUnrecognizedHarmCategory,
+        """
+        Unrecognized HarmCategory with value "\(rawValue)":
+        - Check for updates to the SDK as support for "\(rawValue)" may have been added; see \
+        release notes at https://firebase.google.com/support/release-notes/ios
+        - Search for "\(rawValue)" in the Firebase Apple SDK Issue Tracker at \
+        https://github.com/firebase/firebase-ios-sdk/issues and file a Bug Report if none found
+        """
+      )
+    }
+    self.rawValue = rawValue
+  }
 }
 
 // MARK: - Codable Conformances
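This hunk swaps the closed enum for a struct with enum-like static members: known values keep their leading-dot spelling, while `rawValue` preserves any server value the SDK doesn't recognize yet instead of collapsing it into `.unknown`. A minimal, self-contained sketch of the same pattern, using a hypothetical `Flavor` type in place of `HarmCategory` and omitting the SDK-internal `VertexLog` call:

/// Hypothetical stand-in for `HarmCategory`; logging is omitted.
struct Flavor: Sendable, Equatable, Hashable {
  enum Kind: String {
    case vanilla = "FLAVOR_VANILLA"
    case chocolate = "FLAVOR_CHOCOLATE"
  }

  /// The raw server string, preserved even when `Kind` doesn't recognize it.
  let rawValue: String

  init(kind: Kind) {
    rawValue = kind.rawValue
  }

  init(rawValue: String) {
    // Unlike `init?(rawValue:)` on a raw-value enum, this never fails, so
    // values added to the backend after this SDK shipped still round-trip.
    self.rawValue = rawValue
  }

  /// Call sites keep the familiar leading-dot spelling via static properties.
  static var vanilla: Flavor { Flavor(kind: .vanilla) }
  static var chocolate: Flavor { Flavor(kind: .chocolate) }
}

let known: Flavor = .vanilla
let future = Flavor(rawValue: "FLAVOR_PISTACHIO") // unknown to this SDK version
print(known == .vanilla)  // true: Equatable is synthesized over rawValue
print(future.rawValue)    // "FLAVOR_PISTACHIO": nothing is lost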
@@ -139,17 +183,8 @@ extension SafetyRating: Decodable {}
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
-    let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = HarmCategory(rawValue: value) else {
-      VertexLog.error(
-        code: .generateContentResponseUnrecognizedHarmCategory,
-        "Unrecognized HarmCategory with value \"\(value)\"."
-      )
-      self = .unknown
-      return
-    }
-
-    self = decodedCategory
+    let rawValue = try decoder.singleValueContainer().decode(String.self)
+    self = HarmCategory(rawValue: rawValue)
   }
 }
 
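With the non-failable `init(rawValue:)` now doing the logging, the decoder initializer reduces to decoding the string and wrapping it. A hedged sketch of the resulting round-trip behavior (the JSON payload and the `HARM_CATEGORY_SOME_FUTURE_VALUE` string are invented for illustration; only the public API from this diff is assumed):

import Foundation

// Wrapped in an array so JSONDecoder doesn't rely on top-level-fragment support.
let json = Data(#"["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_SOME_FUTURE_VALUE"]"#.utf8)
let categories = try! JSONDecoder().decode([HarmCategory].self, from: json)

print(categories[0] == .harassment)  // true: synthesized Equatable compares rawValue
// The unrecognized value logs an error through VertexLog but is preserved,
// rather than being mapped to a lossy `.unknown` case as before:
print(categories[1].rawValue)        // "HARM_CATEGORY_SOME_FUTURE_VALUE"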