@@ -26,16 +26,50 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 
   /// The model-generated probability that the content falls under the specified harm ``category``.
   ///
-  /// See ``HarmProbability`` for a list of possible values.
+  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
+  /// of the ``probabilityScore``.
   ///
   /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability
 
+  /// The confidence score that the response is associated with the corresponding harm ``category``.
+  ///
+  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
+  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
+  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let probabilityScore: Float
+
+  /// The severity reflects the magnitude of how harmful a model response might be.
+  ///
+  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
+  /// the ``severityScore``.
+  public let severity: HarmSeverity
+
+  /// The severity score is the magnitude of how harmful a model response might be.
+  ///
+  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
+  /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let severityScore: Float
+
+  /// If true, the response was blocked.
+  public let blocked: Bool
+
   /// Initializes a new `SafetyRating` instance with the given category and probability.
   /// Use this initializer for SwiftUI previews or tests.
-  public init(category: HarmCategory, probability: HarmProbability) {
+  public init(category: HarmCategory,
+              probability: HarmProbability,
+              probabilityScore: Float,
+              severity: HarmSeverity,
+              severityScore: Float,
+              blocked: Bool) {
     self.category = category
     self.probability = probability
+    self.probabilityScore = probabilityScore
+    self.severity = severity
+    self.severityScore = severityScore
+    self.blocked = blocked
   }
 
   /// The probability that a given model output falls under a harmful content category.
@@ -74,6 +108,37 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     static let unrecognizedValueMessageCode =
       VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
+
+  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
+  public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
+    enum Kind: String {
+      case negligible = "HARM_SEVERITY_NEGLIGIBLE"
+      case low = "HARM_SEVERITY_LOW"
+      case medium = "HARM_SEVERITY_MEDIUM"
+      case high = "HARM_SEVERITY_HIGH"
+    }
+
+    /// Negligible level of harm severity.
+    public static let negligible = HarmSeverity(kind: .negligible)
+
+    /// Low level of harm severity.
+    public static let low = HarmSeverity(kind: .low)
+
+    /// Medium level of harm severity.
+    public static let medium = HarmSeverity(kind: .medium)
+
+    /// High level of harm severity.
+    public static let high = HarmSeverity(kind: .high)
+
+    /// Returns the raw string representation of the `HarmSeverity` value.
+    ///
+    /// > Note: This value directly corresponds to the values in the [REST
+    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
+    public let rawValue: String
+
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
+  }
 }
 
 /// A type used to specify a threshold for harmful content, beyond which the model will return a
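The widened initializer in the first hunk is intended for SwiftUI previews and tests, so a hand-rolled rating never has to round-trip through the decoder. A minimal sketch of building one, assuming the `.harassment` value exists on `HarmCategory` (that type's members are not shown in this diff) and using illustrative score values:

```swift
import FirebaseVertexAI

// Hand-built rating for a preview or unit test. Pairing a 0.1
// probabilityScore with `.negligible` is illustrative only, not a
// mapping guaranteed by the SDK.
let rating = SafetyRating(
  category: .harassment, // assumed member of HarmCategory; not shown in this diff
  probability: .negligible,
  probabilityScore: 0.1,
  severity: .negligible,
  severityScore: 0.0,
  blocked: false
)

// The discretized and raw values can be asserted independently.
assert(rating.probability == .negligible)
assert(rating.severityScore == 0.0)
```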
@@ -164,7 +229,31 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
 // MARK: - Codable Conformances
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
-extension SafetyRating: Decodable {}
+extension SafetyRating: Decodable {
+  enum CodingKeys: CodingKey {
+    case category
+    case probability
+    case probabilityScore
+    case severity
+    case severityScore
+    case blocked
+  }
+
+  public init(from decoder: any Decoder) throws {
+    let container = try decoder.container(keyedBy: CodingKeys.self)
+    category = try container.decode(HarmCategory.self, forKey: .category)
+    probability = try container.decode(HarmProbability.self, forKey: .probability)
+
+    // The following 3 fields are only omitted in our test data.
+    probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
+    severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ??
+      HarmSeverity(rawValue: "HARM_SEVERITY_UNSPECIFIED")
+    severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0
+
+    // The blocked field is only included when true.
+    blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
+  }
+}
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension SafetySetting.HarmBlockThreshold: Encodable {}
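Because the custom `init(from:)` above backfills defaults for omitted keys, a payload missing `probabilityScore`, `severity`, `severityScore`, and `blocked` still decodes. A sketch of that behavior, assuming the types from this file are in scope; the JSON fragments are illustrative, not captured API output:

```swift
import Foundation

// Full payload, as the backend would send it.
let full = Data(#"""
{
  "category": "HARM_CATEGORY_HARASSMENT",
  "probability": "NEGLIGIBLE",
  "probabilityScore": 0.1,
  "severity": "HARM_SEVERITY_NEGLIGIBLE",
  "severityScore": 0.1
}
"""#.utf8)

// Minimal payload, like the test data described in the decoder comments.
let minimal = Data(#"""
{ "category": "HARM_CATEGORY_HARASSMENT", "probability": "NEGLIGIBLE" }
"""#.utf8)

let decoder = JSONDecoder()
let a = try decoder.decode(SafetyRating.self, from: full)
let b = try decoder.decode(SafetyRating.self, from: minimal)

// Omitted fields fall back to 0.0 / HARM_SEVERITY_UNSPECIFIED / false.
assert(a.blocked == false && b.blocked == false)
assert(b.probabilityScore == 0.0)
assert(b.severity.rawValue == "HARM_SEVERITY_UNSPECIFIED")
```

The `blocked` default matches the proto3 convention noted in the decoder: the field is only serialized when true, so its absence is meaningful rather than an error.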