@@ -19,18 +19,21 @@ import Foundation
 /// responses that exceed a certain threshold.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 public struct SafetyRating: Equatable, Hashable, Sendable {
-  /// The category describing the potential harm a piece of content may pose. See
-  /// ``SafetySetting/HarmCategory`` for a list of possible values.
-  public let category: SafetySetting.HarmCategory
-
-  /// The model-generated probability that a given piece of content falls under the harm category
-  /// described in ``SafetySetting/HarmCategory``. This does not indicate the severity of harm for a
-  /// piece of content. See ``HarmProbability`` for a list of possible values.
+  /// The category describing the potential harm a piece of content may pose.
+  ///
+  /// See ``HarmCategory`` for a list of possible values.
+  public let category: HarmCategory
+
+  /// The model-generated probability that the content falls under the specified harm ``category``.
+  ///
+  /// See ``HarmProbability`` for a list of possible values.
+  ///
+  /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability

   /// Initializes a new `SafetyRating` instance with the given category and probability.
   /// Use this initializer for SwiftUI previews or tests.
-  public init(category: SafetySetting.HarmCategory, probability: HarmProbability) {
+  public init(category: HarmCategory, probability: HarmProbability) {
     self.category = category
     self.probability = probability
   }
@@ -63,28 +66,6 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 /// fallback response instead of generated content.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 public struct SafetySetting {
-  /// A type describing safety attributes, which include harmful categories and topics that can
-  /// be considered sensitive.
-  public enum HarmCategory: String, Sendable {
-    /// Unknown. A new server value that isn't recognized by the SDK.
-    case unknown = "HARM_CATEGORY_UNKNOWN"
-
-    /// Unspecified by the server.
-    case unspecified = "HARM_CATEGORY_UNSPECIFIED"
-
-    /// Harassment content.
-    case harassment = "HARM_CATEGORY_HARASSMENT"
-
-    /// Negative or harmful comments targeting identity and/or protected attributes.
-    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
-
-    /// Contains references to sexual acts or other lewd content.
-    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
-
-    /// Promotes or enables access to harmful goods, services, or activities.
-    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
-  }
-
   /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
   public enum BlockThreshold: String, Sendable {
     // Content with `.negligible` will be allowed.
@@ -118,6 +99,24 @@ public struct SafetySetting {
   }
 }

+/// Categories describing the potential harm a piece of content may pose.
+public enum HarmCategory: String, Sendable {
+  /// Unknown. A new server value that isn't recognized by the SDK.
+  case unknown = "HARM_CATEGORY_UNKNOWN"
+
+  /// Harassment content.
+  case harassment = "HARM_CATEGORY_HARASSMENT"
+
+  /// Negative or harmful comments targeting identity and/or protected attributes.
+  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+
+  /// Contains references to sexual acts or other lewd content.
+  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+
+  /// Promotes or enables access to harmful goods, services, or activities.
+  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+}
+
 // MARK: - Codable Conformances

 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
@@ -139,10 +138,10 @@ extension SafetyRating.HarmProbability: Decodable {
 extension SafetyRating: Decodable {}

 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
-extension SafetySetting.HarmCategory: Codable {
+extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
     let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = SafetySetting.HarmCategory(rawValue: value) else {
+    guard let decodedCategory = HarmCategory(rawValue: value) else {
       Logging.default
         .error("[FirebaseVertexAI] Unrecognized HarmCategory with value \"\(value)\".")
       self = .unknown
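A minimal usage sketch of the API after this change (not part of the diff). It assumes the module is imported as `FirebaseVertexAI`, that `SafetyRating.HarmProbability` exposes a `.negligible` case (referenced in the `BlockThreshold` comment above), and that the JSON payload and the `HARM_CATEGORY_SOMETHING_NEW` value are hypothetical, for illustration only:

```swift
import Foundation
import FirebaseVertexAI

// Before this change, the category was nested under SafetySetting:
//   SafetyRating(category: SafetySetting.HarmCategory.harassment, probability: .negligible)
// With HarmCategory now a top-level enum, call sites reference it directly:
let rating = SafetyRating(category: .harassment, probability: .negligible)

// Decoding still falls back to `.unknown` for raw values the SDK does not
// recognize ("HARM_CATEGORY_SOMETHING_NEW" is a made-up server value).
let payload = Data(#"["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_SOMETHING_NEW"]"#.utf8)
do {
  let categories = try JSONDecoder().decode([HarmCategory].self, from: payload)
  // categories == [.harassment, .unknown]
  print(categories)
} catch {
  print("Decoding failed: \(error)")
}
```

Note that the top-level `HarmCategory` drops the `unspecified` case, so any exhaustive `switch` over the old `SafetySetting.HarmCategory` will need updating when migrating.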