Skip to content

Commit 3cab646

Browse files
committed
[Vertex AI] Refactor HarmCategory enum (#13686)
# Conflicts:
#	FirebaseVertexAI/CHANGELOG.md
1 parent c07397c commit 3cab646

File tree

3 files changed

+37
-34
lines changed

3 files changed

+37
-34
lines changed

FirebaseVertexAI/CHANGELOG.md

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1,3 +1,8 @@
1+
# 11.4.0
2+
- [changed] **Breaking Change**: The `HarmCategory` enum is no longer nested
3+
inside the `SafetySetting` struct and the `unspecified` case has been
4+
removed. (#13686)
5+
16
# 11.3.0
27
- [added] Added `Decodable` conformance for `FunctionResponse`. (#13606)
38
- [changed] **Breaking Change**: Reverted refactor of `GenerativeModel` and

FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -16,15 +16,14 @@ import FirebaseVertexAI
1616
import MarkdownUI
1717
import SwiftUI
1818

19-
extension SafetySetting.HarmCategory: CustomStringConvertible {
19+
extension HarmCategory: CustomStringConvertible {
2020
public var description: String {
2121
switch self {
2222
case .dangerousContent: "Dangerous content"
2323
case .harassment: "Harassment"
2424
case .hateSpeech: "Hate speech"
2525
case .sexuallyExplicit: "Sexually explicit"
2626
case .unknown: "Unknown"
27-
case .unspecified: "Unspecified"
2827
}
2928
}
3029
}

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 31 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -19,18 +19,21 @@ import Foundation
1919
/// responses that exceed a certain threshold.
2020
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
2121
public struct SafetyRating: Equatable, Hashable, Sendable {
22-
/// The category describing the potential harm a piece of content may pose. See
23-
/// ``SafetySetting/HarmCategory`` for a list of possible values.
24-
public let category: SafetySetting.HarmCategory
25-
26-
/// The model-generated probability that a given piece of content falls under the harm category
27-
/// described in ``SafetySetting/HarmCategory``. This does not indicate the severity of harm for a
28-
/// piece of content. See ``HarmProbability`` for a list of possible values.
22+
/// The category describing the potential harm a piece of content may pose.
23+
///
24+
/// See ``HarmCategory`` for a list of possible values.
25+
public let category: HarmCategory
26+
27+
/// The model-generated probability that the content falls under the specified harm ``category``.
28+
///
29+
/// See ``HarmProbability`` for a list of possible values.
30+
///
31+
/// > Important: This does not indicate the severity of harm for a piece of content.
2932
public let probability: HarmProbability
3033

3134
/// Initializes a new `SafetyRating` instance with the given category and probability.
3235
/// Use this initializer for SwiftUI previews or tests.
33-
public init(category: SafetySetting.HarmCategory, probability: HarmProbability) {
36+
public init(category: HarmCategory, probability: HarmProbability) {
3437
self.category = category
3538
self.probability = probability
3639
}
@@ -63,28 +66,6 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
6366
/// fallback response instead of generated content.
6467
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
6568
public struct SafetySetting {
66-
/// A type describing safety attributes, which include harmful categories and topics that can
67-
/// be considered sensitive.
68-
public enum HarmCategory: String, Sendable {
69-
/// Unknown. A new server value that isn't recognized by the SDK.
70-
case unknown = "HARM_CATEGORY_UNKNOWN"
71-
72-
/// Unspecified by the server.
73-
case unspecified = "HARM_CATEGORY_UNSPECIFIED"
74-
75-
/// Harassment content.
76-
case harassment = "HARM_CATEGORY_HARASSMENT"
77-
78-
/// Negative or harmful comments targeting identity and/or protected attributes.
79-
case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
80-
81-
/// Contains references to sexual acts or other lewd content.
82-
case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
83-
84-
/// Promotes or enables access to harmful goods, services, or activities.
85-
case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
86-
}
87-
8869
/// Block at and beyond a specified ``SafetyRating/HarmProbability``.
8970
public enum BlockThreshold: String, Sendable {
9071
// Content with `.negligible` will be allowed.
@@ -118,6 +99,24 @@ public struct SafetySetting {
11899
}
119100
}
120101

102+
/// Categories describing the potential harm a piece of content may pose.
103+
public enum HarmCategory: String, Sendable {
104+
/// Unknown. A new server value that isn't recognized by the SDK.
105+
case unknown = "HARM_CATEGORY_UNKNOWN"
106+
107+
/// Harassment content.
108+
case harassment = "HARM_CATEGORY_HARASSMENT"
109+
110+
/// Negative or harmful comments targeting identity and/or protected attributes.
111+
case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
112+
113+
/// Contains references to sexual acts or other lewd content.
114+
case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
115+
116+
/// Promotes or enables access to harmful goods, services, or activities.
117+
case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
118+
}
119+
121120
// MARK: - Codable Conformances
122121

123122
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
@@ -139,10 +138,10 @@ extension SafetyRating.HarmProbability: Decodable {
139138
extension SafetyRating: Decodable {}
140139

141140
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
142-
extension SafetySetting.HarmCategory: Codable {
141+
extension HarmCategory: Codable {
143142
public init(from decoder: Decoder) throws {
144143
let value = try decoder.singleValueContainer().decode(String.self)
145-
guard let decodedCategory = SafetySetting.HarmCategory(rawValue: value) else {
144+
guard let decodedCategory = HarmCategory(rawValue: value) else {
146145
Logging.default
147146
.error("[FirebaseVertexAI] Unrecognized HarmCategory with value \"\(value)\".")
148147
self = .unknown

0 commit comments

Comments (0)