7 changes: 6 additions & 1 deletion FirebaseVertexAI/CHANGELOG.md
@@ -1,4 +1,9 @@
-# Unreleased
+# 11.4.0
+- [changed] **Breaking Change**: The `HarmCategory` enum is no longer nested
+  inside the `SafetySetting` struct and the `unspecified` case has been
+  removed. (#13686)
+
+# 11.3.0
 - [added] Added `Decodable` conformance for `FunctionResponse`. (#13606)
 
 # 11.2.0
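A minimal migration sketch for the breaking change noted above; the variable is illustrative, and only the type names shown in this diff are assumed.

```swift
import FirebaseVertexAI

// Before (11.3.x and earlier): the category type was nested inside SafetySetting.
// let category: SafetySetting.HarmCategory = .harassment

// After (11.4.0): HarmCategory is a top-level type, so the SafetySetting prefix goes away.
let category: HarmCategory = .harassment

// `.unspecified` was removed, so delete any `case .unspecified:` branches;
// `.unknown` still covers values the SDK does not recognize.
```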
@@ -16,15 +16,14 @@ import FirebaseVertexAI
 import MarkdownUI
 import SwiftUI
 
-extension SafetySetting.HarmCategory: CustomStringConvertible {
+extension HarmCategory: CustomStringConvertible {
   public var description: String {
     switch self {
     case .dangerousContent: "Dangerous content"
     case .harassment: "Harassment"
     case .hateSpeech: "Hate speech"
     case .sexuallyExplicit: "Sexually explicit"
     case .unknown: "Unknown"
-    case .unspecified: "Unspecified"
     }
   }
 }
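A short usage sketch for the updated extension; the printed string comes straight from the `description` switch above.

```swift
import FirebaseVertexAI

// CustomStringConvertible drives String(describing:) and string interpolation.
let label = String(describing: HarmCategory.hateSpeech)
print(label) // Prints "Hate speech"
```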
63 changes: 31 additions & 32 deletions FirebaseVertexAI/Sources/Safety.swift
@@ -19,18 +19,21 @@ import Foundation
 /// responses that exceed a certain threshold.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 public struct SafetyRating: Equatable, Hashable, Sendable {
-  /// The category describing the potential harm a piece of content may pose. See
-  /// ``SafetySetting/HarmCategory`` for a list of possible values.
-  public let category: SafetySetting.HarmCategory
-
-  /// The model-generated probability that a given piece of content falls under the harm category
-  /// described in ``SafetySetting/HarmCategory``. This does not indicate the severity of harm for a
-  /// piece of content. See ``HarmProbability`` for a list of possible values.
+  /// The category describing the potential harm a piece of content may pose.
+  ///
+  /// See ``HarmCategory`` for a list of possible values.
+  public let category: HarmCategory
+
+  /// The model-generated probability that the content falls under the specified harm ``category``.
+  ///
+  /// See ``HarmProbability`` for a list of possible values.
+  ///
+  /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability
 
   /// Initializes a new `SafetyRating` instance with the given category and probability.
   /// Use this initializer for SwiftUI previews or tests.
-  public init(category: SafetySetting.HarmCategory, probability: HarmProbability) {
+  public init(category: HarmCategory, probability: HarmProbability) {
     self.category = category
     self.probability = probability
   }
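A sketch of the documented preview/test initializer after the rename. `.negligible` is assumed to be a `HarmProbability` case (it is referenced in a comment later in this file); everything else is taken from the declarations above.

```swift
import FirebaseVertexAI

// Construct a rating directly, as suggested for SwiftUI previews or tests.
let rating = SafetyRating(category: .dangerousContent, probability: .negligible)

// SafetyRating is Equatable and Hashable, so it can be compared or stored in sets.
let isDangerous = rating.category == .dangerousContent // true
```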
@@ -63,28 +66,6 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 /// fallback response instead of generated content.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 public struct SafetySetting {
-  /// A type describing safety attributes, which include harmful categories and topics that can
-  /// be considered sensitive.
-  public enum HarmCategory: String, Sendable {
-    /// Unknown. A new server value that isn't recognized by the SDK.
-    case unknown = "HARM_CATEGORY_UNKNOWN"
-
-    /// Unspecified by the server.
-    case unspecified = "HARM_CATEGORY_UNSPECIFIED"
-
-    /// Harassment content.
-    case harassment = "HARM_CATEGORY_HARASSMENT"
-
-    /// Negative or harmful comments targeting identity and/or protected attributes.
-    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
-
-    /// Contains references to sexual acts or other lewd content.
-    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
-
-    /// Promotes or enables access to harmful goods, services, or activities.
-    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
-  }
-
   /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
   public enum BlockThreshold: String, Sendable {
     // Content with `.negligible` will be allowed.
@@ -118,6 +99,24 @@
   }
 }
 
+/// Categories describing the potential harm a piece of content may pose.
+public enum HarmCategory: String, Sendable {
+  /// Unknown. A new server value that isn't recognized by the SDK.
+  case unknown = "HARM_CATEGORY_UNKNOWN"
+
+  /// Harassment content.
+  case harassment = "HARM_CATEGORY_HARASSMENT"
+
+  /// Negative or harmful comments targeting identity and/or protected attributes.
+  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+
+  /// Contains references to sexual acts or other lewd content.
+  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+
+  /// Promotes or enables access to harmful goods, services, or activities.
+  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+}
+
 // MARK: - Codable Conformances
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
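A brief sketch of switching over the now top-level enum; the helper functions are illustrative, not part of the SDK.

```swift
import FirebaseVertexAI

// The raw values mirror the server's wire format shown above.
func wireValue(for category: HarmCategory) -> String {
  category.rawValue // e.g. "HARM_CATEGORY_HATE_SPEECH"
}

// Exhaustive switches no longer need (or allow) a `.unspecified` branch.
func isBlockable(_ category: HarmCategory) -> Bool {
  switch category {
  case .harassment, .hateSpeech, .sexuallyExplicit, .dangerousContent:
    return true
  case .unknown:
    return false
  }
}
```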
@@ -139,10 +138,10 @@ extension SafetyRating.HarmProbability: Decodable {
 extension SafetyRating: Decodable {}
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
-extension SafetySetting.HarmCategory: Codable {
+extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
     let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = SafetySetting.HarmCategory(rawValue: value) else {
+    guard let decodedCategory = HarmCategory(rawValue: value) else {
       Logging.default
         .error("[FirebaseVertexAI] Unrecognized HarmCategory with value \"\(value)\".")
       self = .unknown
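A quick sketch of the decoding fallback implemented above; the JSON payload is illustrative, and it is wrapped in an array only so `JSONDecoder` accepts it on older OS versions.

```swift
import FirebaseVertexAI
import Foundation

// An unrecognized server value decodes to `.unknown` instead of throwing,
// and the SDK logs an error about the unrecognized value.
let json = Data(#"["HARM_CATEGORY_SOMETHING_NEW"]"#.utf8)
let categories = (try? JSONDecoder().decode([HarmCategory].self, from: json)) ?? []
// categories == [.unknown]
```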