3 changes: 3 additions & 0 deletions FirebaseVertexAI/CHANGELOG.md
@@ -58,6 +58,9 @@
`totalBillableCharacters` counts, where applicable. (#13813)
- [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
that may be used to harm civic integrity. (#13728)
- [added] Added `probabilityScore`, `severity`, and `severityScore` to
  `SafetyRating` to provide more fine-grained detail on blocked responses.
  (#13875)
- [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
filter. (#13863)
- [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
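The `SafetyRating` additions listed above can be inspected on each response candidate. The following is a minimal sketch, not part of this PR: the `logSafetyDetails` function name and the 0.7 cutoff are illustrative, while the `candidates`, `safetyRatings`, and `SafetyRating` properties are the ones shown in this diff.

```swift
import FirebaseVertexAI

/// Sketch: print the fine-grained safety details attached to a response.
/// Assumes `response` is a `GenerateContentResponse` returned by the SDK.
func logSafetyDetails(for response: GenerateContentResponse) {
  for candidate in response.candidates {
    for rating in candidate.safetyRatings {
      // `probability` and `severity` are discretized forms of the raw scores.
      print("\(rating.category): probability=\(rating.probability) " +
            "(score \(rating.probabilityScore)), severity=\(rating.severity) " +
            "(score \(rating.severityScore)), blocked=\(rating.blocked)")

      // Illustrative policy: treat blocked or high-severity content specially.
      if rating.blocked || rating.severityScore >= 0.7 {
        // Handle blocked or high-severity content here.
      }
    }
  }
}
```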
72 changes: 64 additions & 8 deletions FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
@@ -168,10 +168,38 @@ struct ErrorDetailsView: View
Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
"""),
safetyRatings: [
SafetyRating(category: .dangerousContent, probability: .high),
SafetyRating(category: .harassment, probability: .low),
SafetyRating(category: .hateSpeech, probability: .low),
SafetyRating(category: .sexuallyExplicit, probability: .low),
SafetyRating(
category: .dangerousContent,
probability: .medium,
probabilityScore: 0.8,
severity: .medium,
severityScore: 0.9,
blocked: false
),
SafetyRating(
category: .harassment,
probability: .low,
probabilityScore: 0.5,
severity: .low,
severityScore: 0.6,
blocked: false
),
SafetyRating(
category: .hateSpeech,
probability: .low,
probabilityScore: 0.3,
severity: .medium,
severityScore: 0.2,
blocked: false
),
SafetyRating(
category: .sexuallyExplicit,
probability: .low,
probabilityScore: 0.2,
severity: .negligible,
severityScore: 0.5,
blocked: false
),
],
finishReason: FinishReason.maxTokens,
citationMetadata: nil),
@@ -190,10 +218,38 @@ struct ErrorDetailsView: View
Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
"""),
safetyRatings: [
SafetyRating(category: .dangerousContent, probability: .high),
SafetyRating(category: .harassment, probability: .low),
SafetyRating(category: .hateSpeech, probability: .low),
SafetyRating(category: .sexuallyExplicit, probability: .low),
SafetyRating(
category: .dangerousContent,
probability: .low,
probabilityScore: 0.8,
severity: .medium,
severityScore: 0.9,
blocked: false
),
SafetyRating(
category: .harassment,
probability: .low,
probabilityScore: 0.5,
severity: .low,
severityScore: 0.6,
blocked: false
),
SafetyRating(
category: .hateSpeech,
probability: .low,
probabilityScore: 0.3,
severity: .medium,
severityScore: 0.2,
blocked: false
),
SafetyRating(
category: .sexuallyExplicit,
probability: .low,
probabilityScore: 0.2,
severity: .negligible,
severityScore: 0.5,
blocked: false
),
],
finishReason: FinishReason.other,
citationMetadata: nil),
64 changes: 48 additions & 16 deletions FirebaseVertexAI/Sample/ChatSample/Views/ErrorView.swift
@@ -36,22 +36,54 @@ struct ErrorView: View
#Preview {
NavigationView {
let errorPromptBlocked = GenerateContentError.promptBlocked(
response: GenerateContentResponse(candidates: [
CandidateResponse(content: ModelContent(role: "model", parts: [
"""
A _hypothetical_ model response.
Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
""",
]),
safetyRatings: [
SafetyRating(category: .dangerousContent, probability: .high),
SafetyRating(category: .harassment, probability: .low),
SafetyRating(category: .hateSpeech, probability: .low),
SafetyRating(category: .sexuallyExplicit, probability: .low),
],
finishReason: FinishReason.other,
citationMetadata: nil),
])
response: GenerateContentResponse(
candidates: [
CandidateResponse(
content: ModelContent(role: "model", parts: [
"""
A _hypothetical_ model response.
Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
""",
]),
safetyRatings: [
SafetyRating(
category: .dangerousContent,
probability: .high,
probabilityScore: 0.8,
severity: .medium,
severityScore: 0.9,
blocked: true
),
SafetyRating(
category: .harassment,
probability: .low,
probabilityScore: 0.5,
severity: .low,
severityScore: 0.6,
blocked: false
),
SafetyRating(
category: .hateSpeech,
probability: .low,
probabilityScore: 0.3,
severity: .medium,
severityScore: 0.2,
blocked: false
),
SafetyRating(
category: .sexuallyExplicit,
probability: .low,
probabilityScore: 0.2,
severity: .negligible,
severityScore: 0.5,
blocked: false
),
],
finishReason: FinishReason.other,
citationMetadata: nil
),
]
)
)
List {
MessageView(message: ChatMessage.samples[0])
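Because the expanded `SafetyRating` initializer is intended for previews and tests (see `Safety.swift` below), a test might exercise it roughly as follows. This is a hedged sketch: the test class, method name, and expected values are hypothetical; only the initializer signature comes from this diff.

```swift
import XCTest
import FirebaseVertexAI

final class SafetyRatingPreviewTests: XCTestCase {
  func testBlockedRatingRetainsScores() {
    // Construct a rating the same way the sample previews above do.
    let rating = SafetyRating(
      category: .dangerousContent,
      probability: .high,
      probabilityScore: 0.8,
      severity: .medium,
      severityScore: 0.9,
      blocked: true
    )

    XCTAssertEqual(rating.category, .dangerousContent)
    XCTAssertEqual(rating.probability, .high)
    XCTAssertEqual(rating.probabilityScore, 0.8, accuracy: 0.001)
    XCTAssertEqual(rating.severity, .medium)
    XCTAssertEqual(rating.severityScore, 0.9, accuracy: 0.001)
    XCTAssertTrue(rating.blocked)
  }
}
```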
95 changes: 92 additions & 3 deletions FirebaseVertexAI/Sources/Safety.swift
@@ -26,16 +26,50 @@ public struct SafetyRating: Equatable, Hashable, Sendable {

/// The model-generated probability that the content falls under the specified harm ``category``.
///
/// See ``HarmProbability`` for a list of possible values.
/// See ``HarmProbability`` for a list of possible values. This is a discretized representation
/// of the ``probabilityScore``.
///
/// > Important: This does not indicate the severity of harm for a piece of content.
public let probability: HarmProbability

/// The confidence score that the response is associated with the corresponding harm ``category``.
///
/// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
/// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
/// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
/// in the Google Cloud documentation for more details.
public let probabilityScore: Float

/// The severity reflects the magnitude of how harmful a model response might be.
///
/// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
/// the ``severityScore``.
public let severity: HarmSeverity

/// The severity score is the magnitude of how harmful a model response might be.
///
/// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
/// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
/// in the Google Cloud documentation for more details.
public let severityScore: Float

/// If true, the response was blocked.
public let blocked: Bool

  /// Initializes a new `SafetyRating` instance with the given category, probability, scores,
  /// severity, and blocked status.
/// Use this initializer for SwiftUI previews or tests.
public init(category: HarmCategory, probability: HarmProbability) {
public init(category: HarmCategory,
probability: HarmProbability,
probabilityScore: Float,
severity: HarmSeverity,
severityScore: Float,
blocked: Bool) {
self.category = category
self.probability = probability
self.probabilityScore = probabilityScore
self.severity = severity
self.severityScore = severityScore
self.blocked = blocked
}

/// The probability that a given model output falls under a harmful content category.
@@ -74,6 +108,37 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
static let unrecognizedValueMessageCode =
VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
}

/// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
enum Kind: String {
case negligible = "HARM_SEVERITY_NEGLIGIBLE"
case low = "HARM_SEVERITY_LOW"
case medium = "HARM_SEVERITY_MEDIUM"
case high = "HARM_SEVERITY_HIGH"
}

/// Negligible level of harm severity.
public static let negligible = HarmSeverity(kind: .negligible)

/// Low level of harm severity.
public static let low = HarmSeverity(kind: .low)

/// Medium level of harm severity.
public static let medium = HarmSeverity(kind: .medium)

/// High level of harm severity.
public static let high = HarmSeverity(kind: .high)

/// Returns the raw string representation of the `HarmSeverity` value.
///
/// > Note: This value directly corresponds to the values in the [REST
/// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
public let rawValue: String

static let unrecognizedValueMessageCode =
VertexLog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
}
}

/// A type used to specify a threshold for harmful content, beyond which the model will return a
@@ -164,7 +229,31 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
// MARK: - Codable Conformances

@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetyRating: Decodable {}
extension SafetyRating: Decodable {
enum CodingKeys: CodingKey {
case category
case probability
case probabilityScore
case severity
case severityScore
case blocked
}

public init(from decoder: any Decoder) throws {
let container = try decoder.container(keyedBy: CodingKeys.self)
category = try container.decode(HarmCategory.self, forKey: .category)
probability = try container.decode(HarmProbability.self, forKey: .probability)

// The following 3 fields are only omitted in our test data.
probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ??
HarmSeverity(rawValue: "HARM_SEVERITY_UNSPECIFIED")
severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0

// The blocked field is only included when true.
blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
}
}

@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetySetting.HarmBlockThreshold: Encodable {}
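To make the decoding defaults above concrete, here is a minimal sketch of how omitted fields fall back. The JSON payload and raw enum strings are illustrative of the REST wire format rather than taken from this diff; only the default values (`0.0`, `HARM_SEVERITY_UNSPECIFIED`, `false`) come from the `Decodable` conformance shown above.

```swift
import Foundation
import FirebaseVertexAI

let decoder = JSONDecoder()

// Payload with only the required fields, similar to the SDK's test data.
let minimalJSON = """
{
  "category": "HARM_CATEGORY_HARASSMENT",
  "probability": "LOW"
}
""".data(using: .utf8)!

do {
  let rating = try decoder.decode(SafetyRating.self, from: minimalJSON)
  // Omitted fields fall back to the defaults in the custom initializer.
  print(rating.probabilityScore)   // 0.0
  print(rating.severity.rawValue)  // "HARM_SEVERITY_UNSPECIFIED"
  print(rating.severityScore)      // 0.0
  print(rating.blocked)            // false
} catch {
  print("Decoding failed: \(error)")
}
```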
1 change: 1 addition & 0 deletions FirebaseVertexAI/Sources/VertexLog.swift
@@ -49,6 +49,7 @@ enum VertexLog {
case generateContentResponseUnrecognizedBlockThreshold = 3004
case generateContentResponseUnrecognizedHarmProbability = 3005
case generateContentResponseUnrecognizedHarmCategory = 3006
case generateContentResponseUnrecognizedHarmSeverity = 3007

// SDK State Errors
case generateContentResponseNoCandidates = 4000