
Commit 27cffd9

[Vertex AI] Refactor HarmBlockThreshold as a struct and add .off (#13863)
1 parent 67502af commit 27cffd9

File tree

3 files changed: +47 -6 lines changed

FirebaseVertexAI/CHANGELOG.md
FirebaseVertexAI/Sources/Safety.swift
FirebaseVertexAI/Tests/Integration/IntegrationTests.swift

FirebaseVertexAI/CHANGELOG.md

Lines changed: 6 additions & 0 deletions

@@ -46,6 +46,12 @@
 - [changed] The response from `GenerativeModel.countTokens(...)` now includes
   `systemInstruction`, `tools` and `generationConfig` in the `totalTokens` and
   `totalBillableCharacters` counts, where applicable. (#13813)
+- [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
+  that may be used to harm civic integrity. (#13728)
+- [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
+  filter. (#13863)
+- [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
+  `.spii` and `.malformedFunctionCall` that may be reported. (#13860)
 
 # 11.3.0
 - [added] Added `Decodable` conformance for `FunctionResponse`. (#13606)
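As a usage sketch of the two additions above: the call below mirrors the integration test later in this commit. The model name and the specific category/threshold pairings are illustrative placeholders, and it assumes `FirebaseApp.configure()` has already been called.

import FirebaseVertexAI

// Illustrative only; mirrors the integration test further down in this commit.
// Assumes `FirebaseApp.configure()` has already been called.
let model = VertexAI.vertexAI().generativeModel(
  modelName: "gemini-1.5-pro",
  safetySettings: [
    // New category from this release (#13728).
    SafetySetting(harmCategory: .civicIntegrity, threshold: .blockMediumAndAbove),
    // New threshold from this release (#13863): turns the safety filter off.
    SafetySetting(harmCategory: .dangerousContent, threshold: .off),
  ]
)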

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 29 additions & 6 deletions

@@ -90,18 +90,41 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 public struct SafetySetting {
   /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
-  public enum HarmBlockThreshold: String, Sendable {
-    // Content with `.negligible` will be allowed.
-    case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
+  public struct HarmBlockThreshold: EncodableProtoEnum, Sendable {
+    enum Kind: String {
+      case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
+      case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
+      case blockOnlyHigh = "BLOCK_ONLY_HIGH"
+      case blockNone = "BLOCK_NONE"
+      case off = "OFF"
+    }
+
+    /// Content with `.negligible` will be allowed.
+    public static var blockLowAndAbove: HarmBlockThreshold {
+      return self.init(kind: .blockLowAndAbove)
+    }
 
     /// Content with `.negligible` and `.low` will be allowed.
-    case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
+    public static var blockMediumAndAbove: HarmBlockThreshold {
+      return self.init(kind: .blockMediumAndAbove)
+    }
 
     /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
-    case blockOnlyHigh = "BLOCK_ONLY_HIGH"
+    public static var blockOnlyHigh: HarmBlockThreshold {
+      return self.init(kind: .blockOnlyHigh)
+    }
 
     /// All content will be allowed.
-    case blockNone = "BLOCK_NONE"
+    public static var blockNone: HarmBlockThreshold {
+      return self.init(kind: .blockNone)
+    }
+
+    /// Turn off the safety filter.
+    public static var off: HarmBlockThreshold {
+      return self.init(kind: .off)
+    }
+
+    let rawValue: String
   }
 
   enum CodingKeys: String, CodingKey {
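The refactor keeps the wire format of the old `String`-backed enum while allowing new thresholds such as `OFF` to be introduced without a source-breaking change for callers. Below is a minimal, self-contained sketch of that pattern; the `EncodableProtoEnum` shape shown here is an assumption (the real protocol is defined elsewhere in the FirebaseVertexAI sources and may differ), and `DemoThreshold` is a hypothetical stand-in for `HarmBlockThreshold`.

import Foundation

// Assumed shape of the helper protocol; the real `EncodableProtoEnum` lives
// elsewhere in the FirebaseVertexAI sources and may differ in detail.
protocol EncodableProtoEnum: Encodable {
  associatedtype Kind: RawRepresentable where Kind.RawValue == String
  var rawValue: String { get }
}

extension EncodableProtoEnum {
  // Encode the wrapper struct as a bare proto-enum string (e.g. "OFF"),
  // matching the raw values of the old `String`-backed enum.
  func encode(to encoder: Encoder) throws {
    var container = encoder.singleValueContainer()
    try container.encode(rawValue)
  }
}

// Hypothetical miniature of the pattern used by `HarmBlockThreshold` above.
struct DemoThreshold: EncodableProtoEnum {
  enum Kind: String {
    case off = "OFF"
  }

  /// Turn off the safety filter.
  static var off: DemoThreshold { DemoThreshold(kind: .off) }

  let rawValue: String

  init(kind: Kind) {
    rawValue = kind.rawValue
  }
}

let data = try! JSONEncoder().encode(["threshold": DemoThreshold.off])
print(String(data: data, encoding: .utf8)!)  // {"threshold":"OFF"}

On the wire, `DemoThreshold.off` still encodes as the plain string "OFF", so the struct-based form stays compatible with the previous enum's encoding.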

FirebaseVertexAI/Tests/Integration/IntegrationTests.swift

Lines changed: 12 additions & 0 deletions

@@ -84,6 +84,18 @@ final class IntegrationTests: XCTestCase {
 
   func testCountTokens_text() async throws {
     let prompt = "Why is the sky blue?"
+    model = vertex.generativeModel(
+      modelName: "gemini-1.5-pro",
+      generationConfig: generationConfig,
+      safetySettings: [
+        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
+        SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
+        SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
+        SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
+        SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
+      ],
+      systemInstruction: systemInstruction
+    )
 
     let response = try await model.countTokens(prompt)
89101
