Skip to content

Commit 411fceb

Browse files
committed
[Vertex AI] Add HarmBlockMethod enum and method property
1 parent 9cb9895 commit 411fceb

File tree

2 files changed

+24
-5
lines changed

2 files changed

+24
-5
lines changed

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,9 +173,24 @@ public struct SafetySetting {
173173
let rawValue: String
174174
}
175175

176+
/// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
177+
public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
178+
enum Kind: String {
179+
case severity = "SEVERITY"
180+
case probability = "PROBABILITY"
181+
}
182+
183+
public static let severity = HarmBlockMethod(kind: .severity)
184+
185+
public static let probability = HarmBlockMethod(kind: .probability)
186+
187+
let rawValue: String
188+
}
189+
176190
enum CodingKeys: String, CodingKey {
177191
case harmCategory = "category"
178192
case threshold
193+
case method
179194
}
180195

181196
/// The category this safety setting should be applied to.
@@ -184,10 +199,14 @@ public struct SafetySetting {
184199
/// The threshold describing what content should be blocked.
185200
public let threshold: HarmBlockThreshold
186201

202+
  /// The method of computing whether the ``HarmBlockThreshold`` has been exceeded;
  /// `nil` when no method was specified at initialization.
  public let method: HarmBlockMethod?
203+
187204
/// Initializes a new safety setting with the given category and threshold.
188-
public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
205+
public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
206+
method: HarmBlockMethod? = nil) {
189207
self.harmCategory = harmCategory
190208
self.threshold = threshold
209+
self.method = method
191210
}
192211
}
193212

FirebaseVertexAI/Tests/Integration/IntegrationTests.swift

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,8 @@ final class IntegrationTests: XCTestCase {
3030
parts: "You are a friendly and helpful assistant."
3131
)
3232
let safetySettings = [
33-
SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
34-
SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove),
33+
SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
34+
SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
3535
SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
3636
SafetySetting(harmCategory: .dangerousContent, threshold: .blockLowAndAbove),
3737
SafetySetting(harmCategory: .civicIntegrity, threshold: .blockLowAndAbove),
@@ -89,11 +89,11 @@ final class IntegrationTests: XCTestCase {
8989
modelName: "gemini-1.5-pro",
9090
generationConfig: generationConfig,
9191
safetySettings: [
92-
SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
92+
SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .severity),
9393
SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
9494
SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
9595
SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
96-
SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
96+
SafetySetting(harmCategory: .civicIntegrity, threshold: .off, method: .probability),
9797
],
9898
toolConfig: .init(functionCallingConfig: .auto()),
9999
systemInstruction: systemInstruction

0 commit comments

Comments
 (0)