diff --git a/FirebaseVertexAI/CHANGELOG.md b/FirebaseVertexAI/CHANGELOG.md
index 346ef9a70bf..c3b6b5462ef 100644
--- a/FirebaseVertexAI/CHANGELOG.md
+++ b/FirebaseVertexAI/CHANGELOG.md
@@ -63,6 +63,9 @@
   (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
+- [added] Added an optional `HarmBlockMethod` parameter `method` in
+  `SafetySetting` that configures whether responses are blocked based on the
+  `probability` and/or `severity` of content being in a `HarmCategory`. (#13876)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
   `.spii` and `.malformedFunctionCall` that may be reported. (#13860)
 - [added] Added new `BlockReason` values `.blocklist` and `.prohibitedContent`
diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 2ff4fe85f1c..655046db98a 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -173,9 +173,26 @@ public struct SafetySetting {
     let rawValue: String
   }
 
+  /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
+  public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
+    enum Kind: String {
+      case severity = "SEVERITY"
+      case probability = "PROBABILITY"
+    }
+
+    /// Use both probability and severity scores.
+    public static let severity = HarmBlockMethod(kind: .severity)
+
+    /// Use only the probability score.
+    public static let probability = HarmBlockMethod(kind: .probability)
+
+    let rawValue: String
+  }
+
   enum CodingKeys: String, CodingKey {
     case harmCategory = "category"
     case threshold
+    case method
   }
 
   /// The category this safety setting should be applied to.
@@ -184,10 +201,25 @@ public struct SafetySetting {
   /// The threshold describing what content should be blocked.
   public let threshold: HarmBlockThreshold
 
+  /// The method of computing whether the ``threshold`` has been exceeded.
+  public let method: HarmBlockMethod?
+
   /// Initializes a new safety setting with the given category and threshold.
-  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
+  ///
+  /// - Parameters:
+  ///   - harmCategory: The category this safety setting should be applied to.
+  ///   - threshold: The threshold describing what content should be blocked.
+  ///   - method: The method of computing whether the threshold has been exceeded; if not specified,
+  ///     the default method is ``HarmBlockMethod/severity`` for most models. See [harm block
+  ///     methods](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#how_to_configure_safety_filters)
+  ///     in the Google Cloud documentation for more details.
+  /// > Note: For models older than `gemini-1.5-flash` and `gemini-1.5-pro`, the default method
+  /// > is ``HarmBlockMethod/probability``.
+  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
+              method: HarmBlockMethod? = nil) {
     self.harmCategory = harmCategory
     self.threshold = threshold
+    self.method = method
   }
 }
 
diff --git a/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift b/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
index fee87108da7..b884a41e9a5 100644
--- a/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
+++ b/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
@@ -30,8 +30,8 @@ final class IntegrationTests: XCTestCase {
     parts: "You are a friendly and helpful assistant."
   )
   let safetySettings = [
-    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
-    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove),
+    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
+    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
     SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .dangerousContent, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .civicIntegrity, threshold: .blockLowAndAbove),
@@ -89,11 +89,11 @@ final class IntegrationTests: XCTestCase {
       modelName: "gemini-1.5-pro",
       generationConfig: generationConfig,
       safetySettings: [
-        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
+        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .severity),
         SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
         SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
         SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
-        SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
+        SafetySetting(harmCategory: .civicIntegrity, threshold: .off, method: .probability),
       ],
       toolConfig: .init(functionCallingConfig: .auto()),
       systemInstruction: systemInstruction
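
Usage sketch (not part of the diff): a minimal example of the new optional `method` argument, mirroring the integration-test configuration above. The `VertexAI.vertexAI().generativeModel(...)` entry point, the `gemini-1.5-flash` model name, and the particular category/threshold pairings are assumptions for illustration, not introduced by this change.

import FirebaseVertexAI

// Per-category safety settings; `method` selects how the block threshold is
// evaluated and may be omitted to use the backend default.
let safetySettings = [
  // Block based on the probability score only.
  SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
  // Block based on both probability and severity scores.
  SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove, method: .severity),
  // No `method` specified: the default applies (severity for most current
  // models, probability for models older than gemini-1.5-flash/pro).
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
]

// Assumed entry point; the model name is illustrative only.
let model = VertexAI.vertexAI().generativeModel(
  modelName: "gemini-1.5-flash",
  safetySettings: safetySettings
)

Because `method` defaults to `nil`, existing call sites such as `SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove)` continue to compile unchanged.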