
Commit fa2fc8f

[Vertex AI] Add HarmBlockMethod enum and method property
1 parent: 4e50fd8

File tree

2 files changed: +24 -5 lines


FirebaseVertexAI/Sources/Safety.swift

Lines changed: 20 additions & 1 deletion
@@ -108,9 +108,24 @@ public struct SafetySetting {
     let rawValue: String
   }
 
+  /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
+  public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
+    enum Kind: String {
+      case severity = "SEVERITY"
+      case probability = "PROBABILITY"
+    }
+
+    public static let severity = HarmBlockMethod(kind: .severity)
+
+    public static let probability = HarmBlockMethod(kind: .probability)
+
+    let rawValue: String
+  }
+
   enum CodingKeys: String, CodingKey {
     case harmCategory = "category"
     case threshold
+    case method
   }
 
   /// The category this safety setting should be applied to.
@@ -119,10 +134,14 @@ public struct SafetySetting {
   /// The threshold describing what content should be blocked.
   public let threshold: HarmBlockThreshold
 
+  public let method: HarmBlockMethod?
+
   /// Initializes a new safety setting with the given category and threshold.
-  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
+  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
+              method: HarmBlockMethod? = nil) {
     self.harmCategory = harmCategory
     self.threshold = threshold
+    self.method = method
   }
 }
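A minimal sketch (not part of this commit) of how the new method field would likely surface on the wire, assuming SafetySetting's synthesized Encodable conformance uses the CodingKeys above and that EncodableProtoEnum types encode as their rawValue strings; SafetySettingSketch and the category/threshold strings are hypothetical stand-ins:

import Foundation

// Hypothetical stand-in mirroring SafetySetting's CodingKeys from the diff above.
struct SafetySettingSketch: Encodable {
  enum CodingKeys: String, CodingKey {
    case harmCategory = "category"
    case threshold
    case method
  }

  let harmCategory: String
  let threshold: String
  // Synthesized Encodable uses encodeIfPresent for optionals,
  // so a nil method is omitted from the JSON entirely.
  let method: String?
}

let setting = SafetySettingSketch(
  harmCategory: "HARM_CATEGORY_HARASSMENT",
  threshold: "BLOCK_LOW_AND_ABOVE",
  method: "SEVERITY"
)
let json = try JSONEncoder().encode(setting)
print(String(decoding: json, as: UTF8.self))
// e.g. {"category":"HARM_CATEGORY_HARASSMENT","threshold":"BLOCK_LOW_AND_ABOVE","method":"SEVERITY"}
// (JSONEncoder does not guarantee key order)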

FirebaseVertexAI/Tests/Integration/IntegrationTests.swift

Lines changed: 4 additions & 4 deletions
@@ -30,8 +30,8 @@ final class IntegrationTests: XCTestCase {
     parts: "You are a friendly and helpful assistant."
   )
   let safetySettings = [
-    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
-    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove),
+    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
+    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
     SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .dangerousContent, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .civicIntegrity, threshold: .blockLowAndAbove),
@@ -89,11 +89,11 @@ final class IntegrationTests: XCTestCase {
     modelName: "gemini-1.5-pro",
     generationConfig: generationConfig,
     safetySettings: [
-      SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
+      SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .severity),
       SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
       SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
       SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
-      SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
+      SafetySetting(harmCategory: .civicIntegrity, threshold: .off, method: .probability),
     ],
     toolConfig: .init(functionCallingConfig: .auto()),
     systemInstruction: systemInstruction
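Since method defaults to nil in the new initializer, existing call sites keep compiling unchanged; a short sketch of mixing the old and new forms (assumes the FirebaseVertexAI module is imported):

import FirebaseVertexAI

let settings = [
  // Unchanged call site: method defaults to nil and is omitted from the request.
  SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
  // New call sites opt in to a per-category blocking method.
  SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
  SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
]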

0 commit comments
