Commit 9cb9895

[Vertex AI] Add HarmSeverity enum and SafetyRating properties (#13875)
1 parent 84f28c1 commit 9cb9895

File tree

6 files changed: +273 −39 lines changed


FirebaseVertexAI/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -58,6 +58,9 @@
   `totalBillableCharacters` counts, where applicable. (#13813)
 - [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
   that may be used to harm civic integrity. (#13728)
+- [added] Added `probabilityScore`, `severity` and `severityScore` in
+  `SafetyRating` to provide more fine-grained detail on blocked responses.
+  (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
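
For context, the new fields give callers per-category detail beyond the coarse `HarmProbability`. Below is a minimal sketch of reading them from a response; the `logSafetyDetail(for:)` helper and loop structure are hypothetical, while the property names come from this commit:

import FirebaseVertexAI

// Hypothetical helper: prints the fine-grained safety detail added in this
// commit for every candidate in a response.
func logSafetyDetail(for response: GenerateContentResponse) {
  for candidate in response.candidates {
    for rating in candidate.safetyRatings {
      // `probability` and `severity` are discretized forms of the raw
      // 0.0-1.0 scores reported alongside them.
      print(
        "\(rating.category): probability \(rating.probability) (\(rating.probabilityScore)), " +
          "severity \(rating.severity) (\(rating.severityScore)), blocked: \(rating.blocked)"
      )
    }
  }
}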

FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift

Lines changed: 64 additions & 8 deletions
@@ -168,10 +168,38 @@ struct ErrorDetailsView: View {
           Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
           """),
         safetyRatings: [
-          SafetyRating(category: .dangerousContent, probability: .high),
-          SafetyRating(category: .harassment, probability: .low),
-          SafetyRating(category: .hateSpeech, probability: .low),
-          SafetyRating(category: .sexuallyExplicit, probability: .low),
+          SafetyRating(
+            category: .dangerousContent,
+            probability: .medium,
+            probabilityScore: 0.8,
+            severity: .medium,
+            severityScore: 0.9,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .harassment,
+            probability: .low,
+            probabilityScore: 0.5,
+            severity: .low,
+            severityScore: 0.6,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .hateSpeech,
+            probability: .low,
+            probabilityScore: 0.3,
+            severity: .medium,
+            severityScore: 0.2,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .sexuallyExplicit,
+            probability: .low,
+            probabilityScore: 0.2,
+            severity: .negligible,
+            severityScore: 0.5,
+            blocked: false
+          ),
         ],
         finishReason: FinishReason.maxTokens,
         citationMetadata: nil),
@@ -190,10 +218,38 @@ struct ErrorDetailsView: View {
           Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
           """),
         safetyRatings: [
-          SafetyRating(category: .dangerousContent, probability: .high),
-          SafetyRating(category: .harassment, probability: .low),
-          SafetyRating(category: .hateSpeech, probability: .low),
-          SafetyRating(category: .sexuallyExplicit, probability: .low),
+          SafetyRating(
+            category: .dangerousContent,
+            probability: .low,
+            probabilityScore: 0.8,
+            severity: .medium,
+            severityScore: 0.9,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .harassment,
+            probability: .low,
+            probabilityScore: 0.5,
+            severity: .low,
+            severityScore: 0.6,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .hateSpeech,
+            probability: .low,
+            probabilityScore: 0.3,
+            severity: .medium,
+            severityScore: 0.2,
+            blocked: false
+          ),
+          SafetyRating(
+            category: .sexuallyExplicit,
+            probability: .low,
+            probabilityScore: 0.2,
+            severity: .negligible,
+            severityScore: 0.5,
+            blocked: false
+          ),
         ],
         finishReason: FinishReason.other,
         citationMetadata: nil),

FirebaseVertexAI/Sample/ChatSample/Views/ErrorView.swift

Lines changed: 48 additions & 16 deletions
@@ -36,22 +36,54 @@ struct ErrorView: View {
 #Preview {
   NavigationView {
     let errorPromptBlocked = GenerateContentError.promptBlocked(
-      response: GenerateContentResponse(candidates: [
-        CandidateResponse(content: ModelContent(role: "model", parts: [
-          """
-          A _hypothetical_ model response.
-          Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
-          """,
-        ]),
-        safetyRatings: [
-          SafetyRating(category: .dangerousContent, probability: .high),
-          SafetyRating(category: .harassment, probability: .low),
-          SafetyRating(category: .hateSpeech, probability: .low),
-          SafetyRating(category: .sexuallyExplicit, probability: .low),
-        ],
-        finishReason: FinishReason.other,
-        citationMetadata: nil),
-      ])
+      response: GenerateContentResponse(
+        candidates: [
+          CandidateResponse(
+            content: ModelContent(role: "model", parts: [
+              """
+              A _hypothetical_ model response.
+              Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo.
+              """,
+            ]),
+            safetyRatings: [
+              SafetyRating(
+                category: .dangerousContent,
+                probability: .high,
+                probabilityScore: 0.8,
+                severity: .medium,
+                severityScore: 0.9,
+                blocked: true
+              ),
+              SafetyRating(
+                category: .harassment,
+                probability: .low,
+                probabilityScore: 0.5,
+                severity: .low,
+                severityScore: 0.6,
+                blocked: false
+              ),
+              SafetyRating(
+                category: .hateSpeech,
+                probability: .low,
+                probabilityScore: 0.3,
+                severity: .medium,
+                severityScore: 0.2,
+                blocked: false
+              ),
+              SafetyRating(
+                category: .sexuallyExplicit,
+                probability: .low,
+                probabilityScore: 0.2,
+                severity: .negligible,
+                severityScore: 0.5,
+                blocked: false
+              ),
+            ],
+            finishReason: FinishReason.other,
+            citationMetadata: nil
+          ),
+        ]
+      )
     )
     List {
       MessageView(message: ChatMessage.samples[0])
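
The preview above constructs a `promptBlocked` error by hand. As a minimal sketch, application code might catch that error at a real call site and use the new `blocked` flag to find the offending rating; the `generate(with:prompt:)` function and its setup are hypothetical:

import FirebaseVertexAI

// Hypothetical call site; the `promptBlocked` error case and the
// `SafetyRating` properties come from the SDK, the rest is illustrative.
func generate(with model: GenerativeModel, prompt: String) async {
  do {
    let response = try await model.generateContent(prompt)
    print(response.text ?? "<no text in response>")
  } catch GenerateContentError.promptBlocked(let response) {
    // Identify which ratings actually triggered the block.
    let blockedRatings = response.candidates
      .flatMap { $0.safetyRatings }
      .filter { $0.blocked }
    for rating in blockedRatings {
      print("Blocked by \(rating.category), severity score \(rating.severityScore)")
    }
  } catch {
    print("Generation failed: \(error)")
  }
}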

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 92 additions & 3 deletions
@@ -26,16 +26,50 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 
   /// The model-generated probability that the content falls under the specified harm ``category``.
   ///
-  /// See ``HarmProbability`` for a list of possible values.
+  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
+  /// of the ``probabilityScore``.
   ///
   /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability
 
+  /// The confidence score that the response is associated with the corresponding harm ``category``.
+  ///
+  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
+  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
+  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let probabilityScore: Float
+
+  /// The severity reflects the magnitude of how harmful a model response might be.
+  ///
+  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
+  /// the ``severityScore``.
+  public let severity: HarmSeverity
+
+  /// The severity score is the magnitude of how harmful a model response might be.
+  ///
+  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
+  /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
+  public let severityScore: Float
+
+  /// If true, the response was blocked.
+  public let blocked: Bool
+
   /// Initializes a new `SafetyRating` instance with the given category and probability.
   /// Use this initializer for SwiftUI previews or tests.
-  public init(category: HarmCategory, probability: HarmProbability) {
+  public init(category: HarmCategory,
+              probability: HarmProbability,
+              probabilityScore: Float,
+              severity: HarmSeverity,
+              severityScore: Float,
+              blocked: Bool) {
     self.category = category
     self.probability = probability
+    self.probabilityScore = probabilityScore
+    self.severity = severity
+    self.severityScore = severityScore
+    self.blocked = blocked
   }
 
   /// The probability that a given model output falls under a harmful content category.
@@ -74,6 +108,37 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     static let unrecognizedValueMessageCode =
       VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
+
+  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
+  public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
+    enum Kind: String {
+      case negligible = "HARM_SEVERITY_NEGLIGIBLE"
+      case low = "HARM_SEVERITY_LOW"
+      case medium = "HARM_SEVERITY_MEDIUM"
+      case high = "HARM_SEVERITY_HIGH"
+    }
+
+    /// Negligible level of harm severity.
+    public static let negligible = HarmSeverity(kind: .negligible)
+
+    /// Low level of harm severity.
+    public static let low = HarmSeverity(kind: .low)
+
+    /// Medium level of harm severity.
+    public static let medium = HarmSeverity(kind: .medium)
+
+    /// High level of harm severity.
+    public static let high = HarmSeverity(kind: .high)
+
+    /// Returns the raw string representation of the `HarmSeverity` value.
+    ///
+    /// > Note: This value directly corresponds to the values in the [REST
+    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
+    public let rawValue: String
+
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
+  }
 }
 
 /// A type used to specify a threshold for harmful content, beyond which the model will return a
@@ -164,7 +229,31 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
 // MARK: - Codable Conformances
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
-extension SafetyRating: Decodable {}
+extension SafetyRating: Decodable {
+  enum CodingKeys: CodingKey {
+    case category
+    case probability
+    case probabilityScore
+    case severity
+    case severityScore
+    case blocked
+  }
+
+  public init(from decoder: any Decoder) throws {
+    let container = try decoder.container(keyedBy: CodingKeys.self)
+    category = try container.decode(HarmCategory.self, forKey: .category)
+    probability = try container.decode(HarmProbability.self, forKey: .probability)
+
+    // The following 3 fields are only omitted in our test data.
+    probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
+    severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ??
+      HarmSeverity(rawValue: "HARM_SEVERITY_UNSPECIFIED")
+    severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0
+
+    // The blocked field is only included when true.
+    blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
+  }
+}
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension SafetySetting.HarmBlockThreshold: Encodable {}
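
The custom `Decodable` conformance above tolerates omitted fields. A minimal sketch of that behavior, assuming a hand-written JSON payload whose wire values follow the REST API naming:

import Foundation
import FirebaseVertexAI

// Hypothetical payload: `severityScore` and `blocked` are omitted, so the
// `decodeIfPresent ... ?? default` fallbacks in `init(from:)` apply.
let json = Data("""
{
  "category": "HARM_CATEGORY_HARASSMENT",
  "probability": "LOW",
  "probabilityScore": 0.5,
  "severity": "HARM_SEVERITY_LOW"
}
""".utf8)

let rating = try! JSONDecoder().decode(SafetyRating.self, from: json)
// rating.severityScore == 0.0 and rating.blocked == false, per the fallbacks.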

FirebaseVertexAI/Sources/VertexLog.swift

Lines changed: 1 addition & 0 deletions
@@ -49,6 +49,7 @@ enum VertexLog {
     case generateContentResponseUnrecognizedBlockThreshold = 3004
     case generateContentResponseUnrecognizedHarmProbability = 3005
     case generateContentResponseUnrecognizedHarmCategory = 3006
+    case generateContentResponseUnrecognizedHarmSeverity = 3007
 
     // SDK State Errors
     case generateContentResponseNoCandidates = 4000
