4 files changed: +15 −15
@@ -108,8 +108,8 @@ describe('Top level API', () => {
   });
   it('getGenerativeModel with HybridParams sets the model', () => {
     const genModel = getGenerativeModel(fakeVertexAI, {
-      mode: InferenceMode.ONLY_ON_CLOUD,
-      onCloudParams: { model: 'my-model' }
+      mode: InferenceMode.ONLY_IN_CLOUD,
+      inCloudParams: { model: 'my-model' }
     });
     expect(genModel.model).to.equal('publishers/google/models/my-model');
   });
@@ -76,22 +76,22 @@ export function getGenerativeModel(
 ): GenerativeModel {
   // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.
   const hybridParams = modelParams as HybridParams;
-  let onCloudParams: ModelParams;
+  let inCloudParams: ModelParams;
   if (hybridParams.mode) {
-    onCloudParams = hybridParams.onCloudParams || {
+    inCloudParams = hybridParams.inCloudParams || {
       model: 'gemini-2.0-flash-lite'
     };
   } else {
-    onCloudParams = modelParams as ModelParams;
+    inCloudParams = modelParams as ModelParams;
   }

-  if (!onCloudParams.model) {
+  if (!inCloudParams.model) {
     throw new VertexAIError(
       VertexAIErrorCode.NO_MODEL,
       `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`
     );
   }
-  return new GenerativeModel(vertexAI, onCloudParams, requestOptions);
+  return new GenerativeModel(vertexAI, inCloudParams, requestOptions);
 }

 /**
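For context, here is how a caller might exercise both branches of the function above after the rename. This is an illustrative sketch, not part of the diff: the entry points ('firebase/app', 'firebase/vertexai') and the config placeholder are assumptions about the surrounding SDK setup.

// Sketch only: import paths and app setup are assumptions, not defined in this PR.
import { initializeApp } from 'firebase/app';
import { getVertexAI, getGenerativeModel, InferenceMode } from 'firebase/vertexai';

const app = initializeApp({ /* Firebase config */ });
const vertexAI = getVertexAI(app);

// Hybrid branch: `mode` is set, so `inCloudParams` (or the
// 'gemini-2.0-flash-lite' default) supplies the cloud model name.
const hybridModel = getGenerativeModel(vertexAI, {
  mode: InferenceMode.ONLY_IN_CLOUD,
  inCloudParams: { model: 'my-model' }
});

// Plain branch: no `mode`, so the argument is treated as ModelParams and
// must name a model, otherwise VertexAIErrorCode.NO_MODEL is thrown.
const cloudModel = getGenerativeModel(vertexAI, { model: 'gemini-2.0-flash' });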
@@ -242,12 +242,12 @@ export enum Modality {
 }

 /**
- * Determines whether inference happens on-device or on-cloud.
+ * Determines whether inference happens on-device or in-cloud.
  * @public
  */
 export enum InferenceMode {
   /**
-   * Uses the on-device model if available, or falls back to the on-cloud model.
+   * Uses the on-device model if available, or falls back to the in-cloud model.
    */
   PREFER_ON_DEVICE = 'PREFER_ON_DEVICE',

@@ -257,7 +257,7 @@ export enum InferenceMode {
   ONLY_ON_DEVICE = 'ONLY_ON_DEVICE',

   /**
-   * Exclusively uses the on-cloud model.
+   * Exclusively uses the in-cloud model.
    */
-  ONLY_ON_CLOUD = 'ONLY_ON_CLOUD'
+  ONLY_IN_CLOUD = 'ONLY_IN_CLOUD'
 }
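The doc comments above pin down the semantics of each mode. A hypothetical dispatch helper that mirrors them could look like the sketch below; the SDK's real routing logic is not part of this diff.

// Illustrative only: mirrors the documented semantics of each InferenceMode
// value; the SDK's actual backend selection is not shown in this PR.
function pickBackend(
  mode: InferenceMode,
  onDeviceAvailable: boolean
): 'on-device' | 'in-cloud' {
  switch (mode) {
    case InferenceMode.ONLY_ON_DEVICE:
      return 'on-device'; // never falls back to the cloud
    case InferenceMode.ONLY_IN_CLOUD:
      return 'in-cloud'; // never runs locally
    default: // PREFER_ON_DEVICE: local if available, else cloud
      return onDeviceAvailable ? 'on-device' : 'in-cloud';
  }
}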
@@ -216,20 +216,20 @@ export interface FunctionCallingConfig {
 }

 /**
- * Configures on-device and on-cloud inference.
+ * Configures on-device and in-cloud inference.
  * @public
  */
 export interface HybridParams {
   /**
-   * Optional. Specifies on-device or on-cloud inference. Defaults to prefer on-device.
+   * Optional. Specifies on-device or in-cloud inference. Defaults to prefer on-device.
    */
   mode?: InferenceMode;
   /**
    * Optional. Specifies advanced params for on-device inference.
    */
   onDeviceParams?: AILanguageModelCreateOptionsWithSystemPrompt;
   /**
-   * Optional. Specifies advanced params for on-cloud inference.
+   * Optional. Specifies advanced params for in-cloud inference.
    */
-  onCloudParams?: ModelParams;
+  inCloudParams?: ModelParams;
 }
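Putting the interface together, a fully populated HybridParams literal under the new names might read as follows. The fields inside onDeviceParams are assumptions based on the Prompt API shape that AILanguageModelCreateOptionsWithSystemPrompt suggests; only the HybridParams property names come from this diff.

// Sketch: onDeviceParams fields (temperature, topK) are assumed from the
// Prompt API draft, not taken from this PR.
const params: HybridParams = {
  mode: InferenceMode.PREFER_ON_DEVICE, // the documented default
  onDeviceParams: { temperature: 0.8, topK: 3 },
  inCloudParams: { model: 'gemini-2.0-flash-lite' }
};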