Skip to content

Commit 75ebe54

Browse files
authored
Merge 86d47b0 into d546aec
2 parents d546aec + 86d47b0 commit 75ebe54

File tree

7 files changed

+209
-160
lines changed

7 files changed

+209
-160
lines changed

e2e/sample-apps/modular.js

Lines changed: 19 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -58,12 +58,7 @@ import {
5858
onValue,
5959
off
6060
} from 'firebase/database';
61-
import {
62-
getGenerativeModel,
63-
getVertexAI,
64-
InferenceMode,
65-
VertexAI
66-
} from 'firebase/vertexai';
61+
import { getGenerativeModel, getVertexAI, VertexAI } from 'firebase/vertexai';
6762
import { getDataConnect, DataConnect } from 'firebase/data-connect';
6863

6964
/**
@@ -318,8 +313,13 @@ function callPerformance(app) {
318313
async function callVertexAI(app) {
319314
console.log('[VERTEXAI] start');
320315
const vertexAI = getVertexAI(app);
321-
const model = getGenerativeModel(vertexAI, { model: 'gemini-1.5-flash' });
322-
const result = await model.countTokens('abcdefg');
316+
const model = getGenerativeModel(vertexAI, {
317+
mode: 'prefer_in_cloud'
318+
});
319+
const result = await model.generateContentStream("What is Roko's Basalisk?");
320+
for await (const chunk of result.stream) {
321+
console.log(chunk.text());
322+
}
323323
console.log(`[VERTEXAI] counted tokens: ${result.totalTokens}`);
324324
}
325325

@@ -337,17 +337,6 @@ function callDataConnect(app) {
337337
console.log('[DATACONNECT] initialized');
338338
}
339339

340-
async function callVertex(app) {
341-
console.log('[VERTEX] start');
342-
const vertex = getVertexAI(app);
343-
const model = getGenerativeModel(vertex, {
344-
mode: InferenceMode.PREFER_ON_DEVICE
345-
});
346-
const result = await model.generateContent("What is Roko's Basalisk?");
347-
console.log(result.response.text());
348-
console.log('[VERTEX] initialized');
349-
}
350-
351340
/**
352341
* Run smoke tests for all products.
353342
* Comment out any products you want to ignore.
@@ -357,19 +346,18 @@ async function main() {
357346
const app = initializeApp(config);
358347
setLogLevel('warn');
359348

360-
callAppCheck(app);
361-
await authLogin(app);
362-
await callStorage(app);
363-
await callFirestore(app);
364-
await callDatabase(app);
365-
await callMessaging(app);
366-
callAnalytics(app);
367-
callPerformance(app);
368-
await callFunctions(app);
349+
// callAppCheck(app);
350+
// await authLogin(app);
351+
// await callStorage(app);
352+
// await callFirestore(app);
353+
// await callDatabase(app);
354+
// await callMessaging(app);
355+
// callAnalytics(app);
356+
// callPerformance(app);
357+
// await callFunctions(app);
369358
await callVertexAI(app);
370-
callDataConnect(app);
371-
await authLogout(app);
372-
await callVertex(app);
359+
// callDataConnect(app);
360+
// await authLogout(app);
373361
console.log('DONE');
374362
}
375363

packages/vertexai/src/methods/chat-session.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,6 +149,7 @@ export class ChatSession {
149149
this._apiSettings,
150150
this.model,
151151
generateContentRequest,
152+
this.chromeAdapter,
152153
this.requestOptions
153154
);
154155

packages/vertexai/src/methods/chrome-adapter.test.ts

Lines changed: 86 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -108,42 +108,6 @@ describe('ChromeAdapter', () => {
108108
})
109109
).to.be.false;
110110
});
111-
it('returns false if request system instruction has function role', async () => {
112-
const adapter = new ChromeAdapter({} as AI, 'prefer_on_device');
113-
expect(
114-
await adapter.isAvailable({
115-
contents: [],
116-
systemInstruction: {
117-
role: 'function',
118-
parts: []
119-
}
120-
})
121-
).to.be.false;
122-
});
123-
it('returns false if request system instruction has multiple parts', async () => {
124-
const adapter = new ChromeAdapter({} as AI, 'prefer_on_device');
125-
expect(
126-
await adapter.isAvailable({
127-
contents: [],
128-
systemInstruction: {
129-
role: 'function',
130-
parts: [{ text: 'a' }, { text: 'b' }]
131-
}
132-
})
133-
).to.be.false;
134-
});
135-
it('returns false if request system instruction has non-text part', async () => {
136-
const adapter = new ChromeAdapter({} as AI, 'prefer_on_device');
137-
expect(
138-
await adapter.isAvailable({
139-
contents: [],
140-
systemInstruction: {
141-
role: 'function',
142-
parts: [{ inlineData: { mimeType: 'a', data: 'b' } }]
143-
}
144-
})
145-
).to.be.false;
146-
});
147111
it('returns true if model is readily available', async () => {
148112
const aiProvider = {
149113
languageModel: {
@@ -173,13 +137,18 @@ describe('ChromeAdapter', () => {
173137
const createStub = stub(aiProvider.languageModel, 'create').resolves(
174138
{} as AILanguageModel
175139
);
176-
const adapter = new ChromeAdapter(aiProvider, 'prefer_on_device');
140+
const onDeviceParams = {} as AILanguageModelCreateOptionsWithSystemPrompt;
141+
const adapter = new ChromeAdapter(
142+
aiProvider,
143+
'prefer_on_device',
144+
onDeviceParams
145+
);
177146
expect(
178147
await adapter.isAvailable({
179148
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
180149
})
181150
).to.be.false;
182-
expect(createStub).to.have.been.calledOnce;
151+
expect(createStub).to.have.been.calledOnceWith(onDeviceParams);
183152
});
184153
it('avoids redundant downloads', async () => {
185154
const aiProvider = {
@@ -250,4 +219,83 @@ describe('ChromeAdapter', () => {
250219
).to.be.false;
251220
});
252221
});
222+
describe('generateContentOnDevice', () => {
223+
it('Extracts and concats initial prompts', async () => {
224+
const aiProvider = {
225+
languageModel: {
226+
create: () => Promise.resolve({})
227+
}
228+
} as AI;
229+
const factoryStub = stub(aiProvider.languageModel, 'create').resolves({
230+
prompt: s => Promise.resolve(s)
231+
} as AILanguageModel);
232+
const text = ['first', 'second', 'third'];
233+
const onDeviceParams = {
234+
initialPrompts: [{ role: 'user', content: text[0] }]
235+
} as AILanguageModelCreateOptionsWithSystemPrompt;
236+
const adapter = new ChromeAdapter(
237+
aiProvider,
238+
'prefer_on_device',
239+
onDeviceParams
240+
);
241+
const response = await adapter.generateContentOnDevice({
242+
contents: [
243+
{ role: 'model', parts: [{ text: text[1] }] },
244+
{ role: 'user', parts: [{ text: text[2] }] }
245+
]
246+
});
247+
expect(factoryStub).to.have.been.calledOnceWith({
248+
initialPrompts: [
249+
{ role: 'user', content: text[0] },
250+
// Asserts tail is passed as initial prompts, and
251+
// role is normalized from model to assistant.
252+
{ role: 'assistant', content: text[1] }
253+
]
254+
});
255+
expect(await response.json()).to.deep.equal({
256+
candidates: [
257+
{
258+
content: {
259+
parts: [{ text: text[2] }]
260+
}
261+
}
262+
]
263+
});
264+
});
265+
it('Extracts system prompt', async () => {
266+
const aiProvider = {
267+
languageModel: {
268+
create: () => Promise.resolve({})
269+
}
270+
} as AI;
271+
const factoryStub = stub(aiProvider.languageModel, 'create').resolves({
272+
prompt: s => Promise.resolve(s)
273+
} as AILanguageModel);
274+
const onDeviceParams = {
275+
systemPrompt: 'be yourself'
276+
} as AILanguageModelCreateOptionsWithSystemPrompt;
277+
const adapter = new ChromeAdapter(
278+
aiProvider,
279+
'prefer_on_device',
280+
onDeviceParams
281+
);
282+
const text = 'hi';
283+
const response = await adapter.generateContentOnDevice({
284+
contents: [{ role: 'user', parts: [{ text }] }]
285+
});
286+
expect(factoryStub).to.have.been.calledOnceWith({
287+
initialPrompts: [],
288+
systemPrompt: onDeviceParams.systemPrompt
289+
});
290+
expect(await response.json()).to.deep.equal({
291+
candidates: [
292+
{
293+
content: {
294+
parts: [{ text }]
295+
}
296+
}
297+
]
298+
});
299+
});
300+
});
253301
});

0 commit comments

Comments (0)