@@ -10,7 +10,7 @@ const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
10
10
const fs = require('fs'); // File-system access: used to load the system prompt from disk.
const apiVersion = 'v1beta'; // Version of the Gemini REST API to target.
// Generation parameters passed to the Gemini model.
// temperature 0.0 asks for maximally deterministic (least random) output,
// per the committed (+) side of the diff this span was reconstructed from.
const generationConfig = {
  temperature: 0.0,
};
15
15
const safetySettings = [
16
16
{
@@ -49,7 +49,7 @@ io.on('connection', async (socket) => {
49
49
// Read system text from file asynchronously
50
50
const systemText = await readSystemTextFromFile ( "systemi.txt" ) ;
51
51
// let systemText = "";
52
- const systemInstruction = { role : "system" , parts : [ { text : systemText } ] } ;
52
+ const systemInstruction = { role : "system" , parts : [ { text : systemText } ] } ;
53
53
54
54
const model = genAI . getGenerativeModel ( { model : "gemini-1.5-pro-latest" , systemInstruction, generationConfig, safetySettings } , { apiVersion } ) ;
55
55
const chat = model . startChat ( { } ) ;
@@ -60,9 +60,14 @@ io.on('connection', async (socket) => {
60
60
const result = await chat . sendMessage ( prompt ) ;
61
61
const response = await result . response ;
62
62
console . log ( 'Response received' ) ;
63
- // console.log(util.inspect(response.candidates, {showHidden: false, depth: null, colors: true}))
64
- const output = response . text ( ) ;
63
+ console . log ( util . inspect ( response . candidates , { showHidden : false , depth : null , colors : true } ) )
64
+ if ( response . candidates [ 0 ] . finishReason === 'STOP' ) {
65
+ const output = response . text ( ) ;
65
66
socket . emit ( 'response' , output ) ;
67
+ }
68
+ else {
69
+ socket . emit ( 'error' , 'An error occurred: ' + response . candidates [ 0 ] . finishReason ) ;
70
+ }
66
71
} catch ( err ) {
67
72
console . error ( err ) ;
68
73
socket . emit ( 'error' , 'An error occurred' ) ;
@@ -103,4 +108,4 @@ function printChatNicely(chatHistory) {
103
108
104
109
// Start the HTTP server; the port is logged once the listener is bound.
server.listen(3000, () => {
  console.log('Server listening on port 3000');
});
0 commit comments