feat: conversation chaining support for gemini
ellvix committed Dec 27, 2024
1 parent 8966b52 commit 5b3b1ba
Showing 1 changed file with 64 additions and 42 deletions.
106 changes: 64 additions & 42 deletions src/js/constants.js

@@ -2179,9 +2179,9 @@ class ChatLLM {
         img = await constants.ConvertSVGtoJPG(singleMaidr.id, 'gemini');
       }
       if (constants.emailAuthKey) {
-        chatLLM.GeminiPromptAPI(text, img);
+        chatLLM.GeminiPromptRemote(text, img);
       } else {
-        chatLLM.GeminiPrompt(text, img);
+        chatLLM.GeminiPromptLocal(text, img);
       }
     }

@@ -2304,10 +2304,10 @@ class ChatLLM {

     if (model == 'openai') {
       text = data.choices[0].message.content;
-      let i = this.requestJson.messages.length;
-      this.requestJson.messages[i] = {};
-      this.requestJson.messages[i].role = 'assistant';
-      this.requestJson.messages[i].content = text;
+      let i = this.requestJsonOpenAI.messages.length;
+      this.requestJsonOpenAI.messages[i] = {};
+      this.requestJsonOpenAI.messages[i].role = 'assistant';
+      this.requestJsonOpenAI.messages[i].content = text;

       if (data.error) {
         chatLLM.DisplayChatMessage(LLMName, 'Error processing request.', true);
@@ -2318,6 +2318,12 @@
     } else if (model == 'gemini') {
       if (data.text()) {
         text = data.text();
+        if (this.requestJsonGemini.contents.length > 2) {
+          let i = this.requestJsonGemini.contents.length;
+          this.requestJsonGemini.contents[i] = {};
+          this.requestJsonGemini.contents[i].role = 'model';
+          this.requestJsonGemini.contents[i].content = text;
+        }
         chatLLM.DisplayChatMessage(LLMName, text);
       } else {
         if (!data.error) {
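
For reference, a chained Gemini history is an alternating list of 'user' and 'model' turns; Google's generateContent API documents each turn as { role, parts: [{ text }] }. A minimal sketch with illustrative values (not taken from this repo):

// Sketch of a two-turn Gemini conversation history (illustrative values only).
// Each turn is { role, parts: [{ text }] }, alternating 'user' and 'model'.
const contents = [
  { role: 'user', parts: [{ text: 'Describe this bar chart.' }] },
  { role: 'model', parts: [{ text: 'The chart shows counts across five categories.' }] },
  { role: 'user', parts: [{ text: 'Which category has the highest count?' }] },
];

(The hunk above records the model turn under a content field; the documented request shape carries model text under parts, as sketched here.)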
Expand Down Expand Up @@ -2360,7 +2366,7 @@ class ChatLLM {
*/
fakeLLMResponseData() {
let responseText = {};
if (this.requestJson.messages.length > 2) {
if (this.requestJsonOpenAI.messages.length > 2) {
// subsequent responses
responseText = {
id: 'chatcmpl-8Y44iRCRrohYbAqm8rfBbJqTUADC7',
@@ -2567,32 +2573,32 @@ class ChatLLM {
     let backupMessage =
       'Describe ' + singleMaidr.type + ' charts to a blind person';
     // headers and sys message
-    if (!this.requestJson) {
-      this.requestJson = {};
-      //this.requestJson.model = 'gpt-4-vision-preview';
-      this.requestJson.model = 'gpt-4o-2024-11-20';
-      this.requestJson.max_tokens = constants.LLMmaxResponseTokens; // note: if this is too short (tested with less than 200), the response gets cut off
+    if (!this.requestJsonOpenAI) {
+      this.requestJsonOpenAI = {};
+      //this.requestJsonOpenAI.model = 'gpt-4-vision-preview';
+      this.requestJsonOpenAI.model = 'gpt-4o-2024-11-20';
+      this.requestJsonOpenAI.max_tokens = constants.LLMmaxResponseTokens; // note: if this is too short (tested with less than 200), the response gets cut off

       // sys message
-      this.requestJson.messages = [];
-      this.requestJson.messages[0] = {};
-      this.requestJson.messages[0].role = 'system';
-      this.requestJson.messages[0].content = sysMessage;
+      this.requestJsonOpenAI.messages = [];
+      this.requestJsonOpenAI.messages[0] = {};
+      this.requestJsonOpenAI.messages[0].role = 'system';
+      this.requestJsonOpenAI.messages[0].content = sysMessage;
       if (constants.LLMPreferences) {
-        this.requestJson.messages[1] = {};
-        this.requestJson.messages[1].role = 'system';
-        this.requestJson.messages[1].content = constants.LLMPreferences;
+        this.requestJsonOpenAI.messages[1] = {};
+        this.requestJsonOpenAI.messages[1].role = 'system';
+        this.requestJsonOpenAI.messages[1].content = constants.LLMPreferences;
       }
     }

     // user message
     // if we have an image (first time only), send the image and the text, otherwise just the text
-    let i = this.requestJson.messages.length;
-    this.requestJson.messages[i] = {};
-    this.requestJson.messages[i].role = 'user';
+    let i = this.requestJsonOpenAI.messages.length;
+    this.requestJsonOpenAI.messages[i] = {};
+    this.requestJsonOpenAI.messages[i].role = 'user';
     if (img) {
       // first message, include the img
-      this.requestJson.messages[i].content = [
+      this.requestJsonOpenAI.messages[i].content = [
         {
           type: 'text',
           text: text,
@@ -2604,10 +2610,10 @@
       ];
     } else {
       // just the text
-      this.requestJson.messages[i].content = text;
+      this.requestJsonOpenAI.messages[i].content = text;
     }

-    return this.requestJson;
+    return this.requestJsonOpenAI;
   }

   GeminiJson(text, img = null) {
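
For context, once the method above has run for the first user turn, the OpenAI-style request body has roughly the following shape. This is a sketch with placeholder values; the image entry follows OpenAI's documented image_url content part, whose exact lines are collapsed in this diff:

// Rough shape of requestJsonOpenAI after the first prompt (placeholder values).
const requestJsonOpenAI = {
  model: 'gpt-4o-2024-11-20',
  max_tokens: 1000, // constants.LLMmaxResponseTokens in the real code
  messages: [
    { role: 'system', content: '<system message>' },
    { role: 'system', content: '<LLM preferences, if set>' },
    {
      role: 'user',
      content: [
        { type: 'text', text: 'Describe this chart.' },
        // image part per OpenAI's vision format; the diff collapses these lines
        { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,<...>' } },
      ],
    },
  ],
};

Subsequent turns append plain { role: 'user', content: text } and { role: 'assistant', content: text } entries, which is what gives the OpenAI path its conversation chaining.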

@@ -2672,7 +2678,7 @@ class ChatLLM {
     return payload;
   }

-  async GeminiPromptAPI(text, imgBase64 = null) {
+  async GeminiPromptRemote(text, imgBase64 = null) {
     let url = constants.baseURL + 'gemini' + constants.code;

     // Create the prompt
@@ -2689,15 +2695,28 @@
     }
     constants.LLMImage = imgBase64;

-    let requestJson = chatLLM.GeminiJson(prompt, imgBase64);
+    if (!this.requestJsonGemini) {
+      // this is our first message, do the full construction
+      this.requestJsonGemini = chatLLM.GeminiJson(prompt, imgBase64);
+    } else {
+      // subsequent messages, just add the new user message
+      let i = this.requestJsonGemini.contents.length;
+      this.requestJsonGemini.contents[i] = {};
+      this.requestJsonGemini.contents[i].role = 'user';
+      this.requestJsonGemini.contents[i].parts = [
+        {
+          text: text,
+        },
+      ];
+    }

     const response = await fetch(url, {
       method: 'POST',
       headers: {
         'Content-Type': 'application/json',
         Authentication: constants.emailAuthKey + ' ' + constants.clientToken,
       },
-      body: JSON.stringify(requestJson),
+      body: JSON.stringify(this.requestJsonGemini),
     });
     if (response.ok) {
       const responseJson = await response.json();
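
For comparison, the same chained contents array could be posted directly to Google's public REST endpoint. A minimal sketch; the endpoint, model name, and API-key query parameter are standard Gemini REST usage and are assumptions here, since the project routes the request through its own proxy instead:

// Sketch: sending a chained history straight to Google's Gemini REST API.
// Endpoint, model name, and key handling are illustrative assumptions; the
// project posts the same body to constants.baseURL + 'gemini' + constants.code.
async function geminiGenerate(contents, apiKey) {
  const model = 'gemini-1.5-flash'; // illustrative model choice
  const url =
    'https://generativelanguage.googleapis.com/v1beta/models/' +
    model +
    ':generateContent?key=' +
    apiKey;
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ contents }),
  });
  if (!response.ok) {
    throw new Error('Gemini request failed: ' + response.status);
  }
  return response.json();
}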

@@ -2713,7 +2732,7 @@
     }
   }

-  async GeminiPrompt(text, imgBase64 = null) {
+  async GeminiPromptLocal(text, imgBase64 = null) {
     // https://ai.google.dev/docs/gemini_api_overview#node.js
     try {
       // Save the image for next time
@@ -2735,29 +2754,32 @@
       }); // old model was 'gemini-pro-vision'

       // Create the prompt
-      let prompt = constants.LLMSystemMessage;
-      if (constants.LLMPreferences) {
-        prompt += constants.LLMPreferences;
+      if (!this.requestJsonGemini) {
+        // this is our first message, do the full construction
+        this.requestJsonGemini = chatLLM.GeminiJson(prompt, imgBase64);
+      } else {
+        // subsequent messages, just add the new user message
+        let i = this.requestJsonGemini.contents.length;
+        this.requestJsonGemini.contents[i] = {};
+        this.requestJsonGemini.contents[i].role = 'user';
+        this.requestJsonGemini.contents[i].parts = [
+          {
+            text: text,
+          },
+        ];
       }
-      prompt += '\n\n' + text; // Use the text parameter as the prompt
-      const image = {
-        inlineData: {
-          data: imgBase64, // Use the base64 image string
-          mimeType: 'image/png', // Or the appropriate mime type of your image
-        },
-      };

       // Generate the content
       //console.log('LLM request: ', prompt, image);
-      const result = await model.generateContent([prompt, image]);
+      const result = await model.generateContent(this.requestJsonGemini);
       //console.log(result.response.text());

       // Process the response
       chatLLM.ProcessLLMResponse(result.response, 'gemini');
     } catch (error) {
       chatLLM.WaitingSound(false);
       chatLLM.DisplayChatMessage('Gemini', 'Error processing request.', true);
-      console.error('Error in GeminiPrompt:', error);
+      console.error('Error in GeminiPromptLocal:', error);
       throw error; // Rethrow the error for further handling if necessary
     }
   }
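
As an aside, the @google/generative-ai SDK used here also ships a chat-session helper that manages the history itself; a minimal sketch, where the model name and messages are illustrative and this is not how the commit wires it up:

// Minimal sketch of SDK-managed chaining with @google/generative-ai.
// Model name and messages are illustrative; the commit instead feeds its own
// requestJsonGemini object to model.generateContent().
import { GoogleGenerativeAI } from '@google/generative-ai';

async function askFollowUp(apiKey) {
  const genAI = new GoogleGenerativeAI(apiKey);
  const model = genAI.getGenerativeModel({ model: 'gemini-1.5-flash' });
  const chat = model.startChat({
    history: [
      { role: 'user', parts: [{ text: 'Describe this bar chart.' }] },
      { role: 'model', parts: [{ text: 'It shows counts for five categories.' }] },
    ],
  });
  const result = await chat.sendMessage('Which category is largest?');
  return result.response.text();
}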

@@ -2822,7 +2844,7 @@ class ChatLLM {
     document.getElementById('chatLLM_chat_history').innerHTML = '';

     // reset the data
-    this.requestJson = null;
+    this.requestJsonOpenAI = null;
     this.firstTime = true;

     // and start over, if enabled, or window is open