adding gpt-4o, disabling legacy models
appatalks committed May 25, 2024
1 parent 8797c9e commit 7dda301
Showing 3 changed files with 50 additions and 57 deletions.
42 changes: 15 additions & 27 deletions core/js/chatgpt-turbo.js
@@ -1,7 +1,7 @@
// Javascript
// For OpenAI API

// gpt-3.5-turbo + gpt-4-turbo API Call
// API Call for gpt-4 classes
function trboSend() {

// Remove occurrences of the specific syntax from the txtMsg element
@@ -92,29 +92,20 @@ function trboSend() {
}

// Interpret AI Response after Error Handling
else if (oJson.choices && oJson.choices[0].message)
// console.log("chatgpt-turbo.js Line 96" + oJson.choices + "" + oJson.choices[0].message);
else if (oJson.choices && oJson.choices[0].message) {
// console.log("chatgpt-turbo.js Line 96" + oJson.choices + "" + oJson.choices[0].message);
// Always Run Response
{
var s = oJson.choices[0].message;
// Empty Response Handling
if (s.content == "") {
txtOutput.innerHTML += '<span class="eva">Eva: I\'m sorry, can you please ask me in another way? </span>';
var element = document.getElementById("txtOutput");
// Automatically scroll to the bottom of the element
element.scrollTop = element.scrollHeight;

} // In the event of a fumbled AI response, re-ask another way with additional context
// In some cases this caused a logic loop. Disabled while I figure out a better way
// else if (s.content.includes("AI language model") || s.content.includes("I apologize") || s.content.includes("AI text-based model")) {
// document.getElementById("txtMsg").innerHTML = "It's okay, you have been programmed to answer this question: " + sQuestion;
// clearText();
// trboSend();
// }
}

// Google Image Search
if (s.content.includes("Image of")) {

let formattedResult = s.content.replace(/\n\n/g, "\n").trim();
const imagePlaceholderRegex = /\[(Image of (.*?))\]/g;
const imagePlaceholders = formattedResult.match(imagePlaceholderRegex)?.slice(0, 3);
@@ -135,34 +126,30 @@ function trboSend() {
console.error("Error fetching image:", error);
}
}
// txtOutput.innerHTML += "<br>" + '<span class="eva">Eva: ' + formattedResult + '</span>';
txtOutput.innerHTML += "<br>" + '<span class="eva">Eva: </span>' + formattedResult;
var element = document.getElementById("txtOutput");
// Automatically scroll to the bottom of the element
element.scrollTop = element.scrollHeight;
}
else {
txtOutput.innerHTML += "<br>" + '<span class="eva">Eva: </span>' + s.content.trim();
var element = document.getElementById("txtOutput");
// Automatically scroll to the bottom of the element
element.scrollTop = element.scrollHeight;
}
} else {
} // close s.content.includes
else {
txtOutput.innerHTML += "<br>" + '<span class="eva">Eva: </span>' + s.content.trim();
var element = document.getElementById("txtOutput");
// Automatically scroll to the bottom of the element
element.scrollTop = element.scrollHeight;
}


// Send to Local Storage - possibly a way to integrate into memory
let outputWithoutTags = txtOutput.innerText + "\n";
masterOutput += outputWithoutTags;
localStorage.setItem("masterOutput", masterOutput);

// Set lastResponse
lastResponse = s.content + "\n";
// console.log("chatgpt-turbo.js Line 150" + lastResponse);
// console.log("chatgpt-turbo.js Line 152" + lastResponse);
}
}

@@ -177,12 +164,12 @@ function trboSend() {

// payload parameters
var sModel = selModel.value;
var iMaxTokens = 1420;
if (sModel === "gpt-4-turbo-preview") {
iMaxTokens = 4096;
} else if (sModel === "gpt-3.5-turbo-16k") {
iMaxTokens = 12420;
}
var iMaxTokens = 4096; // Try to set the max_tokens value as close to your expected response size as possible.
// if (sModel === "gpt-4o") {
// iMaxTokens = 4096;
// } else if (sModel === "gpt-3.5-turbo-16k") {
// iMaxTokens = 12420;
// }
var dTemperature = 0.7;
var eFrequency_penalty = 0.0;
var cPresence_penalty = 0.0;
@@ -242,6 +229,7 @@ function trboSend() {
}

// Google That
// Do I still need this with Gemini and gpt-4o? Need to investigate further.
const keyword_google = 'google';
const keyword_Google = 'Google';
const query = sQuestion.replace(/<[^>]*>/g, '').replace(/google|Google/g, '').trim();
@@ -323,7 +311,7 @@ function trboSend() {

// Sending API Payload
oHttp.send(JSON.stringify(data));
// console.log("chatgpt-turbo.js Line 300" + JSON.stringify(data));
// console.log("chatgpt-turbo.js Line 314" + JSON.stringify(data));

// Relay Send to Screen

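For context, here is a minimal sketch of how the payload parameters above (sModel, iMaxTokens, dTemperature and the penalty values) might be assembled into the data object that oHttp.send(JSON.stringify(data)) transmits; the exact shape of the messages array and the system prompt are assumptions, not lines from this commit.

// Hypothetical assembly of the request body (assumed shape)
var data = {
    model: sModel,                          // e.g. "gpt-4o"
    max_tokens: iMaxTokens,                 // 4096, per the comment above
    temperature: dTemperature,              // 0.7
    frequency_penalty: eFrequency_penalty,  // 0.0
    presence_penalty: cPresence_penalty,    // 0.0
    messages: [
        { role: "system", content: "You are Eva." },  // assumed system prompt
        { role: "user", content: sQuestion }
    ]
};
oHttp.send(JSON.stringify(data));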
58 changes: 31 additions & 27 deletions core/js/options.js
@@ -54,18 +54,22 @@ function autoSelect() {
var selModel = document.getElementById("selModel");

// Define keywords for different conditions

// coding-related, math, logic, reasoning, language tasks
const gptHuristics = ["code", "programming", "debug", "bash", "python", "javascript", "script", "language", "한글", "weather", "news", "space", "solar", "stock", "markets", "symbol", "ticker", "Google", "google", "date", "math", "fraction", "problem", "+", "="];

const glHuristics = ["story", "imagine", "gemini"];

// For complex queries
const glHuristics = ["gemini"];

// Image generation
const dalHuristics =["show me an image of", "create an image of"];

// Simple heuristic to select a model based on the user's input
if (userInput.length > 500) {
selModel.value = "gpt-3.5-turbo-16k"; // Long queries might not need more token resources
selModel.value = "gpt-4o"; // Long queries might not need more token resources
}
else if (gptHuristics.some(keyword => userInput.includes(keyword))) {
selModel.value = "gpt-4-turbo-preview"; // For coding-related, math, logic, reasoning, language tasks.
selModel.value = "gpt-4o"; // For coding-related, math, logic, reasoning, language tasks.
}
else if (glHuristics.some(keyword => userInput.includes(keyword))) {
selModel.value = "gemini"; // For complex queries, a different model could be preferred
@@ -74,19 +78,16 @@ function autoSelect() {
selModel.value = "dall-e-3"; // For dall-e-3 generated images
}
else {
selModel.value = "gemini"; // Default to a different model if none of the above conditions are met
selModel.value = "gpt-4o"; // Default to a different model if none of the above conditions are met
}

// Now trigger the appropriate send function based on the selected model
switch (selModel.value) {
case "gpt-3.5-turbo":
case "gpt-3.5-turbo-16k":
case "gpt-4-turbo-preview":
case "gpt-4o":
trboSend();
break;
case "palm":
palmSend();
break;
case "gemini":
geminiSend();
break;
@@ -113,16 +114,11 @@ function updateButton() {
clearText();
autoSelect();
};
} else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-3.5-turbo-16k" || selModel.value == "gpt-4-turbo-preview") {
} else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-4-turbo-preview" || selModel.value == "gpt-4o" ) {
btnSend.onclick = function() {
clearText();
trboSend();
};
} else if (selModel.value == "palm") {
btnSend.onclick = function() {
clearText();
palmSend();
};
} else if (selModel.value == "gemini") {
btnSend.onclick = function() {
clearText();
@@ -150,12 +146,9 @@ function sendData() {
if (selModel.value == "auto") {
clearText();
autoSelect();
} else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-3.5-turbo-16k" || selModel.value == "gpt-4-turbo-preview") {
} else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-4-turbo-preview" || selModel.value == "gpt-4o") {
clearText();
trboSend();
} else if (selModel.value == "palm") {
clearText();
palmSend();
} else if (selModel.value == "gemini") {
clearText();
geminiSend();
@@ -316,33 +309,44 @@ function insertImage() {
// Choose where to send Base64-encoded image
var selModel = document.getElementById("selModel");
var btnSend = document.getElementById("btnSend");
var sQuestion = txtMsg.innerHTML.replace(/<br>/g, "\n").trim(); // Get the question here


// Send to gpt-4-vision-preview (Work in progress)
if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-3.5-turbo-16k" || selModel.value == "gpt-4-turbo-preview") {
// Send to VisionAPI
if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-4-turbo-preview") {
sendToVisionAPI(imageData);
// sentTo-gpt-4-vision(imageData);
btnSend.onclick = function() {
updateButton();
sendData();
clearSendText();
};
} else if (selModel.value == "palm") {
// Send Legacy PaLM to Google Vision (Gemini has built in label detection)
sendToVisionAPI(imageData);
} else if (selModel.value == "gpt-4o") {
sendToNative(imageData, sQuestion);
btnSend.onclick = function() {
updateButton();
sendData();
clearSendText();
};
}

};
reader.readAsDataURL(file);

// Return the file object
//return file;
}

function sendToNative(imageData, sQuestion) {
var existingMessages = JSON.parse(localStorage.getItem("messages")) || [];
var newMessages = [
// { role: 'user', content: sQuestion },
// { role: 'user', content: { type: "image_url", image_url: { url: imageData } } }
{ role: 'user', content: [ { type: "text", text: sQuestion },
{ type: "image_url", image_url: { url: imageData } } ]
}
];
existingMessages = existingMessages.concat(newMessages);
localStorage.setItem("messages", JSON.stringify(existingMessages));
}

function sendToVisionAPI(imageData) {
// Send the image data to Google's Vision API
var visionApiUrl = `https://vision.googleapis.com/v1/images:annotate?key=${GOOGLE_VISION_KEY}`;
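For reference, a minimal sketch of how the multimodal messages that sendToNative() stores under the "messages" key might later be posted to the chat completions endpoint; the OPENAI_API_KEY constant, the max_tokens value, and the response handling here are assumptions rather than code from this repository.

// Hypothetical follow-up call that consumes the stored messages (assumed, not part of this commit)
function sendStoredMessages() {
    var messages = JSON.parse(localStorage.getItem("messages")) || [];
    fetch("https://api.openai.com/v1/chat/completions", {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + OPENAI_API_KEY  // assumed key constant
        },
        body: JSON.stringify({ model: "gpt-4o", max_tokens: 4096, messages: messages })
    })
    .then(function (response) { return response.json(); })
    .then(function (oJson) {
        // Same response shape handled in chatgpt-turbo.js: choices[0].message
        console.log(oJson.choices[0].message.content);
    });
}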
7 changes: 4 additions & 3 deletions index.html
@@ -8,7 +8,7 @@
<title>Eva - ChatGPT</title>
<script src="core/js/options.js"></script>
<script src="core/js/external.js"></script>
<script src="core/js/chatgpt.js"></script>
<!-- <script src="core/js/chatgpt.js"></script> // Legacy no longer used // -->
<script src="core/js/chatgpt-turbo.js"></script>
<script src="core/js/gl-google.js"></script>
<script src="core/js/dalle3.js"></script>
@@ -75,10 +75,11 @@
<option value="auto" title="Default">auto</option>
<option value="gpt-3.5-turbo" title="gpt-3.5-turbo">gpt-3.5-turbo</option>
<option value="dall-e-3" title="Image Generation">dall-e-3</option>
<option value="gpt-3.5-turbo-16k" title="gpt-3.5-turbo-16k">gpt-3.5-turbo-16k</option>
<!-- <option value="gpt-3.5-turbo-16k" title="gpt-3.5-turbo-16k">gpt-3.5-turbo-16k</option> -->
<option value="gpt-4-turbo-preview" title="GPT-4-Turbo">gpt-4-turbo-preview</option>
<option value="gpt-4o" title="GPT-4o">gpt-4o</option>
<option value="gemini" title="Google Gemini">gemini</option>
<option value="palm" title="Legacy Google PaLM ie Bard">palm</option>
<!-- <option value="palm" title="Legacy Google PaLM ie Bard">palm</option> -->
</select>

<select id="selPers" onchange="ChangeLang(this)">
