diff --git a/core/js/chatgpt.js b/core/js/chatgpt.js
deleted file mode 100644
index 10b7856..0000000
--- a/core/js/chatgpt.js
+++ /dev/null
@@ -1,145 +0,0 @@
-// Javascript
-// For OpenAI API
-
-// Send API Call
-function Send() {
-
- var sQuestion = txtMsg.innerHTML;
- sQuestion = sQuestion.replace(/
/g, "\n");
- if (sQuestion.trim() == "") {
- alert("Type in your question!");
- txtMsg.focus();
- return;
- }
-
- var oHttp = new XMLHttpRequest();
- oHttp.open("POST", "https://api.openai.com/v1/completions");
- oHttp.setRequestHeader("Accept", "application/json");
- oHttp.setRequestHeader("Content-Type", "application/json");
- oHttp.setRequestHeader("Authorization", "Bearer " + OPENAI_API_KEY)
-
- // Error Handling
- oHttp.onreadystatechange = function () {
- if (oHttp.readyState === 4) {
- // Check for errors
- if (oHttp.status === 500) {
- txtOutput.innerHTML += "
Error 500: Internal Server Error" + "
" + oHttp.responseText;
- console.log("Error 500: Internal Server Error chatgpt-turbo.js Line 27");
- return;
- }
- if (oHttp.status === 429) {
- txtOutput.innerHTML += "
Error 429: Too Many Requests" + "
" + oHttp.responseText;
- console.log("Error 429: Too Many Requests chatgpt-turbo.js Line 32");
- return;
- }
- if (oHttp.status === 404) {
- txtOutput.innerHTML += "
Error 404: Not Found" + "
" + oHttp.responseText;
- console.log("Error 404: Too Many Requests chatgpt-turbo.js Line 37");
- return;
- }
- //console.log(oHttp.status);
- var oJson = {}
- if (txtOutput.innerHTML != "") txtOutput.innerHTML += "
"; // User Send Data
- try {
- oJson = JSON.parse(oHttp.responseText); // API Response Data
- } catch (ex) {
- txtOutput.innerHTML += "Error: " + ex.message;
- console.log("Error: chatgpt.js Line 47");
- return;
- }
-
- // EasterEgg
- if ((oJson.usage.completion_tokens === 420) || (oJson.usage.total_tokens === 420)) {
- function displayImage() {
- // code to display image
- var image = document.getElementById("eEgg");
- image.style.display = "flex";
- setTimeout(function() {
- image.style.opacity = 1;
- }, 50);
- setTimeout(function() {
- image.style.opacity = 0;
- }, 2420);
- setTimeout(function() {
- image.style.display = "none";
- }, 2920);
- }
- displayImage();
- }
-
- // Timeout Error Exponetial Backoff
- if (oJson.error && oJson.error.message) {
- // txtOutput.innerHTML += "Error: " + oJson.error.message;
- // 503 "Error That model is currently overloaded with other requests."
- if (oJson.error.message == "overloaded" && retryCount < maxRetries) {
- retryCount++;
- var retryDelay = Math.pow(2, retryCount) * 1000;
- console.log("Too busy. Retrying in " + retryDelay + "ms");
- setTimeout(Send, retryDelay);
- return;
- }
- txtOutput.innerHTML += "Error Other: " + oJson.error.message;
- console.log("Error Other: chatgpt.js Line 82");
- retryCount = 0;
- }
-
- // Interpret AI Response after Error Handling
- else if (oJson.choices && oJson.choices[0].text);
- // console.log("Line 88" + oJson.choices + "" +oJson.choices[0].text);
- // Always Run Response
- {
- var s = oJson.choices[0].text;
- // Empty Response Handling
- if (s == "") {
- txtOutput.innerHTML += "Eva: I'm sorry can you please ask me in another way?";
- } else {
- txtOutput.innerHTML += "Eva: " + s.trim();
- }
-
- // Send to Local Storage - possibly way to intigrate into memory
- let outputWithoutTags = txtOutput.innerText ;
- masterOutput += outputWithoutTags;
- localStorage.setItem("masterOutput", masterOutput);
-
- userMasterResponse += sQuestion + "\n";
- localStorage.setItem("userMasterResponse", userMasterResponse);
-
- // Set lastResponse
- lastResponse = s;
- }
- }
-
- // Check the state of the checkbox and have fun
- const checkbox = document.getElementById("autoSpeak");
- if (checkbox.checked) {
- speakText();
- const audio = document.getElementById("audioPlayback");
- audio.setAttribute("autoplay", true);
- }
- };
-
- // payload parameters
- var sModel = selModel.value;
- var iMaxTokens = 750;
- var dTemperature = 0.7;
- var stop = "&*&";
-
- // API Payload
- var data = {
- model: sModel,
- prompt: selPers.value + " " +lastResponse.replace(/\n/g, '') + " " + sQuestion.replace(/\n/g, ''),
- max_tokens: iMaxTokens,
- temperature: dTemperature,
- frequency_penalty: 0.0, // Between -2 and 2, Positive values decreases repeat responses.
- presence_penalty: 0.0, // Between -2 and 2, Positive values increases new topic probability.
- stop: stop
- }
-
- // Sending API Payload
- oHttp.send(JSON.stringify(data));
-
- // Relay Send to Screen
- if (txtOutput.innerHTML != "") txtOutput.innerHTML += "
";
- txtOutput.innerHTML += "You: " + sQuestion;
- txtMsg.innerHTML = "";
-}
diff --git a/core/js/chatgpt-turbo.js b/core/js/gpt-core.js
similarity index 97%
rename from core/js/chatgpt-turbo.js
rename to core/js/gpt-core.js
index 9eb3f2d..d869712 100644
--- a/core/js/chatgpt-turbo.js
+++ b/core/js/gpt-core.js
@@ -1,7 +1,7 @@
// Javascript
// For OpenAI API
-// API Call for gpt-4 classes
+// API Call for latest gpt classes
function trboSend() {
// Remove occurrences of the specific syntax from the txtMsg element
@@ -27,7 +27,7 @@ function trboSend() {
// Check for errors
if (oHttp.status === 500) {
txtOutput.innerHTML += "
Error 500: Internal Server Error" + "
" + oHttp.responseText;
- console.log("Error 500: Internal Server Error chatgpt-turbo.js Line 26");
+ console.log("Error 500: Internal Server Error gpt-core.js Line 30");
return;
}
if (oHttp.status === 429) {
@@ -53,7 +53,7 @@ function trboSend() {
console.log("oJson", oJson);
} catch (ex) {
txtOutput.innerHTML += "Error: " + ex.message;
- console.log("Error: chatgpt-turbo.js Line 52");
+ console.log("Error: gpt-core.js Line 56");
return;
}
@@ -86,14 +86,14 @@ function trboSend() {
}
else {
txtOutput.innerHTML += "Error Other: " + oJson.error.message;
- console.log("Error Other: chatgpt-turbo.js Line 87");
+ console.log("Error Other: gpt-core.js Line 89");
retryCount = 0;
}
}
// Interpret AI Response after Error Handling
else if (oJson.choices && oJson.choices[0].message) {
- // console.log("chatgpt-turbo.js Line 96" + oJson.choices + "" + oJson.choices[0].message);
+ // console.log("gpt-core.js Line 96" + oJson.choices + "" + oJson.choices[0].message);
// Always Run Response
var s = oJson.choices[0].message;
// Empty Response Handling
@@ -149,7 +149,7 @@ function trboSend() {
// Set lastResponse
lastResponse = s.content + "\n";
- // console.log("chatgpt-turbo.js Line 152" + lastResponse);
+ // console.log("gpt-core.js Line 152" + lastResponse);
}
}
@@ -311,7 +311,7 @@ function trboSend() {
// Sending API Payload
oHttp.send(JSON.stringify(data));
- // console.log("chatgpt-turbo.js Line 314" + JSON.stringify(data));
+ // console.log("gpt-core.js Line 314" + JSON.stringify(data));
// Relay Send to Screen
diff --git a/core/js/options.js b/core/js/options.js
index 14ea838..2998d98 100644
--- a/core/js/options.js
+++ b/core/js/options.js
@@ -55,8 +55,8 @@ function autoSelect() {
// Define keywords for different conditions
- // coding-related, math, logic, reasoning, language tasks
- const gptHuristics = ["code", "programming", "debug", "bash", "python", "javascript", "script", "language", "한글", "weather", "news", "space", "solar", "stock", "markets", "symbol", "ticker", "Google", "google", "date", "math", "fraction", "problem", "+", "="];
+ // coding-related, math, logic, science
+ const gptHuristics = ["code", "programming", "debug", "bash", "python", "javascript", "script", "space", "solar", "math", "fraction", "problem", "+", "="];
// For complex queries
const glHuristics = ["gemini"];
@@ -69,23 +69,24 @@ function autoSelect() {
selModel.value = "gpt-4o"; // Long queries might not need more token resources
}
else if (gptHuristics.some(keyword => userInput.includes(keyword))) {
- selModel.value = "gpt-4o"; // For coding-related, math, logic, reasoning, language tasks.
+ selModel.value = "o1-mini"; // For coding-related, math, logic, science.
}
else if (glHuristics.some(keyword => userInput.includes(keyword))) {
- selModel.value = "gemini"; // For complex queries, a different model could be preferred
+ selModel.value = "gemini"; // For Google Gemini's model
}
else if (dalHuristics.some(keyword => userInput.includes(keyword))) {
selModel.value = "dall-e-3"; // For dall-e-3 generated images
}
else {
- selModel.value = "gpt-4o"; // Default to a different model if none of the above conditions are met
+ selModel.value = "gpt-4o-mini"; // Fall back to the default model if none of the above conditions are met
}
// Now trigger the appropriate send function based on the selected model
switch (selModel.value) {
- case "gpt-3.5-turbo":
- case "gpt-4-turbo-preview":
+ case "gpt-4o-mini":
case "gpt-4o":
+ case "o1-mini":
+ case "o1-preview":
trboSend();
break;
case "gemini":
@@ -114,7 +115,7 @@ function updateButton() {
clearText();
autoSelect();
};
- } else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-4-turbo-preview" || selModel.value == "gpt-4o" ) {
+ } else if (selModel.value == "gpt-4o-mini" || selModel.value == "o1-mini" || selModel.value == "gpt-4o" || selModel.value == "o1-preview") {
btnSend.onclick = function() {
clearText();
trboSend();
@@ -146,7 +147,7 @@ function sendData() {
if (selModel.value == "auto") {
clearText();
autoSelect();
- } else if (selModel.value == "gpt-3.5-turbo" || selModel.value == "gpt-4-turbo-preview" || selModel.value == "gpt-4o") {
+ } else if (selModel.value == "gpt-4o-mini" || selModel.value == "o1-mini" || selModel.value == "gpt-4o" || selModel.value == "o1-preview") {
clearText();
trboSend();
} else if (selModel.value == "gemini") {
@@ -263,7 +264,6 @@ function mobile_txtmsd() {
var input = document.getElementById("imgInput");
input.style.display = "block";
-
// Mic Button
let micButton = document.querySelector(".mic-button");
micButton.style.top = "-49px";
diff --git a/index.html b/index.html
index 4b619eb..6cc4d8d 100644
--- a/index.html
+++ b/index.html
@@ -8,8 +8,7 @@