Skip to content

Commit

Permalink
Restructure code so we can support multiple LLMs
Browse files Browse the repository at this point in the history
  • Loading branch information
keithsw1111 committed Feb 9, 2025
1 parent ec3d427 commit f2999dc
Show file tree
Hide file tree
Showing 16 changed files with 154 additions and 59 deletions.
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# Lines in prompts which have a # at the first character are comments and should be ignored. They will not be passed to the LLM.
#
# This is the LLM generic prompt. Copy this file and prefix with the appropriate LLM name followed by an underscore for an LLM specific prompt.
#
# This prompt is used in AI mapping of sequences to models and model groups.
# Models can receive effects from models, submodels or groups in the source sequence.
#
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# Lines in prompts which have a # at the first character are comments and should be ignored. They will not be passed to the LLM.
#
# This is the LLM generic prompt. Copy this file and prefix with the appropriate LLM name followed by an underscore for an LLM specific prompt.
#
# This prompt is used in AI mapping of sequences at the node level.
# Node level mapping is only attempted where the source sequence has node level effects.
# Node mapping is only available to small channel models. See the code to see the latest limit.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# Lines in prompts which have a # at the first character are comments and should be ignored. They will not be passed to the LLM.
#
# This is the LLM generic prompt. Copy this file and prefix with the appropriate LLM name followed by an underscore for an LLM specific prompt.
#
# This prompt is used in AI mapping of sequences at the strand level.
# Strand level mapping is only attempted where the source sequence has strand level effects.
# Strand effects can only be mapped to strands.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# Lines in prompts which have a # at the first character are comments and should be ignored. They will not be passed to the LLM.
#
# This is the LLM generic prompt. Copy this file and prefix with the appropriate LLM name followed by an underscore for an LLM specific prompt.
#
# This prompt is used in AI mapping of sequences to submodels.
# Submodels can receive effects from models, submodels or groups in the source sequence.
#
Expand Down
17 changes: 8 additions & 9 deletions xLights/Xlights.vcxproj
Original file line number Diff line number Diff line change
Expand Up @@ -497,13 +497,14 @@ xcopy "$(SolutionDir)..\bin64\Vamp\" "$(TargetDir)Vamp\" /e /y /i /r
<ClCompile Include="..\xSchedule\wxJSON\jsonval.cpp" />
<ClCompile Include="..\xSchedule\wxJSON\jsonwriter.cpp" />
<ClCompile Include="AboutDialog.cpp" />
<ClCompile Include="ai\aiBase.cpp" />
<ClCompile Include="ai\chatGPT.cpp" />
<ClCompile Include="AlignmentDialog.cpp" />
<ClCompile Include="AutoLabelDialog.cpp" />
<ClCompile Include="automation\automation.cpp" />
<ClCompile Include="automation\LuaRunner.cpp" />
<ClCompile Include="automation\PythonRunner.cpp" />
<ClCompile Include="automation\xLightsAutomations.cpp" />
<ClCompile Include="chatGPT.cpp" />
<ClCompile Include="controllers\ILightThat.cpp" />
<ClCompile Include="BatchRenderDialog.cpp" />
<ClCompile Include="BulkEditColourPickerDialog.cpp" />
Expand Down Expand Up @@ -1013,12 +1014,13 @@ xcopy "$(SolutionDir)..\bin64\Vamp\" "$(TargetDir)Vamp\" /e /y /i /r
<ClInclude Include="..\xSchedule\wxHTTPServer\sha1.h" />
<ClInclude Include="..\xSchedule\wxHTTPServer\wxhttpserver.h" />
<ClInclude Include="AboutDialog.h" />
<ClInclude Include="ai\aiBase.h" />
<ClInclude Include="ai\chatGPT.h" />
<ClInclude Include="AlignmentDialog.h" />
<ClInclude Include="AutoLabelDialog.h" />
<ClInclude Include="automation\automation.h" />
<ClInclude Include="automation\LuaRunner.h" />
<ClInclude Include="automation\PythonRunner.h" />
<ClInclude Include="chatGPT.h" />
<ClInclude Include="controllers\ILightThat.h" />
<ClInclude Include="BatchRenderDialog.h" />
<ClInclude Include="BulkEditColourPickerDialog.h" />
Expand Down Expand Up @@ -1542,13 +1544,10 @@ xcopy "$(SolutionDir)..\bin64\Vamp\" "$(TargetDir)Vamp\" /e /y /i /r
</ItemGroup>
<ItemGroup>
<Text Include="..\documentation\xlDo Commands.txt" />
<Text Include="..\prompts\ChatGPT_AI_Model_AutoMap.txt">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
<DeploymentContent Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</DeploymentContent>
</Text>
<Text Include="..\prompts\ChatGPT_AI_Node_AutoMap.txt" />
<Text Include="..\prompts\ChatGPT_AI_Strand_AutoMap.txt" />
<Text Include="..\prompts\ChatGPT_AI_SubModel_AutoMap.txt" />
<Text Include="..\prompts\AI_Model_AutoMap.txt" />
<Text Include="..\prompts\AI_Node_AutoMap.txt" />
<Text Include="..\prompts\AI_Strand_AutoMap.txt" />
<Text Include="..\prompts\AI_SubModel_AutoMap.txt" />
</ItemGroup>
<ItemGroup>
<CustomBuild Include="effects\ispc\LayerBlendingFunctions.ispc">
Expand Down
25 changes: 19 additions & 6 deletions xLights/Xlights.vcxproj.filters
Original file line number Diff line number Diff line change
Expand Up @@ -1102,7 +1102,12 @@
<ClCompile Include="preferences\ServicesPanel.cpp">
<Filter>Preferences</Filter>
</ClCompile>
<ClCompile Include="chatGPT.cpp" />
<ClCompile Include="ai\aiBase.cpp">
<Filter>ai</Filter>
</ClCompile>
<ClCompile Include="ai\chatGPT.cpp">
<Filter>ai</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="BatchRenderDialog.h" />
Expand Down Expand Up @@ -2214,7 +2219,12 @@
<ClInclude Include="preferences\ServicesPanel.h">
<Filter>Preferences</Filter>
</ClInclude>
<ClInclude Include="chatGPT.h" />
<ClInclude Include="ai\aiBase.h">
<Filter>ai</Filter>
</ClInclude>
<ClInclude Include="ai\chatGPT.h">
<Filter>ai</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Models">
Expand Down Expand Up @@ -2268,6 +2278,9 @@
<Filter Include="Prompts">
<UniqueIdentifier>{22d4d199-c5a9-4be6-8413-da0802e96e06}</UniqueIdentifier>
</Filter>
<Filter Include="ai">
<UniqueIdentifier>{b7af1744-89fc-4b00-b7a5-fac169948940}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="resource.rc" />
Expand All @@ -2276,16 +2289,16 @@
<Text Include="..\documentation\xlDo Commands.txt">
<Filter>automation</Filter>
</Text>
<Text Include="..\prompts\ChatGPT_AI_Model_AutoMap.txt">
<Text Include="..\prompts\AI_Model_AutoMap.txt">
<Filter>Prompts</Filter>
</Text>
<Text Include="..\prompts\ChatGPT_AI_Node_AutoMap.txt">
<Text Include="..\prompts\AI_Node_AutoMap.txt">
<Filter>Prompts</Filter>
</Text>
<Text Include="..\prompts\ChatGPT_AI_Strand_AutoMap.txt">
<Text Include="..\prompts\AI_Strand_AutoMap.txt">
<Filter>Prompts</Filter>
</Text>
<Text Include="..\prompts\ChatGPT_AI_SubModel_AutoMap.txt">
<Text Include="..\prompts\AI_SubModel_AutoMap.txt">
<Filter>Prompts</Filter>
</Text>
</ItemGroup>
Expand Down
1 change: 1 addition & 0 deletions xLights/ai/aiBase.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
#include "aiBase.h"
17 changes: 14 additions & 3 deletions xLights/chatGPT.h → xLights/ai/aiBase.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,17 @@

class xLightsFrame;

std::string CallChatGPT(xLightsFrame* frame, const std::string& prompt, const std::string& token = "");
bool TestChatGPT(xLightsFrame* frame, const std::string& token = "");
bool IsChatGPTAvailable(xLightsFrame* frame, const std::string& token = "");
// Abstract base class for LLM back ends (ChatGPT etc.). Derived classes
// implement the transport and provider specifics; callers program against
// this interface so multiple LLMs can be supported.
class aiBase {

protected:
    // Non-owning pointer to the application frame, used by derived classes
    // to read service settings (e.g. stored bearer tokens). Never deleted here.
    xLightsFrame* _frame = nullptr;

public:
    // frame must outlive this object; aiBase does not take ownership.
    explicit aiBase(xLightsFrame* frame) : _frame(frame) {}
    virtual ~aiBase() {}

    // Send prompt to the LLM and return its response text.
    // token: optional bearer token; when empty, implementations are expected
    // to fall back to a token stored in the service settings.
    virtual std::string CallLLM(const std::string& prompt, const std::string& token = "") const = 0;
    // Issue a trivial request to verify the LLM is reachable with the given
    // (or stored) token.
    virtual bool TestLLM(const std::string& token = "") const = 0;
    // True when a token is supplied or one is configured for this LLM.
    virtual bool IsAvailable(const std::string& token = "") const = 0;
    // Human-readable name of this LLM implementation, e.g. "ChatGPT".
    virtual std::string GetLLMName() const = 0;
};
25 changes: 8 additions & 17 deletions xLights/chatGPT.cpp → xLights/ai/chatGPT.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,27 +8,18 @@

#include <log4cpp/Category.hh>

// https://platform.openai.com/docs/api-reference/introduction

#define CHATGPT_API_URL "https://api.openai.com/v1/chat/completions"

// we may want to make these user controlled in the future or even feature controlled
#define CHATGPT_MODEL "gpt-4o-mini"
#define TEMPERATURE "0.0"

// to get a list of models curl https://api.openai.com/v1/models -H "Authorization: Bearer YOUR_API_KEY"

bool IsChatGPTAvailable(xLightsFrame* frame, const std::string& token) {
return token != "" || frame->GetServiceSetting("ChatGPTBearerToken") != "";
bool chatGPT::IsAvailable(const std::string& token) const {
    // Usable when the caller supplies a token directly...
    if (!token.empty()) {
        return true;
    }
    // ...or when a bearer token has been stored in the service settings.
    return _frame->GetServiceSetting("ChatGPTBearerToken") != "";
}

std::string CallChatGPT(xLightsFrame* frame, const std::string& prompt, const std::string& token) {
std::string chatGPT::CallLLM(const std::string& prompt, const std::string& token) const {

static log4cpp::Category& logger_base = log4cpp::Category::getInstance(std::string("log_base"));

std::string bearerToken = token;

if (bearerToken == "") bearerToken = frame->GetServiceSetting("ChatGPTBearerToken");
if (bearerToken == "") bearerToken = _frame->GetServiceSetting("ChatGPTBearerToken");

if (token == "" && bearerToken.empty()) {
wxMessageBox("You must set a ChatGPT Bearer Token in the Preferences on the Services Panel", "Error", wxICON_ERROR);
Expand All @@ -41,7 +32,7 @@ std::string CallChatGPT(xLightsFrame* frame, const std::string& prompt, const st
Replace(p, std::string("\r"), std::string(""));
Replace(p, std::string("\n"), std::string("\\n"));

std::string request = "{ \"model\": \"" + std::string(CHATGPT_MODEL) + "\", \"messages\": [ { \"role\": \"user\",\"content\": \"" + JSONSafe(p) + "\" } ] , \"temperature\": " + std::string(TEMPERATURE) + " }";
std::string request = "{ \"model\": \"" + model + "\", \"messages\": [ { \"role\": \"user\",\"content\": \"" + JSONSafe(p) + "\" } ] , \"temperature\": " + std::to_string(temperature) + " }";

std::vector<std::pair<std::string, std::string>> customHeaders = {
{ "Authorization", "Bearer " + bearerToken }
Expand All @@ -50,7 +41,7 @@ std::string CallChatGPT(xLightsFrame* frame, const std::string& prompt, const st
logger_base.debug("ChatGPT: %s", request.c_str());

int responseCode = 0;
std::string response = Curl::HTTPSPost(CHATGPT_API_URL, request, "", "", "JSON", 60, customHeaders, &responseCode);
std::string response = Curl::HTTPSPost(url, request, "", "", "JSON", 60, customHeaders, &responseCode);

logger_base.debug("ChatGPT Response %d: %s", responseCode, response.c_str());

Expand Down Expand Up @@ -85,8 +76,8 @@ std::string CallChatGPT(xLightsFrame* frame, const std::string& prompt, const st
return response;
}

bool TestChatGPT(xLightsFrame* frame, const std::string& token) {
std::string response = CallChatGPT(frame, "Hello", token);
bool chatGPT::TestLLM(const std::string& token) const {
std::string response = CallLLM("Hello", token);
if (response.empty()) {
return false;
}
Expand Down
37 changes: 37 additions & 0 deletions xLights/ai/chatGPT.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#pragma once

/***************************************************************
* This source files comes from the xLights project
* https://www.xlights.org
* https://github.com/xLightsSequencer/xLights
* See the github commit history for a record of contributing
* developers.
* Copyright claimed based on commit dates recorded in Github
* License: https://github.com/xLightsSequencer/xLights/blob/master/License.txt
**************************************************************/

#include "aiBase.h"

#include <string>

// https://platform.openai.com/docs/api-reference/introduction
// to get a list of models curl https://api.openai.com/v1/models -H "Authorization: Bearer YOUR_API_KEY"

// ChatGPT (OpenAI chat completions) implementation of the aiBase interface.
class chatGPT : public aiBase {

    // OpenAI chat completions endpoint.
    std::string url = "https://api.openai.com/v1/chat/completions";
    // We may want to make these user controlled in the future or even feature controlled.
    std::string model = "gpt-4o-mini";
    // 0.0f requests deterministic (least random) completions.
    float temperature = 0.0f;

public:

    // frame must outlive this object; used to read the stored bearer token.
    explicit chatGPT(xLightsFrame* frame) : aiBase(frame) {}
    virtual ~chatGPT() {}

    // Send prompt to ChatGPT and return the response text.
    // token overrides the stored "ChatGPTBearerToken" service setting when non-empty.
    std::string CallLLM(const std::string& prompt, const std::string& token = "") const override;
    // Sends a trivial "Hello" prompt to validate connectivity and the token.
    bool TestLLM(const std::string& token = "") const override;
    // True when a token is supplied or a ChatGPT bearer token is configured.
    bool IsAvailable(const std::string& token = "") const override;
    std::string GetLLMName() const override {
        return "ChatGPT";
    }
};
5 changes: 3 additions & 2 deletions xLights/preferences/ServicesPanel.cpp
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#include "ServicesPanel.h"

#include "xLightsMain.h"
#include "chatGPT.h"
#include "ai/chatGPT.h"

//(*InternalHeaders(ServicesPanel)
#include <wx/intl.h>
Expand Down Expand Up @@ -176,7 +176,8 @@ void ServicesPanel::OnButtonTestClick(wxCommandEvent& event) {
if (IsServiceValid(servicesList->GetItemText(i, 0), servicesList->GetItemText(i,1), servicesList->GetItemText(i,2), servicesList->GetItemText(i,3)))
{
if (servicesList->GetItemText(i, 0) == "ChatGPT") {
if (TestChatGPT(frame, servicesList->GetItemText(i, 2))) {
chatGPT llm(frame);
if (llm.TestLLM(servicesList->GetItemText(i, 2))) {
wxMessageBox("Service " + servicesList->GetItemText(i, 0) + " is valid", "Success", wxICON_INFORMATION);
} else {
wxMessageBox("Service " + servicesList->GetItemText(i, 0) + " is not valid", "Error", wxICON_ERROR);
Expand Down
6 changes: 4 additions & 2 deletions xLights/xLights.cbp
Original file line number Diff line number Diff line change
Expand Up @@ -506,8 +506,10 @@
<Unit filename="cad/STLWriter.h" />
<Unit filename="cad/VRMLWriter.cpp" />
<Unit filename="cad/VRMLWriter.h" />
<Unit filename="chatGPT.h" />
<Unit filename="chatGPT.cpp" />
<Unit filename="ai/aiBase.h" />
<Unit filename="ai/aiBase.cpp" />
<Unit filename="ai/chatGPT.h" />
<Unit filename="ai/chatGPT.cpp" />
<Unit filename="controllers/AlphaPix.cpp" />
<Unit filename="controllers/AlphaPix.h" />
<Unit filename="controllers/BaseController.cpp" />
Expand Down
Loading

0 comments on commit f2999dc

Please sign in to comment.