From f41fb163c80400d38c7701e277e86df9f0a0de7d Mon Sep 17 00:00:00 2001 From: Mathias Winkel Date: Sat, 22 Jul 2023 01:05:29 +0200 Subject: [PATCH 1/8] PEP8 code styling, no functional change --- gpt_code_ui/kernel_program/config.py | 2 +- gpt_code_ui/kernel_program/kernel_manager.py | 4 +- gpt_code_ui/kernel_program/launch_kernel.py | 2 +- gpt_code_ui/kernel_program/main.py | 21 ++++---- gpt_code_ui/kernel_program/utils.py | 4 +- gpt_code_ui/main.py | 52 +++++++++--------- gpt_code_ui/webapp/main.py | 56 ++++++++++---------- 7 files changed, 74 insertions(+), 67 deletions(-) diff --git a/gpt_code_ui/kernel_program/config.py b/gpt_code_ui/kernel_program/config.py index aa37c08f..b4a4a6df 100644 --- a/gpt_code_ui/kernel_program/config.py +++ b/gpt_code_ui/kernel_program/config.py @@ -15,4 +15,4 @@ def get_logger(): logger = logging.getLogger(__name__) if "DEBUG" in os.environ: logger.setLevel(logging.DEBUG) - return logger \ No newline at end of file + return logger diff --git a/gpt_code_ui/kernel_program/kernel_manager.py b/gpt_code_ui/kernel_program/kernel_manager.py index 93f6d799..d32697d9 100644 --- a/gpt_code_ui/kernel_program/kernel_manager.py +++ b/gpt_code_ui/kernel_program/kernel_manager.py @@ -56,7 +56,7 @@ def cleanup_spawned_processes(): os.kill(pid, signal.CTRL_BREAK_EVENT) else: os.kill(pid, signal.SIGKILL) - + # After successful kill, cleanup pid file os.remove(fp) @@ -221,4 +221,4 @@ def start_kernel(): if __name__ == "__main__": kc = start_kernel() - start_snakemq(kc) \ No newline at end of file + start_snakemq(kc) diff --git a/gpt_code_ui/kernel_program/launch_kernel.py b/gpt_code_ui/kernel_program/launch_kernel.py index d193051d..f66b36c8 100644 --- a/gpt_code_ui/kernel_program/launch_kernel.py +++ b/gpt_code_ui/kernel_program/launch_kernel.py @@ -1,4 +1,4 @@ if __name__ == "__main__": from ipykernel import kernelapp as app - app.launch_new_instance() \ No newline at end of file + app.launch_new_instance() diff --git a/gpt_code_ui/kernel_program/main.py b/gpt_code_ui/kernel_program/main.py index 401133ee..bd9a9d90 100644 --- a/gpt_code_ui/kernel_program/main.py +++ b/gpt_code_ui/kernel_program/main.py @@ -7,7 +7,6 @@ import time import asyncio -import json import threading from queue import Queue @@ -47,6 +46,7 @@ app = Flask(__name__) CORS(app) + def start_kernel_manager(): global kernel_manager_process @@ -62,9 +62,11 @@ def start_kernel_manager(): with open(os.path.join(config.KERNEL_PID_DIR, "%d.pid" % kernel_manager_process.pid), "w") as p: p.write("kernel_manager") + def cleanup_kernel_program(): kernel_manager.cleanup_spawned_processes() + async def start_snakemq(): global messaging @@ -77,7 +79,7 @@ def on_recv(conn, ident, message): if message["value"] == "ready": logger.debug("Kernel is ready.") result_queue.put({ - "value":"Kernel is ready.", + "value": "Kernel is ready.", "type": "message" }) @@ -97,8 +99,9 @@ def send_queued_messages(): while True: if send_queue.qsize() > 0: message = send_queue.get() - utils.send_json(messaging, - {"type": "execute", "value": message["command"]}, + utils.send_json( + messaging, + {"type": "execute", "value": message["command"]}, config.IDENT_KERNEL_MANAGER ) time.sleep(0.1) @@ -117,7 +120,7 @@ async def async_link_loop(): @app.route("/api", methods=["POST", "GET"]) def handle_request(): - + if request.method == "GET": # Handle GET requests by sending everything that's in the receive_queue results = [result_queue.get() for _ in range(result_queue.qsize())] @@ -128,7 +131,8 @@ def handle_request(): send_queue.put(data) return 
jsonify({"result": "success"}) - + + @app.route("/restart", methods=["POST"]) def handle_restart(): @@ -152,9 +156,6 @@ async def main(): def run_flask_app(): app.run(host="0.0.0.0", port=APP_PORT) + if __name__ == "__main__": asyncio.run(main()) - - - - \ No newline at end of file diff --git a/gpt_code_ui/kernel_program/utils.py b/gpt_code_ui/kernel_program/utils.py index cce7d704..19d9e07a 100644 --- a/gpt_code_ui/kernel_program/utils.py +++ b/gpt_code_ui/kernel_program/utils.py @@ -7,6 +7,7 @@ import gpt_code_ui.kernel_program.config as config + def escape_ansi(line): ansi_escape = re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]") return ansi_escape.sub("", line) @@ -16,6 +17,7 @@ def send_json(messaging, message, identity): message = snakemq.message.Message(json.dumps(message).encode("utf-8"), ttl=600) messaging.send_message(identity, message) + def init_snakemq(ident, init_type="listen"): link = snakemq.link.Link() packeter = snakemq.packeter.Packeter(link) @@ -26,4 +28,4 @@ def init_snakemq(ident, init_type="listen"): link.add_connector(("localhost", config.SNAKEMQ_PORT)) else: raise Exception("Unsupported init type.") - return messaging, link \ No newline at end of file + return messaging, link diff --git a/gpt_code_ui/main.py b/gpt_code_ui/main.py index 5683608b..4e09ba1d 100644 --- a/gpt_code_ui/main.py +++ b/gpt_code_ui/main.py @@ -16,20 +16,23 @@ APP_URL = "http://localhost:%s" % APP_PORT + def run_webapp(): try: app.run(host="0.0.0.0", port=APP_PORT, use_reloader=False) - except Exception as e: - logging.exception("Error running the webapp:") + except Exception: + logging.exception("Error running the webapp") sys.exit(1) + def run_kernel_program(): try: asyncio.run(kernel_program_main()) - except Exception as e: - logging.exception("Error running the kernel_program:") + except Exception: + logging.exception("Error running the kernel_program") sys.exit(1) + def setup_logging(): log_format = "%(asctime)s [%(levelname)s]: %(message)s" logging.basicConfig(level=logging.INFO, format=log_format) @@ -38,32 +41,33 @@ def setup_logging(): file_handler.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(file_handler) + def print_color(text, color="gray"): # Default to gray - code="242" + code = "242" if color == "green": - code="35" - + code = "35" + gray_code = "\033[38;5;%sm" % code reset_code = "\033[0m" print(f"{gray_code}{text}{reset_code}") def print_banner(): - - print(""" + print(""" █▀▀ █▀█ ▀█▀ ▄▄ █▀▀ █▀█ █▀▄ █▀▀ █▄█ █▀▀ ░█░ ░░ █▄▄ █▄█ █▄▀ ██▄ - """) + """) + + print("> Open GPT-Code UI in your browser %s" % APP_URL) + print("") + print("You can inspect detailed logs in app.log.") + print("") + print("Find your OpenAI API key at https://platform.openai.com/account/api-keys") + print("") + print_color("Contribute to GPT-Code UI at https://github.com/ricklamers/gpt-code-ui") - print("> Open GPT-Code UI in your browser %s" % APP_URL) - print("") - print("You can inspect detailed logs in app.log.") - print("") - print("Find your OpenAI API key at https://platform.openai.com/account/api-keys") - print("") - print_color("Contribute to GPT-Code UI at https://github.com/ricklamers/gpt-code-ui") def main(): setup_logging() @@ -80,20 +84,19 @@ def main(): try: app.test_client().get("/") break - except: + except Exception: time.sleep(0.1) - - print_banner() - + + print_banner() + webbrowser.open(APP_URL) webapp_process.join() kernel_program_process.join() - except KeyboardInterrupt: print("Terminating processes...") - + cleanup_kernel_program() 
kernel_program_process.terminate() @@ -103,6 +106,7 @@ def main(): kernel_program_process.join() print("Processes terminated.") - + + if __name__ == '__main__': main() diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py index 332986e5..ce36775a 100644 --- a/gpt_code_ui/webapp/main.py +++ b/gpt_code_ui/webapp/main.py @@ -94,34 +94,34 @@ def inspect_file(filename: str) -> str: async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"): - prompt = f"""First, here is a history of what I asked you to do earlier. - The actual prompt follows after ENDOFHISTORY. - History: - {message_buffer.get_string()} - ENDOFHISTORY. - Write Python code, in a triple backtick Markdown code block, that does the following: - {user_prompt} - - Notes: - First, think step by step what you want to do and write it down in English. - Then generate valid Python code in a code block - Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment. - Define every variable before you use it. - For data munging, you can use - 'numpy', # numpy==1.24.3 - 'dateparser' #dateparser==1.1.8 - 'pandas', # matplotlib==1.5.3 - 'geopandas' # geopandas==0.13.2 - For pdf extraction, you can use - 'PyPDF2', # PyPDF2==3.0.1 - 'pdfminer', # pdfminer==20191125 - 'pdfplumber', # pdfplumber==0.9.0 - For data visualization, you can use - 'matplotlib', # matplotlib==3.7.1 - Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module. - If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files) - - Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. Replace INSERT_FILENAME_HERE with the actual filename.""" + prompt = f"""First, here is a history of what I asked you to do earlier. +The actual prompt follows after ENDOFHISTORY. +History: +{message_buffer.get_string()} +ENDOFHISTORY. +Write Python code, in a triple backtick Markdown code block, that does the following: +{user_prompt} + +Notes: + First, think step by step what you want to do and write it down in English. + Then generate valid Python code in a code block + Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment. + Define every variable before you use it. + For data munging, you can use + 'numpy', # numpy==1.24.3 + 'dateparser' #dateparser==1.1.8 + 'pandas', # matplotlib==1.5.3 + 'geopandas' # geopandas==0.13.2 + For pdf extraction, you can use + 'PyPDF2', # PyPDF2==3.0.1 + 'pdfminer', # pdfminer==20191125 + 'pdfplumber', # pdfplumber==0.9.0 + For data visualization, you can use + 'matplotlib', # matplotlib==3.7.1 + Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module. + If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files) + +Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. 
Replace INSERT_FILENAME_HERE with the actual filename."""

     if user_openai_key:
         openai.api_key = user_openai_key

From 3abfaf2302f981475d4fa328d1a5c3a1eba058c0 Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Mon, 24 Jul 2023 15:04:33 +0200
Subject: [PATCH 2/8] Store and use the full conversation history in proper chat mode. This way, referring to previous code is simpler.

---
 frontend/src/App.tsx       |  15 +----
 gpt_code_ui/webapp/main.py | 118 +++++++++++++++++--------------------
 2 files changed, 55 insertions(+), 78 deletions(-)

diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 3212d273..805e2e9e 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -41,7 +41,7 @@ function App() {
   let [messages, setMessages] = useState>(
     Array.from([
       {
-        text: "Hello! I'm a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
+        text: "Hello! I am a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
         role: "generator",
         type: "message",
       },
@@ -161,19 +161,6 @@ function App() {
   function completeUpload(message: string) {
     addMessage({ text: message, type: "message", role: "upload" });
     setWaitingForSystem(WaitingStates.Idle);
-
-    // Inform prompt server
-    fetch(`${Config.WEB_ADDRESS}/inject-context`, {
-      method: "POST",
-      headers: {
-        "Content-Type": "application/json",
-      },
-      body: JSON.stringify({
-        prompt: message,
-      }),
-    })
-      .then(() => {})
-      .catch((error) => console.error("Error:", error));
   }
 
   function startUpload(_: string) {
diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index ce36775a..5c3399c0 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -9,8 +9,6 @@
 import openai
 import pandas as pd
 
-from collections import deque
-
 from flask_cors import CORS
 from flask import Flask, request, jsonify, send_from_directory, Response
 from dotenv import load_dotenv
@@ -40,25 +38,56 @@
 APP_PORT = int(os.environ.get("WEB_PORT", 8080))
 
 
-class LimitedLengthString:
-    def __init__(self, maxlen=2000):
-        self.data = deque()
-        self.len = 0
-        self.maxlen = maxlen
+class ChatHistory():
+    def __init__(self):
+        self._buffer = list()
 
-    def append(self, string):
-        self.data.append(string)
-        self.len += len(string)
-        while self.len > self.maxlen:
-            popped = self.data.popleft()
-            self.len -= len(popped)
+        self.append(
+            "system",
+            """Write Python code, in a triple backtick Markdown code block, that answers the user prompts.
 
-    def get_string(self):
-        result = ''.join(self.data)
-        return result[-self.maxlen:]
+Notes:
+    Do not use your own knowledge to answer the user prompt. Instead, focus on generating Python code for doing so.
+    First, think step by step what you want to do and write it down in English.
+    Then generate valid Python code in a single code block.
+    Do not add commands to install packages.
+    Make sure all code is valid - it will be run in a Jupyter Python 3 kernel environment.
+    Define every variable before you use it.
+    For data processing, you can use
+    'numpy', # numpy==1.24.3
+    'dateparser' # dateparser==1.1.8
+    'pandas', # pandas==1.5.3
+    'geopandas' # geopandas==0.13.2
+    'tabulate' # tabulate==0.9.0
+    For pdf extraction, you can use
+    'PyPDF2', # PyPDF2==3.0.1
+    'pdfminer', # pdfminer==20191125
+    'pdfplumber', # pdfplumber==0.9.0
+    For data visualization, you can use
+    'matplotlib', # matplotlib==3.7.1
+    Be sure to generate charts with matplotlib. 
If you need geographical charts, use geopandas with the geopandas.datasets module. + If the user requests to generate a table, produce code that prints a markdown table. + If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files) + +If the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. Replace INSERT_FILENAME_HERE with the actual filename.""") + + def append(self, role: str, content: str): + if role not in ("user", "assistant", "system"): + raise ValueError(f"Invalid role: {role}") + self._buffer.append({ + "role": role, + "content": content, + }) -message_buffer = LimitedLengthString() + def upload_file(self, filename: str, file_info: str = None): + self.append("user", f"In the following, I will refer to the file {filename}.\n{file_info}") + + def __call__(self): + return self._buffer + + +chat_history = ChatHistory() def allowed_file(filename): @@ -92,36 +121,7 @@ def inspect_file(filename: str) -> str: return '' # file reading failed. - Don't want to know why. -async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"): - - prompt = f"""First, here is a history of what I asked you to do earlier. -The actual prompt follows after ENDOFHISTORY. -History: -{message_buffer.get_string()} -ENDOFHISTORY. -Write Python code, in a triple backtick Markdown code block, that does the following: -{user_prompt} - -Notes: - First, think step by step what you want to do and write it down in English. - Then generate valid Python code in a code block - Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment. - Define every variable before you use it. - For data munging, you can use - 'numpy', # numpy==1.24.3 - 'dateparser' #dateparser==1.1.8 - 'pandas', # matplotlib==1.5.3 - 'geopandas' # geopandas==0.13.2 - For pdf extraction, you can use - 'PyPDF2', # PyPDF2==3.0.1 - 'pdfminer', # pdfminer==20191125 - 'pdfplumber', # pdfplumber==0.9.0 - For data visualization, you can use - 'matplotlib', # matplotlib==3.7.1 - Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module. - If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files) - -Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. 
Replace INSERT_FILENAME_HERE with the actual filename."""
+async def get_code(messages, user_openai_key=None, model="gpt-3.5-turbo"):
 
     if user_openai_key:
         openai.api_key = user_openai_key
@@ -129,10 +129,7 @@ async def get_code(messages, user_openai_key=None, model="gpt-3.5-turbo"):
     arguments = dict(
         temperature=0.7,
         headers=OPENAI_EXTRA_HEADERS,
-        messages=[
-            # {"role": "system", "content": system},
-            {"role": "user", "content": prompt},
-        ]
+        messages=messages,
     )
 
     if openai.api_type == 'open_ai':
@@ -233,16 +230,6 @@ def download_file():
     return send_from_directory(os.path.join(os.getcwd(), 'workspace'), file, as_attachment=True)
 
 
-@app.route('/inject-context', methods=['POST'])
-def inject_context():
-    user_prompt = request.json.get('prompt', '')
-
-    # Append all messages to the message buffer for later use
-    message_buffer.append(user_prompt + "\n\n")
-
-    return jsonify({"result": "success"})
-
-
 @app.route('/generate', methods=['POST'])
 def generate_code():
     user_prompt = request.json.get('prompt', '')
@@ -252,12 +239,14 @@ def generate_code():
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
 
+    chat_history.append("user", user_prompt)
+
     code, text, status = loop.run_until_complete(
-        get_code(user_prompt, user_openai_key, model))
+        get_code(chat_history(), user_openai_key, model))
     loop.close()
 
-    # Append all messages to the message buffer for later use
-    message_buffer.append(user_prompt + "\n\n")
+    if status == 200:
+        chat_history.append("assistant", text)
 
     return jsonify({'code': code, 'text': text}), status
 
@@ -276,6 +265,7 @@ def upload_file():
         file_target = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
         file.save(file_target)
         file_info = inspect_file(file_target)
+        chat_history.upload_file(file.filename, file_info)
         return jsonify({'message': f'File {file.filename} uploaded successfully.\n{file_info}'}), 200
     else:
         return jsonify({'error': 'File type not allowed'}), 400

From e8685e2ce46242bcc860af79b2e9317d5428b262 Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Wed, 26 Jul 2023 11:04:03 +0200
Subject: [PATCH 3/8] add new message types from kernel manager:
 message_status for 'Kernel is ready' and the like,
 message_error for error messages/backtraces from execution

---
 frontend/src/components/Chat.tsx             | 4 ++--
 gpt_code_ui/kernel_program/kernel_manager.py | 2 +-
 gpt_code_ui/kernel_program/main.py           | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/frontend/src/components/Chat.tsx b/frontend/src/components/Chat.tsx
index 4fbedb1e..1e625ac8 100644
--- a/frontend/src/components/Chat.tsx
+++ b/frontend/src/components/Chat.tsx
@@ -72,7 +72,7 @@ function Message(props: {
))} - {(props.type == "message_raw") && + {(["message_raw", "message_error"].indexOf(props.type) !== -1) && (props.showLoader ? (
{text} {props.showLoader ?
: null} @@ -80,7 +80,7 @@ function Message(props: { ) : (
))} - + {props.type == "image/png" &&
` }}>
}
diff --git a/gpt_code_ui/kernel_program/kernel_manager.py b/gpt_code_ui/kernel_program/kernel_manager.py
index d32697d9..b8f844fd 100644
--- a/gpt_code_ui/kernel_program/kernel_manager.py
+++ b/gpt_code_ui/kernel_program/kernel_manager.py
@@ -149,7 +149,7 @@ def flush_kernel_msgs(kc, tries=1, timeout=0.2):
             elif msg["msg_type"] == "error":
                 send_message(
                     utils.escape_ansi("\n".join(msg["content"]["traceback"])),
-                    "message_raw",
+                    "message_error",
                 )
     except queue.Empty:
         hit_empty += 1
diff --git a/gpt_code_ui/kernel_program/main.py b/gpt_code_ui/kernel_program/main.py
index bd9a9d90..170b362b 100644
--- a/gpt_code_ui/kernel_program/main.py
+++ b/gpt_code_ui/kernel_program/main.py
@@ -80,10 +80,10 @@ def on_recv(conn, ident, message):
             logger.debug("Kernel is ready.")
             result_queue.put({
                 "value": "Kernel is ready.",
-                "type": "message"
+                "type": "message_status"
             })
 
-    elif message["type"] in ["message", "message_raw", "image/png", "image/jpeg"]:
+    elif message["type"] in ["message", "message_raw", "message_error", "image/png", "image/jpeg"]:
         # TODO: 1:1 kernel <> channel mapping
         logger.debug("%s of type %s" % (message["value"], message["type"]))

From b0cb2b6b231f10130947117976bef091e34af91a Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Wed, 26 Jul 2023 21:12:06 +0200
Subject: [PATCH 4/8] store execution results or error messages in conversation history to allow for easier back-reference, e.g. when asking 'Please fix the code'

---
 gpt_code_ui/webapp/main.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index 5c3399c0..efbd1c73 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -83,6 +83,12 @@ def append(self, role: str, content: str):
     def upload_file(self, filename: str, file_info: str = None):
         self.append("user", f"In the following, I will refer to the file {filename}.\n{file_info}")
 
+    def add_execution_result(self, result: str):
+        self.append("user", f"Executing this code yielded the following output:\n{result}")
+
+    def add_error(self, message: str):
+        self.append("user", f"Executing this code led to an error.\nThe error message reads:\n{message}")
+
     def __call__(self):
         return self._buffer
 
@@ -206,6 +212,15 @@ def proxy_kernel_manager(path):
     else:
         resp = requests.get(f'http://localhost:{KERNEL_APP_PORT}/{path}')
 
+    # store execution results in conversation history to allow back-references by the user
+    for res in json.loads(resp.content).get('results', []):
+        if res['type'] == "message":
+            chat_history.add_execution_result(res['value'])
+        elif res['type'] == "message_error":
+            chat_history.add_error(res['value'])
+
+        print(res)
+
     excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
     headers = [(name, value) for (name, value) in resp.raw.headers.items()

From 8a5bf416c6f277f37b542014e8259e523a782c0b Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Wed, 26 Jul 2023 21:12:47 +0200
Subject: [PATCH 5/8] output error messages with syntax highlighting

---
 frontend/src/components/Chat.tsx | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/frontend/src/components/Chat.tsx b/frontend/src/components/Chat.tsx
index 1e625ac8..f2bdb933 100644
--- a/frontend/src/components/Chat.tsx
+++ b/frontend/src/components/Chat.tsx
@@ -72,7 +72,25 @@ function Message(props: {
))} - {(["message_raw", "message_error"].indexOf(props.type) !== -1) && + {props.type == "message_error" && + (props.showLoader ? ( +
+ {text} {props.showLoader ?
: null} +
+ ) : ( +
+ Execution Error: + +
+ ))} + + {props.type == "message_raw" && (props.showLoader ? (
{text} {props.showLoader ?
: null} From a053c5fc1f74932703909a0baaf63c0741b54f59 Mon Sep 17 00:00:00 2001 From: Mathias Winkel Date: Thu, 3 Aug 2023 10:49:32 +0200 Subject: [PATCH 6/8] make sure that message_status messages are actually shown --- frontend/src/components/Chat.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/src/components/Chat.tsx b/frontend/src/components/Chat.tsx index f2bdb933..c6b3510c 100644 --- a/frontend/src/components/Chat.tsx +++ b/frontend/src/components/Chat.tsx @@ -90,7 +90,7 @@ function Message(props: {
))} - {props.type == "message_raw" && + {["message_raw", "message_status"].includes(props.type) && (props.showLoader ? (
{text} {props.showLoader ?
: null}
 ) : (
 ))}

From 3b92a1f43d1c95356364212eca7a23f27c8c207c Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Thu, 3 Aug 2023 14:43:18 +0200
Subject: [PATCH 7/8] truncate entries in the history to avoid overrunning it too quickly with excessive output.

---
 gpt_code_ui/webapp/main.py | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index efbd1c73..b3f08cdb 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -42,7 +42,7 @@ class ChatHistory():
     def __init__(self):
         self._buffer = list()
 
-        self.append(
+        self._append(
             "system",
             """Write Python code, in a triple backtick Markdown code block, that answers the user prompts.
@@ -71,23 +71,37 @@ def __init__(self):
 If the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: Download file. Replace INSERT_FILENAME_HERE with the actual filename.""")
 
-    def append(self, role: str, content: str):
+    def _append(self, role: str, content: str, name: str = None):
         if role not in ("user", "assistant", "system"):
             raise ValueError(f"Invalid role: {role}")
-        self._buffer.append({
-            "role": role,
-            "content": content,
-        })
+        entry = {"role": role, "content": content}
+        if name is not None:
+            entry["name"] = name
+
+        self._buffer.append(entry)
+
+    def _truncate(self, s: str, maxlines: int = 10) -> str:
+        return '\n'.join(s.splitlines()[:maxlines])
+
+    def add_prompt(self, prompt: str):
+        self._append("user", prompt)
+
+    def add_answer(self, answer: str):
+        self._append("assistant", answer)
 
     def upload_file(self, filename: str, file_info: str = None):
-        self.append("user", f"In the following, I will refer to the file {filename}.\n{file_info}")
+        self._append("user", f"In the following, I will refer to the file {filename}.\n{file_info}")
 
     def add_execution_result(self, result: str):
-        self.append("user", f"Executing this code yielded the following output:\n{result}")
+        self._append(
+            "user",
+            f"These are the first lines of the output generated when executing the code:\n{self._truncate(result)}")
 
     def add_error(self, message: str):
-        self.append("user", f"Executing this code led to an error.\nThe error message reads:\n{message}")
+        self._append(
+            "user",
+            f"Executing this code led to an error.\nThe first lines of the error message read:\n{self._truncate(message)}")
 
     def __call__(self):
         return self._buffer
 
@@ -254,14 +268,14 @@ def generate_code():
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
 
-    chat_history.append("user", user_prompt)
+    chat_history.add_prompt(user_prompt)
 
     code, text, status = loop.run_until_complete(
         get_code(chat_history(), user_openai_key, model))
     loop.close()
 
     if status == 200:
-        chat_history.append("assistant", text)
+        chat_history.add_answer(text)
 
     return jsonify({'code': code, 'text': text}), status

From 7c3b4d622edbd44e91aded6533d0c2178fcccf9c Mon Sep 17 00:00:00 2001
From: Mathias Winkel
Date: Thu, 3 Aug 2023 15:01:10 +0200
Subject: [PATCH 8/8] Distinguish User and Computer prompts. There is no real documentation on what difference this makes, but it is at least semantically correct.
---
 gpt_code_ui/webapp/main.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index b3f08cdb..ca10aa0b 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -85,7 +85,7 @@ def _truncate(self, s: str, maxlines: int = 10) -> str:
         return '\n'.join(s.splitlines()[:maxlines])
 
     def add_prompt(self, prompt: str):
-        self._append("user", prompt)
+        self._append("user", prompt, "User")
 
     def add_answer(self, answer: str):
         self._append("assistant", answer)
@@ -96,12 +96,14 @@ def upload_file(self, filename: str, file_info: str = None):
     def add_execution_result(self, result: str):
         self._append(
             "user",
-            f"These are the first lines of the output generated when executing the code:\n{self._truncate(result)}")
+            f"These are the first lines of the output generated when executing the code:\n{self._truncate(result)}",
+            "Computer")
 
     def add_error(self, message: str):
         self._append(
             "user",
-            f"Executing this code led to an error.\nThe first lines of the error message read:\n{self._truncate(message)}")
+            f"Executing this code led to an error.\nThe first lines of the error message read:\n{self._truncate(message)}",
+            "Computer")
 
     def __call__(self):
         return self._buffer
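
Taken together, patches 2, 7 and 8 turn the old flattened prompt buffer into a structured message list. A minimal, self-contained sketch of the resulting conversation flow (the prompt, answer and error values are hypothetical; the class is trimmed to the methods these patches introduce):

# Sketch of the ChatHistory flow after patches 2, 7 and 8 (hypothetical values).
class ChatHistory:
    def __init__(self):
        # system prompt shortened here; see the patched webapp/main.py for the full text
        self._buffer = [{"role": "system", "content": "Write Python code ..."}]

    def _append(self, role, content, name=None):
        entry = {"role": role, "content": content}
        if name is not None:
            entry["name"] = name          # patch 8: tag who is "speaking"
        self._buffer.append(entry)

    def _truncate(self, s, maxlines=10):
        # patch 7: keep only the first lines of kernel output/errors
        return "\n".join(s.splitlines()[:maxlines])

    def add_prompt(self, prompt):
        self._append("user", prompt, "User")

    def add_answer(self, answer):
        self._append("assistant", answer)

    def add_error(self, message):
        self._append("user", "Executing this code led to an error.\n"
                     + self._truncate(message), "Computer")

    def __call__(self):
        return self._buffer


history = ChatHistory()
history.add_prompt("Plot a histogram of the ages in ages.csv")    # /generate request
history.add_answer("```python\nimport pandas as pd\n# ...\n```")  # model reply, stored on HTTP 200
history.add_error("Traceback (most recent call last):\n ...")     # relayed by the kernel proxy

# history() is the `messages` list that get_code() hands to the OpenAI chat endpoint
for entry in history():
    print(entry["role"], entry.get("name", ""), "->", entry["content"].splitlines()[0])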
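
Likewise, the message-type routing that patch 3 adds between the kernel manager and the web frontend can be sketched in isolation (queue wiring simplified from gpt_code_ui/kernel_program/main.py; the sample message value is hypothetical):

# Sketch of patch 3's message routing: typed results land in the queue the frontend polls.
from queue import Queue

result_queue = Queue()

# types the frontend renders; patch 3 adds "message_status" and "message_error"
FORWARDED_TYPES = ["message", "message_raw", "message_error", "image/png", "image/jpeg"]


def on_recv(message: dict):
    # kernel startup is announced under its own "message_status" type
    if message.get("value") == "ready":
        result_queue.put({"value": "Kernel is ready.", "type": "message_status"})
    # tracebacks arrive as "message_error" so Chat.tsx can syntax-highlight them
    elif message["type"] in FORWARDED_TYPES:
        result_queue.put({"value": message["value"], "type": message["type"]})


on_recv({"type": "message_error", "value": "ZeroDivisionError: division by zero"})
print(result_queue.get())  # {'value': 'ZeroDivisionError: ...', 'type': 'message_error'}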