Skip to content

Commit

Permalink
update to openai 0.27 usage
Browse files Browse the repository at this point in the history
  • Loading branch information
alvaro-vinuela committed Nov 27, 2023
1 parent cbaffaa commit 0c5a007
Showing 1 changed file with 34 additions and 24 deletions.
58 changes: 34 additions & 24 deletions engineered_chatgpt_prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,9 @@
by the chatgpt model to generate the output text.
"""

# import asyncio
import asyncio
import os
import sys
import traceback
import openai
from dotenv import load_dotenv, find_dotenv
Expand All @@ -20,47 +21,58 @@

_ = load_dotenv(find_dotenv()) # read local .env file

# Async OpenAI client (openai>=1.x interface); credentials are read from the
# environment populated above by python-dotenv.
client = openai.AsyncOpenAI(
    api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORGANIZATION'),
)

# NOTE(review): the module-level assignments below are the legacy (<1.0)
# configuration style and are redundant with the client above; kept only in
# case other code still uses the module-level API — confirm and remove.
openai.organization = os.getenv('OPENAI_ORGANIZATION')
openai.api_key = os.getenv('OPENAI_API_KEY')
print("OpenAI version:", openai.__version__)


async def get_completion(prompt,
                         model="gpt-3.5-turbo"):
    """Query the OpenAI chat-completions API and stream the reply to stdout.

    Parameters
    ----------
    prompt : str
        User prompt, sent as a single "user" role message.
    model : str, optional
        Chat model identifier (default ``"gpt-3.5-turbo"``).

    Returns
    -------
    str or None
        The fully accumulated response text, or ``None`` when the request
        failed (connection error, rate limit, or non-200 status).
    """
    messages = [{"role": "user", "content": prompt}]
    chat = None
    try:
        chat = await client.chat.completions.create(
            model=model,
            messages=messages,
            # temperature=0 minimises the randomness of the model's output
            temperature=0,
            stream=True,
        )
    except openai.APIConnectionError as err:
        print("The server could not be reached")
        print(err.__cause__)  # underlying exception, likely raised within httpx
    except openai.RateLimitError:
        print("A 429 status code was received; we should back off a bit.")
    except openai.APIStatusError as err:
        print("Another non-200-range status code was received")
        print(err.status_code)
        print(err.response)

    # Fix: every failure path above must stop here — previously only the
    # APIStatusError branch returned, so the other handlers fell through to
    # `async for part in chat` with chat still None, raising TypeError.
    if chat is None:
        return None

    response = ""
    async for part in chat:
        # Each streamed chunk carries a (possibly None) content delta.
        response += part.choices[0].delta.content or ""
        sys.stdout.write(f"\r{response}>")
        sys.stdout.flush()
    return response

class EngineeredChatgptPrompts(QWidget): # pylint: disable=too-many-instance-attributes

class EngineeredChatgptPrompts(
QWidget): # pylint: disable=too-many-instance-attributes
"""
class to hold widgets and process method of main application
"""

def __init__(self):
super().__init__()

Expand Down Expand Up @@ -134,9 +146,7 @@ def process_text(self):
f"(delimited by triple backticks): ```{goal}```"
f"process the following text with specified goal"
f"(delimited by triple backticks): ```{input_text}```")
processed_text = get_completion(complete_prompt)
processed_text = f'Processed Text:\n{processed_text}'
self.output_text.setText(processed_text)
asyncio.run(get_completion(complete_prompt))

def load_goal(self):
""" open a dialog inspecting text files on file system """
Expand Down

0 comments on commit 0c5a007

Please sign in to comment.