Commit
Merge pull request #42 from Vaayne/improvement/enhance_error_message
enhance error message
vaayne authored Apr 27, 2023
2 parents ded0173 + 8c0da76 commit fb564c6
Showing 8 changed files with 172 additions and 115 deletions.
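
The common thread across the diffs below: each engine's public chat entry point now wraps a private chat helper in a try/catch (plus a three-attempt retry loop for the ChatGPT engines) and sends a readable error string back over the Plasmo messaging port instead of failing silently. A minimal TypeScript sketch of that pattern (EngineChat is a hypothetical stand-in for BardChat, BingChat, ChatGPTApiChat, or ChatGPTWebChat):

import type { PlasmoMessaging } from "@plasmohq/messaging"

// Hypothetical engine-specific helper: streams partial results via res.send()
// and throws on failure, like the private chat() helpers in this PR.
async function chat(prompt: string, res: PlasmoMessaging.Response<any>) {
  // ...call the backend and stream partial results...
}

export async function EngineChat(
  prompt: string,
  res: PlasmoMessaging.Response<any>
) {
  let message = ""
  for (let i = 0; i < 3; i++) {
    try {
      await chat(prompt, res)
      return // success: partial results were already streamed
    } catch (err) {
      console.error(err)
      message = err.message // remember the most recent error
    }
  }
  // All attempts failed: surface a human-readable error to the caller.
  res.send("Sorry, this engine is not available at the moment. error: " + message)
}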
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
# Changelog

## v0.0.18 - 2023-04-27
### Fixed
- Fix using chatgpt web
- Enhance error message

## v0.0.17 - 2023-04-25
### Added
- Support new bing as backend engine
2 changes: 1 addition & 1 deletion package.json
@@ -1,7 +1,7 @@
{
"name": "notionai-plus",
"displayName": "NotionAI Plus",
"version": "0.0.17",
"version": "0.0.18",
"description": "NotionAI Plus is a browser extension that brings the power of NotionAI to any website you visit.",
"scripts": {
"dev": "plasmo dev",
14 changes: 7 additions & 7 deletions src/background/ports/stream.ts
@@ -2,9 +2,9 @@ import type { PlasmoMessaging } from "@plasmohq/messaging"

import { BardChat } from "~lib/api/bard"
import { BingChat } from "~lib/api/bing"
import { ChatStream } from "~lib/api/chatgpt-api"
import { PostChatGPTStream } from "~lib/api/chatgpt-web"
import { PostNotionStream } from "~lib/api/notion-completion"
import { ChatGPTApiChat } from "~lib/api/chatgpt-api"
import { ChatGPTWebChat } from "~lib/api/chatgpt-web"
import { NotionCompletion } from "~lib/api/notion-completion"
import { EngineEnum } from "~lib/enums"
import {
RequestBody,
@@ -22,13 +22,13 @@ const handler: PlasmoMessaging.PortHandler = async (req, res) => {

switch (body.engine) {
case EngineEnum.ChatGPTWeb:
await PostChatGPTStream(`${instruction}\n\n${prompt}`, res)
await ChatGPTWebChat(`${instruction}\n\n${prompt}`, res)
break
case EngineEnum.Bing:
await BingChat(`${instruction}\n\n${prompt}`, res)
break
case EngineEnum.ChatGPTAPI:
await ChatStream(
await ChatGPTApiChat(
OPENAI_API_URL,
instruction,
prompt,
@@ -40,7 +40,7 @@ const handler: PlasmoMessaging.PortHandler = async (req, res) => {
await BardChat(`${instruction}\n\n${prompt}`, res)
break
case EngineEnum.NotionBoy:
await ChatStream(
await ChatGPTApiChat(
NOTIONBOY_API_URL,
instruction,
prompt,
@@ -49,7 +49,7 @@ const handler: PlasmoMessaging.PortHandler = async (req, res) => {
)
break
case EngineEnum.NotionAI:
await PostNotionStream(
await NotionCompletion(
res,
body.builtinPrompt,
body.context,
19 changes: 15 additions & 4 deletions src/lib/api/bard.ts
@@ -33,10 +33,7 @@ function generateReqId() {
return Math.floor(Math.random() * 900000) + 100000
}

export async function BardChat(
prompt: string,
res: PlasmoMessaging.Response<any>
) {
async function chat(prompt: string, res: PlasmoMessaging.Response<any>) {
const requestParams = await fetchRequestParams()
let contextIds = ["", "", ""]
const resp = await ofetch(
@@ -62,3 +59,17 @@ export async function BardChat(
contextIds = ids
res.send(text)
}

export async function BardChat(
prompt: string,
res: PlasmoMessaging.Response<any>
) {
try {
await chat(prompt, res)
} catch (err) {
console.error(err)
res.send(
"Sorry, Bard Chat is not available at the moment. error: " + err.message
)
}
}
19 changes: 15 additions & 4 deletions src/lib/api/bing.ts
@@ -232,10 +232,7 @@ export async function createConversation(): Promise<ConversationResponse> {
return resp
}

export async function BingChat(
prompt: string,
res: PlasmoMessaging.Response<any>
) {
async function chat(prompt: string, res: PlasmoMessaging.Response<any>) {
const conversation = await createConversation()
const bingConversationStyle = BingConversationStyle.Balanced
const conversationContext = {
@@ -286,3 +283,17 @@ export async function BingChat(
await wsp.open()
wsp.sendPacked({ protocol: "json", version: 1 })
}

export async function BingChat(
prompt: string,
res: PlasmoMessaging.Response<any>
) {
try {
await chat(prompt, res)
} catch (err) {
console.error("BingChat", err)
res.send(
"Sorry, Bing Chat is not available at the moment. error: " + err.message
)
}
}
100 changes: 47 additions & 53 deletions src/lib/api/chatgpt-api.ts
@@ -1,42 +1,18 @@
import { ofetch } from "ofetch"

import type { PlasmoMessaging } from "@plasmohq/messaging"

import { parseSSEResponse } from "~lib/utils/sse"

const MODEL = "gpt-3.5-turbo"

type ChatGPTResponseChoice = {
index: number
message: {
role: string
content: string
}
finish_reason: string
}

type ChatGPTResponseUsgae = {
prompt_tokens: number
completion_tokens: number
total_tokens: number
}

type ChatGPTResponse = {
id: string
object: string
created: number
choices: ChatGPTResponseChoice[]
usage: ChatGPTResponseUsgae
}

async function ChatStream(
async function chat(
url: string,
instraction: string,
prompt: string,
api_key: string,
res: PlasmoMessaging.Response<any>
) {
if (api_key == "") {
return "Please set your OpenAI API key in the extension options page."
}
const data = {
model: MODEL,
stream: true,
@@ -45,9 +21,7 @@ async function ChatStream(
{ role: "user", content: prompt }
]
}
// console.log(`ChatGPTAPI request: ${JSON.stringify(data)}`)

const resp = await fetch(url, {
const resp = await ofetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
@@ -57,31 +31,51 @@ })
})
let content: string = ""

if (resp.status == 200) {
await parseSSEResponse(resp, (message) => {
if (message === "[DONE]") {
return
}

try {
const data = JSON.parse(message)
// console.log(content)
if (data?.choices?.length) {
const delta = data.choices[0].delta
if (delta?.content) {
content += delta.content
res.send(content)
}
await parseSSEResponse(resp, (message) => {
if (message === "[DONE]") {
return
}
try {
const data = JSON.parse(message)
// console.log(content)
if (data?.choices?.length) {
const delta = data.choices[0].delta
if (delta?.content) {
content += delta.content
res.send(content)
}
} catch (err) {
console.error(err)
res.send(err)
return
}
})
} else {
res.send(`ChatGPT return error, status: ${resp.status}`)
} catch (err) {
console.error(err)
res.send(err)
return
}
})
}

async function ChatGPTApiChat(
url: string,
instraction: string,
prompt: string,
api_key: string,
res: PlasmoMessaging.Response<any>
) {
console.log(`ChatStream: ${url}, ${instraction}, ${prompt}, ${api_key}`)
if (!api_key) {
res.send("Please set your OpenAI API key in the extension options page.")
return
}
let message = ""
for (let i = 0; i < 3; i++) {
try {
await chat(url, instraction, prompt, api_key, res)
return
} catch (err) {
console.error(err)
message = err.message
}
}
res.send(message)
}

export { ChatStream }
export { ChatGPTApiChat }
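
For context on the streaming parser above: each SSE data message is expected to carry an incremental delta that is appended to the running content string. A sketch of the chunk shape inferred from the fields the code reads (not a complete schema of the OpenAI streaming response):

// Only the fields read by the parser are listed; everything else is omitted.
type ChatCompletionChunk = {
  choices?: {
    delta?: {
      content?: string // incremental text appended to `content`
    }
  }[]
}

// Folding one chunk into the accumulated answer, as the parser does:
let content = ""
const chunk: ChatCompletionChunk = JSON.parse(
  '{"choices":[{"delta":{"content":"Hello"}}]}'
)
if (chunk.choices?.length && chunk.choices[0].delta?.content) {
  content += chunk.choices[0].delta.content
  // res.send(content) would stream "Hello" to the UI here
}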
74 changes: 42 additions & 32 deletions src/lib/api/chatgpt-web.ts
@@ -1,3 +1,4 @@
import { ofetch } from "ofetch"
import { v4 as uuidv4 } from "uuid"

import type { PlasmoMessaging } from "@plasmohq/messaging"
@@ -17,22 +18,35 @@ async function getAccessToken(): Promise<string> {
return cacheToken as string
}

const resp = await fetch(`${CHATGPT_HOST}/api/auth/session`)
if (resp.status === 403) {
return "403 FORBIDDEN"
}
const data = await resp.json().catch(() => ({}))
const resp = await ofetch(`${CHATGPT_HOST}/api/auth/session`)
const data = await resp.json()
if (!data.accessToken) {
return "401 UNAUTHORIZED"
throw new Error("401 UNAUTHORIZED")
}
await storage.set(CACHE_KEY_TOKEN, data.accessToken)
return data.accessToken
}

async function PostChatGPTStream(
async function ChatGPTWebChat(
prompt: string,
res: PlasmoMessaging.Response<any>
) {
let message = ""
for (let i = 0; i < 3; i++) {
try {
await chat(prompt, res)
return
} catch (err) {
await storage.remove(CACHE_KEY_TOKEN)
console.error(err)
message = err.message
}
console.log(message)
}
res.send(message)
}

async function chat(prompt: string, res: PlasmoMessaging.Response<any>) {
const accessToken = await getAccessToken()

const cacheConversationId = await storage.get(CACHE_KEY_CONVERSATION_ID)
@@ -54,7 +68,7 @@ async function PostChatGPTStream(
model: CHATGPT_MODEL
}
const url = `${CHATGPT_HOST}/backend-api/conversation`
const resp = await fetch(url, {
const resp = await ofetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
@@ -65,30 +79,26 @@

let conversationId: string = ""

if (resp.status == 200) {
await parseSSEResponse(resp, (message) => {
if (message === "[DONE]") {
// console.debug("chatgpt sse message done, start remove conversation")
removeConversation(conversationId)
return
await parseSSEResponse(resp, (message) => {
if (message === "[DONE]") {
// console.debug("chatgpt sse message done, start remove conversation")
removeConversation(conversationId)
return
}
try {
const data = JSON.parse(message)
const text = data.message?.content?.parts?.[0]
if (text) {
// console.debug("chatgpt sse message", text)
res.send(text)
conversationId = data.conversation_id
}
try {
const data = JSON.parse(message)
const text = data.message?.content?.parts?.[0]
if (text) {
// console.debug("chatgpt sse message", text)
res.send(text)
conversationId = data.conversation_id
}
} catch (err) {
console.error(err)
res.send(err)
return
}
})
} else {
res.send(resp.statusText)
}
} catch (err) {
console.error(err)
res.send(`ChatGPT return error, error: ${err.message}`)
return
}
})
}

async function removeConversation(id: string) {
@@ -110,4 +120,4 @@ async function removeConversation(id: string) {
}
}

export { PostChatGPTStream }
export { ChatGPTWebChat }
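
Likewise, the web-backend parser in chat() only reads a couple of fields from each SSE message; a sketch of the assumed shape follows (inferred from the code above, not an official schema). Note also that ChatGPTWebChat clears the cached access token before each retry, so an expired session is re-fetched instead of failing all three attempts.

// Inferred from the fields read in chat(); other fields are ignored.
type ChatGPTWebSSEMessage = {
  conversation_id?: string
  message?: {
    content?: {
      parts?: string[] // parts[0] holds the full answer text so far
    }
  }
}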