Merge upstream #2

Merged 19 commits on Nov 1, 2024

Commits (changes from all commits):
1ec8c24  fix: jsonschema integer validation (#852)  (WeiAnAn, Sep 20, 2024)
9add1c3  add max_completions_tokens for o1 series models (#857)  (chococola, Sep 20, 2024)
9a4f3a7  feat: add ParallelToolCalls to RunRequest (#847)  (kenshin54, Sep 20, 2024)
e095df5  run_id string Optional (#855)  (floodwm, Sep 20, 2024)
38bdc81  Optimize Client Error Return (#856)  (eiixy, Sep 26, 2024)
7f80303  Fix max_completion_tokens (#860)  (alexsacr, Sep 26, 2024)
e9d8485  fix: ParallelToolCalls should be added to RunRequest (#861)  (kenshin54, Sep 26, 2024)
fdd59d9  feat: usage struct add CompletionTokensDetails (#863)  (liushuangls, Sep 26, 2024)
bac7d59  fix MaxCompletionTokens typo (#862)  (l-winston, Oct 3, 2024)
7c145eb  add jailbreak filter result, add ContentFilterResults on output (#864)  (juliomartinsdev, Oct 3, 2024)
9913264  Completion API: add new params (#870)  (isaacseymour, Oct 9, 2024)
cfe15ff  return response body as byte slice for RequestError type (#873)  (AyushSawant18588, Oct 14, 2024)
21f7134  Adding new moderation model constants (#875)  (Mhjacobs, Oct 14, 2024)
b162541  Cleanup (#879)  (sashabaranov, Oct 15, 2024)
9fe2c6c  Completion API: add Store and Metadata parameters (#878)  (smackcrane, Oct 15, 2024)
fb15ff9  Handling for non-json response (#881)  (AyushSawant18588, Oct 21, 2024)
3672c0d  fix: Updated Assistent struct with latest fields based on OpenAI docs…  (ecoralic, Oct 21, 2024)
6e08732  Updated checkPromptType function to handle prompt list in completions…  (AyushSawant18588, Oct 25, 2024)
d10f1b8  add chatcompletion stream delta refusal and logprobs (#882)  (Yu0u, Oct 29, 2024)
35 changes: 0 additions & 35 deletions Makefile

This file was deleted.

2 changes: 1 addition & 1 deletion README.md
@@ -5,7 +5,7 @@
 
 This library provides unofficial Go clients for [OpenAI API](https://platform.openai.com/). We support:
 
-* ChatGPT
+* ChatGPT 4o, o1
 * GPT-3, GPT-4
 * DALL·E 2, DALL·E 3
 * Whisper
25 changes: 14 additions & 11 deletions assistant.go
@@ -14,17 +14,20 @@ const (
 )
 
 type Assistant struct {
-    ID            string                 `json:"id"`
-    Object        string                 `json:"object"`
-    CreatedAt     int64                  `json:"created_at"`
-    Name          *string                `json:"name,omitempty"`
-    Description   *string                `json:"description,omitempty"`
-    Model         string                 `json:"model"`
-    Instructions  *string                `json:"instructions,omitempty"`
-    Tools         []AssistantTool        `json:"tools"`
-    FileIDs       []string               `json:"file_ids,omitempty"`
-    Metadata      map[string]any         `json:"metadata,omitempty"`
-    ToolResources *AssistantToolResource `json:"tool_resources,omitempty"`
+    ID             string                 `json:"id"`
+    Object         string                 `json:"object"`
+    CreatedAt      int64                  `json:"created_at"`
+    Name           *string                `json:"name,omitempty"`
+    Description    *string                `json:"description,omitempty"`
+    Model          string                 `json:"model"`
+    Instructions   *string                `json:"instructions,omitempty"`
+    Tools          []AssistantTool        `json:"tools"`
+    ToolResources  *AssistantToolResource `json:"tool_resources,omitempty"`
+    FileIDs        []string               `json:"file_ids,omitempty"` // Deprecated in v2
+    Metadata       map[string]any         `json:"metadata,omitempty"`
+    Temperature    *float32               `json:"temperature,omitempty"`
+    TopP           *float32               `json:"top_p,omitempty"`
+    ResponseFormat any                    `json:"response_format,omitempty"`
 
     httpHeader
 }
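For context, a minimal sketch of reading the new Assistant fields after this change. The API key and assistant ID are placeholders, and RetrieveAssistant is the library's existing retrieval call; treat this as illustrative rather than as the PR's own example:

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key
	ctx := context.Background()

	// Retrieve an existing assistant (the ID is illustrative).
	a, err := client.RetrieveAssistant(ctx, "asst_abc123")
	if err != nil {
		panic(err)
	}

	// The new sampling fields are pointers, so nil means "server default".
	if a.Temperature != nil {
		fmt.Println("temperature:", *a.Temperature)
	}
	if a.TopP != nil {
		fmt.Println("top_p:", *a.TopP)
	}

	// ToolResources supersedes FileIDs, which is deprecated in v2.
	if a.ToolResources != nil {
		fmt.Printf("tool resources: %+v\n", *a.ToolResources)
	}
}
```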
80 changes: 55 additions & 25 deletions chat.go
@@ -41,11 +41,23 @@ type Violence struct {
     Severity string `json:"severity,omitempty"`
 }
 
+type JailBreak struct {
+    Filtered bool `json:"filtered"`
+    Detected bool `json:"detected"`
+}
+
+type Profanity struct {
+    Filtered bool `json:"filtered"`
+    Detected bool `json:"detected"`
+}
+
 type ContentFilterResults struct {
-    Hate     Hate     `json:"hate,omitempty"`
-    SelfHarm SelfHarm `json:"self_harm,omitempty"`
-    Sexual   Sexual   `json:"sexual,omitempty"`
-    Violence Violence `json:"violence,omitempty"`
+    Hate      Hate      `json:"hate,omitempty"`
+    SelfHarm  SelfHarm  `json:"self_harm,omitempty"`
+    Sexual    Sexual    `json:"sexual,omitempty"`
+    Violence  Violence  `json:"violence,omitempty"`
+    JailBreak JailBreak `json:"jailbreak,omitempty"`
+    Profanity Profanity `json:"profanity,omitempty"`
 }
 
 type PromptAnnotation struct {
@@ -200,18 +212,25 @@
 
 // ChatCompletionRequest represents a request structure for chat completion API.
 type ChatCompletionRequest struct {
-    Model            string                        `json:"model"`
-    Messages         []ChatCompletionMessage       `json:"messages"`
-    MaxTokens        int                           `json:"max_tokens,omitempty"`
-    Temperature      float32                       `json:"temperature,omitempty"`
-    TopP             float32                       `json:"top_p,omitempty"`
-    N                int                           `json:"n,omitempty"`
-    Stream           bool                          `json:"stream,omitempty"`
-    Stop             []string                      `json:"stop,omitempty"`
-    PresencePenalty  float32                       `json:"presence_penalty,omitempty"`
-    ResponseFormat   *ChatCompletionResponseFormat `json:"response_format,omitempty"`
-    Seed             *int                          `json:"seed,omitempty"`
-    FrequencyPenalty float32                       `json:"frequency_penalty,omitempty"`
+    Model    string                  `json:"model"`
+    Messages []ChatCompletionMessage `json:"messages"`
+    // MaxTokens The maximum number of tokens that can be generated in the chat completion.
+    // This value can be used to control costs for text generated via API.
+    // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
+    // refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
+    MaxTokens int `json:"max_tokens,omitempty"`
+    // MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
+    // including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
+    MaxCompletionTokens int                           `json:"max_completion_tokens,omitempty"`
+    Temperature         float32                       `json:"temperature,omitempty"`
+    TopP                float32                       `json:"top_p,omitempty"`
+    N                   int                           `json:"n,omitempty"`
+    Stream              bool                          `json:"stream,omitempty"`
+    Stop                []string                      `json:"stop,omitempty"`
+    PresencePenalty     float32                       `json:"presence_penalty,omitempty"`
+    ResponseFormat      *ChatCompletionResponseFormat `json:"response_format,omitempty"`
+    Seed                *int                          `json:"seed,omitempty"`
+    FrequencyPenalty    float32                       `json:"frequency_penalty,omitempty"`
     // LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
     // incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
     // refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
@@ -236,6 +255,11 @@ type ChatCompletionRequest struct {
     StreamOptions *StreamOptions `json:"stream_options,omitempty"`
     // Disable the default behavior of parallel tool calls by setting it: false.
     ParallelToolCalls any `json:"parallel_tool_calls,omitempty"`
+    // Store can be set to true to store the output of this completion request for use in distillations and evals.
+    // https://platform.openai.com/docs/api-reference/chat/create#chat-create-store
+    Store bool `json:"store,omitempty"`
+    // Metadata to store with the completion.
+    Metadata map[string]string `json:"metadata,omitempty"`
 }
 
 type StreamOptions struct {
@@ -331,19 +355,21 @@ type ChatCompletionChoice struct {
     // function_call: The model decided to call a function
     // content_filter: Omitted content due to a flag from our content filters
     // null: API response still in progress or incomplete
-    FinishReason FinishReason `json:"finish_reason"`
-    LogProbs     *LogProbs    `json:"logprobs,omitempty"`
+    FinishReason         FinishReason         `json:"finish_reason"`
+    LogProbs             *LogProbs            `json:"logprobs,omitempty"`
+    ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
 }
 
 // ChatCompletionResponse represents a response structure for chat completion API.
 type ChatCompletionResponse struct {
-    ID                string                 `json:"id"`
-    Object            string                 `json:"object"`
-    Created           int64                  `json:"created"`
-    Model             string                 `json:"model"`
-    Choices           []ChatCompletionChoice `json:"choices"`
-    Usage             Usage                  `json:"usage"`
-    SystemFingerprint string                 `json:"system_fingerprint"`
+    ID                  string                 `json:"id"`
+    Object              string                 `json:"object"`
+    Created             int64                  `json:"created"`
+    Model               string                 `json:"model"`
+    Choices             []ChatCompletionChoice `json:"choices"`
+    Usage               Usage                  `json:"usage"`
+    SystemFingerprint   string                 `json:"system_fingerprint"`
+    PromptFilterResults []PromptFilterResult   `json:"prompt_filter_results,omitempty"`
 
     httpHeader
 }
@@ -364,6 +390,10 @@ func (c *Client) CreateChatCompletion(
         return
     }
 
+    if err = validateRequestForO1Models(request); err != nil {
+        return
+    }
+
     req, err := c.newRequest(
         ctx,
         http.MethodPost,
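Putting the chat.go changes together, a hedged sketch of a request that uses the new MaxCompletionTokens, Store, and Metadata fields, then reads the new per-choice filter results. The message content and metadata are illustrative, and the O1Mini constant is assumed from the o1-series commit in this merge; whether validateRequestForO1Models accepts a request depends on which other fields are set:

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		// o1-series models reject max_tokens; use MaxCompletionTokens instead.
		Model: openai.O1Mini, // assumes the o1 constant added alongside these commits
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Explain mutexes briefly."},
		},
		// Upper bound on generated tokens, including hidden reasoning tokens.
		MaxCompletionTokens: 512,
		// Opt in to storing this completion for distillations and evals.
		Store: true,
		// Illustrative metadata; keys and values are free-form strings.
		Metadata: map[string]string{"experiment": "upstream-merge-demo"},
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(resp.Choices[0].Message.Content)
	// New per-choice content filter results, if the backend populates them.
	fmt.Printf("filters: %+v\n", resp.Choices[0].ContentFilterResults)
}
```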
32 changes: 28 additions & 4 deletions chat_stream.go
@@ -10,13 +10,33 @@ type ChatCompletionStreamChoiceDelta struct {
     Role         string        `json:"role,omitempty"`
     FunctionCall *FunctionCall `json:"function_call,omitempty"`
     ToolCalls    []ToolCall    `json:"tool_calls,omitempty"`
+    Refusal      string        `json:"refusal,omitempty"`
 }
 
+type ChatCompletionStreamChoiceLogprobs struct {
+    Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
+    Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
+}
+
+type ChatCompletionTokenLogprob struct {
+    Token       string                                 `json:"token"`
+    Bytes       []int64                                `json:"bytes,omitempty"`
+    Logprob     float64                                `json:"logprob,omitempty"`
+    TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
+}
+
+type ChatCompletionTokenLogprobTopLogprob struct {
+    Token   string  `json:"token"`
+    Bytes   []int64 `json:"bytes"`
+    Logprob float64 `json:"logprob"`
+}
+
 type ChatCompletionStreamChoice struct {
-    Index                int                             `json:"index"`
-    Delta                ChatCompletionStreamChoiceDelta `json:"delta"`
-    FinishReason         FinishReason                    `json:"finish_reason"`
-    ContentFilterResults ContentFilterResults            `json:"content_filter_results,omitempty"`
+    Index                int                                 `json:"index"`
+    Delta                ChatCompletionStreamChoiceDelta     `json:"delta"`
+    Logprobs             *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
+    FinishReason         FinishReason                        `json:"finish_reason"`
+    ContentFilterResults ContentFilterResults                `json:"content_filter_results,omitempty"`
 }
 
 type PromptFilterResult struct {
@@ -60,6 +80,10 @@ func (c *Client) CreateChatCompletionStream(
     }
 
     request.Stream = true
+    if err = validateRequestForO1Models(request); err != nil {
+        return
+    }
+
     req, err := c.newRequest(
         ctx,
         http.MethodPost,
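And a sketch of consuming the new streaming fields through the library's existing CreateChatCompletionStream/Recv loop. The model choice is illustrative, and LogProbs is the pre-existing request flag assumed here to make the new Logprobs deltas show up:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-api-key") // placeholder key

	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT4o, // illustrative model choice
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Say hello."},
		},
		LogProbs: true, // request per-token logprobs so streamed Logprobs are populated
	})
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		if len(resp.Choices) == 0 {
			continue // some chunks (e.g. usage-only) carry no choices
		}

		choice := resp.Choices[0]
		fmt.Print(choice.Delta.Content)

		// New in this merge: refusal text and logprobs arrive as stream deltas.
		if choice.Delta.Refusal != "" {
			fmt.Println("\nrefusal:", choice.Delta.Refusal)
		}
		if choice.Logprobs != nil {
			for _, lp := range choice.Logprobs.Content {
				fmt.Printf(" [%s: %.3f]", lp.Token, lp.Logprob)
			}
		}
	}
}
```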