Skip to content

Commit

Permalink
Add top_k in ChatCompletionRequest (#1174)
Browse files Browse the repository at this point in the history
* add top_k in ChatCompletionRequest

* Update api_server.py (#1149)

#1105 (comment)

* add top_k in ChatCompletionRequestQos

---------

Co-authored-by: qianyuexingfu <[email protected]>
  • Loading branch information
lvhan028 and 909254 authored Feb 21, 2024
1 parent 685070f commit 2d02b98
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 1 deletion.
3 changes: 3 additions & 0 deletions lmdeploy/serve/openai/api_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,8 @@ async def chat_completions_v1(request: ChatCompletionRequest,
tokens. Only accept stop words that are encoded to one token index.
Additional arguments supported by LMDeploy:
- top_k (int): The number of the highest probability vocabulary
tokens to keep for top-k-filtering
- ignore_eos (bool): indicator for ignoring eos
- skip_special_tokens (bool): Whether or not to remove special tokens
in the decoding. Default to be True.
Expand All @@ -303,6 +305,7 @@ async def chat_completions_v1(request: ChatCompletionRequest,

gen_config = GenerationConfig(
max_new_tokens=request.max_tokens if request.max_tokens else 512,
top_k=request.top_k,
top_p=request.top_p,
temperature=request.temperature,
repetition_penalty=request.repetition_penalty,
Expand Down
4 changes: 3 additions & 1 deletion lmdeploy/serve/openai/protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ class ChatCompletionRequestQos(BaseModel):
repetition_penalty: Optional[float] = 1.0
session_id: Optional[int] = -1
ignore_eos: Optional[bool] = False
top_k: Optional[int] = 40


class ChatCompletionRequest(BaseModel):
Expand All @@ -95,6 +96,7 @@ class ChatCompletionRequest(BaseModel):
session_id: Optional[int] = -1
ignore_eos: Optional[bool] = False
skip_special_tokens: Optional[bool] = True
top_k: Optional[int] = 40


class ChatMessage(BaseModel):
Expand Down Expand Up @@ -184,7 +186,7 @@ class CompletionRequestQos(BaseModel):
frequency_penalty: Optional[float] = 0.0
user: Optional[str] = None
# additional argument of lmdeploy
top_k: int = 40
top_k: Optional[int] = 40
repetition_penalty: Optional[float] = 1.0
session_id: Optional[int] = -1
ignore_eos: Optional[bool] = False
Expand Down
1 change: 1 addition & 0 deletions lmdeploy/serve/qos_engine/qos_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,7 @@ async def generate(self, request):
if request.max_tokens else 512,
stop=request.stop,
top_p=request.top_p,
top_k=request.top_k,
temperature=request.temperature,
repetition_penalty=request.repetition_penalty,
ignore_eos=request.ignore_eos)
Expand Down

0 comments on commit 2d02b98

Please sign in to comment.