minor renaming CoE convention to bundle
jhpiedrahitao committed Nov 11, 2024
1 parent f7731ba commit 759836b
Showing 2 changed files with 18 additions and 18 deletions.
24 changes: 12 additions & 12 deletions libs/community/langchain_community/chat_models/sambanova.py
@@ -599,25 +599,25 @@ class ChatSambaStudio(BaseChatModel):
ChatSambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key,
model = model or expert name (set for CoE endpoints),
model = model or expert name (set for Bundle endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
do_sample = whether to do sampling
process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
(set for Bundle generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
special_tokens = start, start_role, end_role, end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
(set for Bundle generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra keyword arguments to pass to the model.
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
(set for Bundle endpoints).
streaming: bool
Whether to use streaming handler when using non streaming methods
max_tokens: int
@@ -631,12 +631,12 @@ class ChatSambaStudio(BaseChatModel):
do_sample: bool
whether to do sampling
process_prompt:
whether to process prompt (set for CoE generic v1 and v2 endpoints)
whether to process prompt (set for Bundle generic v1 and v2 endpoints)
stream_options: dict
stream options, include usage to get generation metrics
special_tokens: dict
start, start_role, end_role and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt set to false
(set for Bundle generic v1 and v2 endpoints when process prompt set to false
or for StandAlone v1 and v2 endpoints); defaults to llama3 special tokens
model_kwargs: dict
Extra keyword arguments to pass to the model.
@@ -655,17 +655,17 @@ class ChatSambaStudio(BaseChatModel):
chat = ChatSambaStudio(
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key,
model = model or expert name (set for CoE endpoints),
model = model or expert name (set for Bundle endpoints),
max_tokens = max number of tokens to generate,
temperature = model temperature,
top_p = model top p,
top_k = model top k,
do_sample = whether to do sampling
process_prompt = whether to process prompt
(set for CoE generic v1 and v2 endpoints)
(set for Bundle generic v1 and v2 endpoints)
stream_options = include usage to get generation metrics
special_tokens = start, start_role, end_role, and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt
(set for Bundle generic v1 and v2 endpoints when process prompt
set to false or for StandAlone v1 and v2 endpoints)
model_kwargs: Optional = Extra keyword arguments to pass to the model.
)
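
For concreteness, a minimal runnable sketch of the instantiation described in this docstring — the URL, API key, and expert name are placeholders, and the sampling values are arbitrary, not defaults taken from this file:

```python
from langchain_community.chat_models.sambanova import ChatSambaStudio

# Placeholder credentials; substitute the URL and key from your own
# SambaStudio deployment.
chat = ChatSambaStudio(
    sambastudio_url="https://your-env.sambastudio.host/api/v2/predict/generic/your-endpoint",
    sambastudio_api_key="your-api-key",
    model="Meta-Llama-3-70B-Instruct-4096",  # expert name; needed for Bundle endpoints
    max_tokens=512,
    temperature=0.7,
)

# Chat models accept a plain string, which is wrapped as a human message.
response = chat.invoke("Tell me a joke about artificial intelligence.")
print(response.content)
```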
@@ -742,7 +742,7 @@ class GetWeather(BaseModel):
"""SambaStudio streaming Url"""

model: Optional[str] = Field(default=None)
"""The name of the model or expert to use (for CoE endpoints)"""
"""The name of the model or expert to use (for Bundle endpoints)"""

streaming: bool = Field(default=False)
"""Whether to use streaming handler when using non streaming methods"""
@@ -763,7 +763,7 @@ class GetWeather(BaseModel):
"""whether to do sampling"""

process_prompt: Optional[bool] = Field(default=True)
"""whether process prompt (for CoE generic v1 and v2 endpoints)"""
"""whether process prompt (for Bundle generic v1 and v2 endpoints)"""

stream_options: Dict[str, Any] = Field(default={"include_usage": True})
"""stream options, include usage to get generation metrics"""
@@ -777,7 +777,7 @@ class GetWeather(BaseModel):
}
)
"""start, start_role, end_role and end special tokens
(set for CoE generic v1 and v2 endpoints when process prompt set to false
(set for Bundle generic v1 and v2 endpoints when process prompt set to false
or for StandAlone v1 and v2 endpoints)
defaults to llama3 special tokens"""
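
To illustrate, a hedged sketch of overriding these tokens for a StandAlone endpoint: the four key names follow the field's documented default above, but the token values are hypothetical llama2-style markers, not taken from this file — match them to your model's actual chat template:

```python
# Hypothetical example: raw-prompt formatting with custom special tokens.
chat = ChatSambaStudio(
    sambastudio_url="your-endpoint-url",
    sambastudio_api_key="your-api-key",
    process_prompt=False,  # send the formatted prompt as-is
    special_tokens={
        "start": "<s>",          # hypothetical values; adjust to
        "start_role": "[INST]",  # your model's chat template
        "end_role": "[/INST]",
        "end": "</s>",
    },
)
```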

12 changes: 6 additions & 6 deletions libs/community/langchain_community/llms/sambanova.py
@@ -27,20 +27,20 @@ class SambaStudio(LLM):
sambastudio_url="your-SambaStudio-environment-URL",
sambastudio_api_key="your-SambaStudio-API-key,
model_kwargs={
"model" : model or expert name (set for CoE endpoints),
"model" : model or expert name (set for Bundle endpoints),
"max_tokens" : max number of tokens to generate,
"temperature" : model temperature,
"top_p" : model top p,
"top_k" : model top k,
"do_sample" : wether to do sample
"process_prompt": wether to process prompt
(set for CoE generic v1 and v2 endpoints)
(set for Bundle generic v1 and v2 endpoints)
},
)
Key init args — completion params:
model: str
The name of the model to use, e.g., Meta-Llama-3-70B-Instruct-4096
(set for CoE endpoints).
(set for Bundle endpoints).
streaming: bool
Whether to use streaming handler when using non streaming methods
model_kwargs: dict
@@ -56,7 +56,7 @@ class SambaStudio(LLM):
do_sample: bool
whether to do sampling
process_prompt:
whether to process prompt (set for CoE generic v1 and v2 endpoints)
whether to process prompt (set for Bundle generic v1 and v2 endpoints)

Check failure on line 59 in libs/community/langchain_community/llms/sambanova.py
GitHub Actions / cd libs/community / make lint #3.9
Ruff (E501): langchain_community/llms/sambanova.py:59:89: E501 Line too long (89 > 88)
Key init args — client params:
sambastudio_url: str
SambaStudio endpoint Url
@@ -72,14 +72,14 @@ class SambaStudio(LLM):
sambastudio_url = set with your SambaStudio deployed endpoint URL,
sambastudio_api_key = set with your SambaStudio deployed endpoint Key,
model_kwargs = {
"model" : model or expert name (set for CoE endpoints),
"model" : model or expert name (set for Bundle endpoints),
"max_tokens" : max number of tokens to generate,
"temperature" : model temperature,
"top_p" : model top p,
"top_k" : model top k,
"do_sample" : wether to do sample
"process_prompt" : wether to process prompt
(set for CoE generic v1 and v2 endpoints)
(set for Bundle generic v1 and v2 endpoints)
}
)
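
A minimal runnable sketch of this completion-style counterpart — again with a placeholder URL and key, and arbitrary sampling values:

```python
from langchain_community.llms.sambanova import SambaStudio

llm = SambaStudio(
    sambastudio_url="https://your-env.sambastudio.host/api/predict/generic/your-endpoint",
    sambastudio_api_key="your-api-key",
    model_kwargs={
        "model": "Meta-Llama-3-70B-Instruct-4096",  # Bundle endpoints only
        "max_tokens": 256,
        "temperature": 0.7,
        "process_prompt": True,  # for Bundle generic v1 and v2 endpoints
    },
)

# An LLM (unlike a chat model) takes and returns plain strings.
print(llm.invoke("Why do we use retrieval augmented generation?"))
```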
