Commit 48999e3
Add option to keep template indentations
s-jse committed May 26, 2024
1 parent 68d0dca commit 48999e3
Showing 3 changed files with 9 additions and 6 deletions.
chainlite/llm_generate.py (4 changes: 3 additions & 1 deletion)
@@ -215,6 +215,7 @@ def llm_generation_chain(
     temperature: float = 0.0,
     stop_tokens: Optional[list[str]] = None,
     top_p: float = 0.9,
+    keep_indentation: bool = False,
     postprocess: bool = False,
     bind_prompt_values: dict = {},
 ) -> Runnable:
@@ -228,6 +229,7 @@ def llm_generation_chain(
         temperature (float, optional): Dictates the randomness in the generation. Must be >= 0.0. Defaults to 0.0 (deterministic).
         stop_tokens (list[str], optional): The list of tokens causing the LLM to stop generating. Defaults to None.
         top_p (float, optional): The max cumulative probability for nucleus sampling, must be within 0.0 - 1.0. Defaults to 0.9.
+        keep_indentation (bool, optional): If True, keeps the indentation at the beginning of each line in the template_file. Defaults to False.
         postprocess (bool, optional): If true, postprocessing deletes incomplete sentences from the end of the generation. Defaults to False.
         bind_prompt_values (dict, optional): A dictionary containing {Variable: str : Value}. Binds values to the prompt. Additional variables can be provided when the chain is called. Defaults to {}.
@@ -280,7 +282,7 @@ def llm_generation_chain(
         "prompt_format" in llm_resource and llm_resource["prompt_format"] == "distilled"
     )
     prompt, distillation_instruction = load_fewshot_prompt_template(
-        template_file, is_distilled=is_distilled
+        template_file, is_distilled=is_distilled, keep_indentation=keep_indentation
     )

     llm = ChatLiteLLM(
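A minimal usage sketch of the new flag follows. The import and the engine/max_tokens parameters are assumed from the package's documentation rather than shown in this diff, and the template path and input variable are hypothetical:

    import asyncio

    from chainlite import llm_generation_chain

    # keep_indentation=True stops the template loader from stripping the
    # leading whitespace of each line, which matters for prompt templates
    # that contain indented code snippets.
    chain = llm_generation_chain(
        template_file="prompts/write_code.prompt",  # hypothetical template path
        engine="gpt-4o",                            # assumed engine identifier
        max_tokens=512,
        keep_indentation=True,
    )
    response = asyncio.run(chain.ainvoke({"question": "Write a sorting function."}))
    print(response)

Because llm_generation_chain returns a LangChain Runnable (per the return annotation in the signature above), the standard invoke/ainvoke entry points apply to the resulting chain.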
chainlite/load_prompt.py (9 changes: 5 additions & 4 deletions)
@@ -40,7 +40,7 @@ def initialize_jinja_environment(loader_paths):


 @lru_cache()
-def load_template_file(template_file: str) -> str:
+def load_template_file(template_file: str, keep_indentation: bool) -> str:
     """
     This function is here just so that we can cache the templates and not have to read from disk every time.
     Also removes comment blocks and white space at the beginning and end of each line. These are usually added to make prompt templates more readable.
@@ -50,7 +50,8 @@ def load_template_file(template_file: str) -> str:
         jinja_environment, template_file
     )[0]
     raw_template = re.sub(jinja2_comment_pattern, "", raw_template)
-    raw_template = "\n".join([line.strip() for line in raw_template.split("\n")])
+    if not keep_indentation:
+        raw_template = "\n".join([line.strip() for line in raw_template.split("\n")])
     raw_template = re.sub(
         r"%}\s*", "%}", raw_template
     )  # remove the white space after {% for ... %} tags etc.
@@ -187,9 +188,9 @@ def _prompt_blocks_to_chat_messages(


 def load_fewshot_prompt_template(
-    template_file: str, is_distilled: bool
+    template_file: str, is_distilled: bool, keep_indentation: bool
 ) -> tuple[ChatPromptTemplate, str | None]:
-    fp = load_template_file(template_file)
+    fp = load_template_file(template_file, keep_indentation)
     blocks = _split_prompt_to_blocks(fp)
     # pprint(blocks)
     chat_prompt_template, distillation_instruction = _prompt_blocks_to_chat_messages(
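The behavior change is easiest to see in isolation. Below is a standalone sketch of just the whitespace-normalization step, re-implemented here for illustration (the package's actual load_template_file additionally strips Jinja2 comment blocks and the whitespace after {% ... %} tags):

    def normalize_template(raw_template: str, keep_indentation: bool = False) -> str:
        # Stripping every line flattens any indentation inside the template,
        # which breaks prompts that contain indented code examples; the new
        # flag skips that step.
        if not keep_indentation:
            raw_template = "\n".join(line.strip() for line in raw_template.split("\n"))
        return raw_template

    snippet = "def add(a, b):\n    return a + b"
    print(normalize_template(snippet))                         # '    return' loses its indent
    print(normalize_template(snippet, keep_indentation=True))  # indentation survives

One side effect worth noting: since load_template_file is wrapped in @lru_cache(), the new keep_indentation argument becomes part of the cache key, so the same template file can be cached separately for each flag value.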
setup.py (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@

 setup(
     name="chainlite",
-    version="0.1.7",
+    version="0.1.8",
     author="Sina Semnani",
     author_email="[email protected]",
     description="A Python package that uses LangChain and LiteLLM to call large language model APIs easily",
