fix inconsistent image processing in bots
devxpy committed Jan 21, 2024
1 parent 62c5185 commit 2515ca8
Showing 1 changed file with 13 additions and 9 deletions.
22 changes: 13 additions & 9 deletions recipes/VideoBots.py
@@ -544,6 +544,7 @@ def render_steps(self):
         if isinstance(final_prompt, str):
             text_output("**Final Prompt**", value=final_prompt, height=300)
         else:
+            st.write("**Final Prompt**")
             st.json(final_prompt)

         for idx, text in enumerate(st.session_state.get("raw_output_text", [])):
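Note: when input images are attached, the final prompt is a list of chat entries rather than a plain string, so it falls into the st.json branch, which renders no caption of its own; the added st.write keeps the "Final Prompt" label visible there too. A hypothetical example of such a list-form prompt (shape assumed from the OpenAI vision message format, not taken from this diff):

    final_prompt = [
        {"role": "system", "content": "You are a helpful bot..."},
        {
            "role": "user",
            "content": [
                # image parts first, then the text part
                {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}},
                {"type": "text", "text": "What is in this image?"},
            ],
        },
    ]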
@@ -666,18 +667,16 @@ def run(self, state: dict) -> typing.Iterator[str | None]:
         # except IndexError:
         #     user_display_name = CHATML_ROLE_USER

-        # construct user prompt
+        # save raw input for reference
         state["raw_input_text"] = user_input
-        user_prompt = {
-            "role": CHATML_ROLE_USER,
-            "content": user_input,
-        }

         # if documents are provided, run doc search on the saved msgs and get back the references
         references = None
         if request.documents:
             # formulate the search query as a history of all the messages
-            query_msgs = saved_msgs + [user_prompt]
+            query_msgs = saved_msgs + [
+                format_chat_entry(role=CHATML_ROLE_USER, content=user_input)
+            ]
             clip_idx = convo_window_clipper(
                 query_msgs, model_max_tokens[model] // 2, sep=" "
             )
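Note: the hand-rolled user_prompt dict is gone, so the doc-search query now builds its entry through format_chat_entry as well, text-only, since retrieval has no use for images. Without images the helper presumably reduces to the same plain dict the old code built by hand (a sketch under that assumption):

    format_chat_entry(role=CHATML_ROLE_USER, content="What is the refund policy?")
    # -> {"role": "user", "content": "What is the refund policy?"}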
@@ -741,17 +740,22 @@ def run(self, state: dict) -> typing.Iterator[str | None]:
         if references:
             # add task instructions
             task_instructions = render_prompt_vars(request.task_instructions, state)
-            user_prompt["content"] = (
+            user_input = (
                 references_as_prompt(references)
                 + f"\n**********\n{task_instructions.strip()}\n**********\n"
-                + user_prompt["content"]
+                + user_input
             )

+        # construct user prompt
+        user_prompt = format_chat_entry(
+            role=CHATML_ROLE_USER, content=user_input, images=request.input_images
+        )
+
         # truncate the history to fit the model's max tokens
         history_window = scripted_msgs + saved_msgs
         max_history_tokens = (
             model_max_tokens[model]
-            - calc_gpt_tokens([system_prompt, user_prompt], is_chat_model=is_chat_model)
+            - calc_gpt_tokens([system_prompt, user_input], is_chat_model=is_chat_model)
             - request.max_tokens
             - SAFETY_BUFFER
         )
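The core of the fix: the user entry is now constructed in exactly one place, after references and task instructions have been merged into user_input, so request.input_images is attached the same way on every path. format_chat_entry itself is not shown in this diff; a minimal sketch of what such a helper might look like, assuming the OpenAI-style multi-part message format:

    def format_chat_entry(
        role: str, content: str, images: list[str] | None = None
    ) -> dict:
        # a guess at the helper's behavior, not the actual implementation:
        # with images, emit a multi-part content list; otherwise keep content
        # as a plain string
        if images:
            content = [
                *(
                    {"type": "image_url", "image_url": {"url": url}}
                    for url in images
                ),
                {"type": "text", "text": content},
            ]
        return {"role": role, "content": content}

Switching calc_gpt_tokens from the user_prompt dict to the raw user_input string fits the same theme: token counting runs on plain text, presumably to avoid miscounting (or failing on) the list-form content that image entries introduce.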
