Begin merging optimizer
claudiosv committed Sep 4, 2024
1 parent 1e2601d commit b41d7af
Showing 13 changed files with 415 additions and 176 deletions.
77 changes: 46 additions & 31 deletions examples/prompt_library/CoT.pdl
@@ -31,17 +31,17 @@ defs:
model: str
answer: str
return:
- |-
Question: {{question}}

Answer: Let's think step by step.
- model: "{{ model }}"
params:
decoding_method: "greedy"
stop_sequences:
- "The answer is"
include_stop_sequence: false
- "The answer is {{ answer }}."
- |-
Question: {{ question }}
Answer: Let's think step by step.
- model: "{{ model }}"
platform: bam
parameters:
decoding_method: "greedy"
stop_sequences:
- "The answer is"
include_stop_sequence: false
- "The answer is {{ answer }}."

fewshot_cot:
function:
@@ -65,24 +65,39 @@ defs:
examples:
{ list: { obj: { question: str, reasoning: str, answer: str } } }
return:
- call: fewshot_cot
args:
examples: "{{ examples }}"
- |-
Question: {{question}}
- call: fewshot_cot
args:
examples: "{{ examples }}"
block_style: "question_cot"
- |
Question: {{ question }}
Answer: Let's think step by step.
- model: "{{ model }}"
platform: bam
parameters:
decoding_method: "greedy"
stop_sequences:
- "<|endoftext|>"
include_stop_sequence: false

Answer: Let's think step by step.
- model: "{{ model }}"
params:
decoding_method: "greedy"
stop_sequences:
- "The answer is"
include_stop_sequence: false
- "The answer is "
- def: answer
model: "{{ model }}"
params:
decoding_method: "greedy"
- "\n\nJSON Output: "
- data:
answer: "{{ answer|trim }}"
chain_of_thought_claim:
function:
question: str
model: str
examples:
{ list: { obj: { question: str, reasoning: str, answer: str } } }
return:
- call: fewshot_cot
args:
examples: "{{ examples }}"
block_style: "claim_cot"
- |
{{ question }}
Thought: Let's think step by step.
- model: "{{ model }}"
platform: bam
parameters:
decoding_method: "greedy"
stop_sequences:
- "<|endoftext|>"
include_stop_sequence: false
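
The CoT.pdl hunks above rename params to parameters, set platform: bam explicitly, thread a block_style argument through the fewshot_cot calls, and add a chain_of_thought_claim function. Below is a minimal caller sketch; it is not part of this commit, and the include path and all example values are assumptions for illustration only.

# Hypothetical caller of the new chain_of_thought_claim function (sketch only).
description: Claim-style CoT demo
document:
- include: prompt_library/CoT.pdl   # assumed include mechanism and relative path
- call: chain_of_thought_claim
  args:
    model: ibm/granite-34b-code-instruct
    question: "Claim: 17 is a prime number."
    examples:
    - question: "Claim: 9 is a prime number."
      reasoning: "9 equals 3 times 3, so it has a divisor other than 1 and itself."
      answer: "False"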
52 changes: 33 additions & 19 deletions examples/prompt_library/ReAct.pdl
@@ -93,7 +93,10 @@ defs:
iterations: "{{ iterations+1 }}"
- def: THOUGHT
model: "{{ model }}"
params:
parameters:
time_limit: 60000
random_seed: 42
truncate_input_tokens: 8191
decoding_method: sample
repetition_penalty: 1.1
temperature: "{{ temperature }}"
@@ -102,7 +105,10 @@
until: "{{ THOUGHT.endswith('Act:') or iterations>20 }}"
- def: action_raw
model: "{{ model }}"
params:
parameters:
time_limit: 60000
random_seed: 42
truncate_input_tokens: 8191
decoding_method: sample
repetition_penalty: 1.1
temperature: "{{ temperature }}"
@@ -113,7 +119,10 @@
- "["
- def: SUBJECT
model: "{{ model }}"
params:
parameters:
time_limit: 60000
random_seed: 42
truncate_input_tokens: 8191
decoding_method: sample
repetition_penalty: 1.1
temperature: "{{ temperature }}"
@@ -130,7 +139,12 @@ defs:
subject: "{{ SUBJECT }}"
- "\nTho:"
- model: "{{ model }}"
params:
fallback: "Error calling model"
parameters:
time_limit: 60000
random_seed: 42
truncate_input_tokens: 8191
repetition_penalty: 1.1
decoding_method: sample
temperature: "{{ temperature }}"
stop_sequences: ["\n", "Act:", "Obs:", "Tho:"]
@@ -178,21 +192,21 @@ defs:
- repeat:
- def: THOUGHT
model: "{{ model }}"
params:
DECODING_METHOD: "{{ decoding_method }}"
TEMPERATURE: "{{ temperature }}"
STOP_SEQUENCES: ["\n", "Act:", "Obs:", "Tho:"]
INCLUDE_STOP_SEQUENCE: true
parameters:
decoding_method: "{{ decoding_method }}"
temperature: "{{ temperature }}"
stop_sequences: ["\n", "Act:", "Obs:", "Tho:"]
include_stop_sequence: true
until: "{{ THOUGHT.endswith('Act:') }}"
- def: action
model: "{{ model }}"
parser: json
spec: {name: str, arguments: obj}
params:
DECODING_METHOD: "{{ decoding_method }}"
TEMPERATURE: "{{ temperature }}"
STOP_SEQUENCES: ["\n", "<|endoftext|>"]
INCLUDE_STOP_SEQUENCE: false
parameters:
decoding_method: "{{ decoding_method }}"
temperature: "{{ temperature }}"
stop_sequences: ["\n", "<|endoftext|>"]
include_stop_sequence: false
- if: "{{ action.name != 'Finish' }}"
then:
- "\nObs: "
@@ -202,11 +216,11 @@
args:
arguments: "{{ action.arguments }}"
- model: "{{ model }}"
params:
DECODING_METHOD: "{{ decoding_method }}"
TEMPERATURE: "{{ temperature }}"
STOP_SEQUENCES: ["\n", "Act:", "Obs:", "Tho:"]
INCLUDE_STOP_SEQUENCE: false
parameters:
decoding_method: "{{ decoding_method }}"
temperature: "{{ temperature }}"
stop_sequences: ["\n", "Act:", "Obs:", "Tho:"]
include_stop_sequence: false
else: "Invalid action. Valid actions are {{ TOOL_INFO.signatures|join(', ') }} and Finish[<answer>]."
until: "{{ action.name == 'Finish' }}"
- show_result: false
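
The ReAct.pdl hunks above make the same params-to-parameters rename, switch the decoding options from uppercase to lowercase keys, add time_limit, random_seed, and truncate_input_tokens, and guard one model call with a fallback string. A minimal sketch of the resulting model-block shape, assembled from the hunks rather than copied verbatim:

# Sketch of the post-change model block, assembled from this diff.
- def: THOUGHT
  model: "{{ model }}"
  fallback: "Error calling model"   # fallback is added to one block in this file
  parameters:
    time_limit: 60000
    random_seed: 42
    truncate_input_tokens: 8191
    decoding_method: sample
    repetition_penalty: 1.1
    temperature: "{{ temperature }}"
    stop_sequences: ["\n", "Act:", "Obs:", "Tho:"]
    include_stop_sequence: true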
5 changes: 3 additions & 2 deletions examples/prompt_library/ReWoo.pdl
@@ -86,10 +86,11 @@ defs:
{{ task }}
- def: PLANS
model: "{{ model }}"
platform: bam
parser: # plan, step_name, tool, tool_input
regex: 'Plan:\s*(?P<plan>(?:.|\n)*?)\s*(?P<step_name>#E\d+)\s*=\s*(?P<tool>\w+)\s*\[(?P<tool_input>[^\]]+)\]'
mode: findall
params:
parameters:
decoding_method: greedy
stop_sequences:
- "<|endoftext|>"
@@ -152,7 +153,7 @@ defs:
Response:
- def: SOLUTION
model: "{{ model }}"
params:
parameters:
decoding_method: greedy
stop_sequences:
- "<|endoftext|>"
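
The ReWoo.pdl change adds platform: bam and the parameters rename to the planning call, whose regex parser (mode: findall) captures plan, step_name, tool, and tool_input from each plan line. As an illustration only, for model output such as

Plan: Look up the 2022 population of Ottawa. #E1 = Search[Ottawa population 2022]

the parser would be expected to yield one record per matching line, roughly of this shape (assumed, not taken from the commit):

# Assumed shape of the parsed PLANS value, one entry per regex match.
- plan: "Look up the 2022 population of Ottawa."
  step_name: "#E1"
  tool: "Search"
  tool_input: "Ottawa population 2022"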
11 changes: 6 additions & 5 deletions examples/prompt_library/demos/Verifier_json.pdl
@@ -9,30 +9,31 @@ document:
tools: "{{ default_tools }}"
tool_names: ["Search"]
- def: QUESTION
show_result: false
read:
message: "Please enter a question: "
- def: PROPOSED
call: react_json
show_result: true
args:
context:
- role: system
content: "{{ granite_models.granite_7b_lab.system_prompt }}"
context: "{{ granite_models.granite_7b_lab.system_prompt }}"
question: "{{ QUESTION }}"
model: ibm/granite-7b-lab
tools: "{{ filtered_tools }}"
trajectories: []
- "\n\n----- Verifying answer... -----\n\n"
- def: VERIFIED
call: react_json
show_result: true
args:
context: [{"role": "system", "content": ""}]
context: ""
question: |-
Is this the right answer to this question?
"{{ QUESTION }}"
Proposed answer: {{ PROPOSED.answer.topic }}

Please answer as True or False.
model: ibm/granite-34b-code-instruct
model: "{{ granite_models.granite_20b_code_instruct_v2.id }}"
tools: "{{ filtered_tools }}"
trajectories: []
- "\n\nThe answer '{{ PROPOSED.answer.topic }}' has been verified as '{{VERIFIED.answer.topic}}'.\n"
38 changes: 19 additions & 19 deletions examples/prompt_library/demos/evalplus/evalplus.pdl
@@ -1,15 +1,15 @@
description: EvalPlus simple
defs:
demonstrations:
data:
- answer: "def max_run_uppercase(test_str):\n cnt = 0\n res = 0\n for idx in range(0,\
\ len(test_str)):\n if test_str.isupper():\n cnt += 1\n else:\n\
\ res = cnt\n cnt = 0\n if test_str.isupper():\n\
\ res = cnt\n return (res)"
question: Write a function to find maximum run of uppercase characters in the given
string.
- answer: "def remove_length(test_str, K):\n temp = test_str.split()\n res = \n res = ' '.join(res)\n return (res) "
question: Write a function to remove all the words with k length in the given string.
# demonstrations:
# data:
# - answer: "def max_run_uppercase(test_str):\n cnt = 0\n res = 0\n for idx in range(0,\
# \ len(test_str)):\n if test_str.isupper():\n cnt += 1\n else:\n\
# \ res = cnt\n cnt = 0\n if test_str.isupper():\n\
# \ res = cnt\n return (res)"
# question: Write a function to find maximum run of uppercase characters in the given
# string.
# - answer: "def remove_length(test_str, K):\n temp = test_str.split()\n res = \n res = ' '.join(res)\n return (res) "
# question: Write a function to remove all the words with k length in the given string.
model: ibm/granite-34b-code-instruct
prompt_pattern: cot
prompt: Write a python function to toggle bits of the number except the first and the last bit.
@@ -23,12 +23,12 @@ document:
{{ example.answer }}

- |
Task: {{ prompt }}
Solution:
- model: "{{ model }}"
platform: bam
parameters:
decoding_method: "greedy"
stop_sequences:
- "<|endoftext|>"
include_stop_sequence: false
Task: {{ prompt }}
Solution:
# - model: "{{ model }}"
# platform: bam
# parameters:
# decoding_method: "greedy"
# stop_sequences:
# - "<|endoftext|>"
# include_stop_sequence: false
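
In evalplus.pdl, the hard-coded demonstrations and the model block are commented out, which is consistent with the commit's goal of merging an optimizer: these values are presumably meant to be supplied from outside the program. If so, an injected demonstrations value would likely mirror the commented-out shape; the following is hypothetical and not part of the commit.

# Assumed external value for demonstrations, mirroring the original shape.
demonstrations:
  data:
  - question: "Write a function to find maximum run of uppercase characters in the given string."
    answer: "def max_run_uppercase(test_str): ..."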