# app.py
import streamlit as st
import datetime
import json
from generators import generate_key_topics, generate_learning_context, generate_questions
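# NOTE (assumption, not from the original file): generators.py is not shown here, but from the call sites below
# each generate_* function appears to return a [result, prompt, response] triple, where prompt and response are
# only used for the optional debug area. The leading underscore on the _openai_api_key parameter is presumably
# there so Streamlit's cache decorators (st.cache_data / st.cache_resource) skip hashing it; treat that as a guess
# based on the naming convention rather than confirmed behavior.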
# defining the streamlit page config. this is the title, icon, layout, and initial sidebar state.
st.set_page_config(page_title="Learn with LLMs explorer", page_icon="🧊", layout="wide", initial_sidebar_state="collapsed")
# initialize session state variables and defaults
model_options = ["gpt-4o", "gpt-4o-mini"]
if "openai_model" not in st.session_state:
st.session_state.model = model_options[1]
if "has_saved_openai_key" not in st.session_state:
st.session_state.has_saved_openai_key = False
if "openai_api_key" not in st.session_state:
st.session_state.openai_api_key = ''
if 'goals' not in st.session_state:
st.session_state.goals = ''
if 'skills' not in st.session_state:
st.session_state.skills = ''
if 'source_material' not in st.session_state:
st.session_state.source_material = ''
if "active_topic_idx" not in st.session_state:
st.session_state.active_topic_idx = 0
if "blank_out_questions_nav_trick" not in st.session_state:
st.session_state.blank_out_questions_nav_trick = 0
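# Secrets this app reads from secrets.toml (inferred from the usages below; exact values are deployment-specific):
#   query_auth_secret  - shared secret compared against the "a" query param to auto-load the API key
#   openai_api_key     - the key copied into the session when the query auth check passes
#   show_debug_area    - toggles the debugging/logging container further down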
if "a" in st.query_params and st.query_params["a"] == st.secrets.query_auth_secret:
# when the query auth param is present and matches, it will allow the api key secret from secrets.toml to be loaded automatically into the session.
if st.secrets.openai_api_key:
st.session_state.openai_api_key = st.secrets.openai_api_key
st.session_state.has_saved_openai_key = True
# heading area
st.title("Learning with LLMs Concept Explorer")
if st.session_state.has_saved_openai_key:
    st.markdown("**Feedback please!** Let us know what you think in our project channel [#liminal](slack://channel?team=T027LFU12&id=C06MJQQ1350) or via DM to [@jwhiting](slack://user?team=T027LFU12&id=U03U66G63MW) or [@Jacob Ervin](slack://user?team=T027LFU12&id=U04BV9MUJRZ)")
# left column for form inputs, right column for quiz materials
col1, col2 = st.columns([1,3])
# create a debug area to render prompts and generation responses when configured to do so via the secrets.toml file.
debug_area = None
if st.secrets.show_debug_area:
    debug_area = st.container()
    debug_area.markdown("----\n#### Debugging / Logging Area:")

def debug(label, thing):
    if debug_area:
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        debug_area.expander(f"{timestamp} {label}").write(thing)
# form inputs for the user to provide their learning goals, current skills, and source material.
with col1:
    with st.form(key='inputs'):
        if not st.session_state.has_saved_openai_key:
            # collect an api key if the session doesn't have one imported from secrets.
            st.text_input("OpenAI API key (not saved)", key='openai_api_key', placeholder="sk-...")
            st.caption("Mozillians please contact @jwhiting on slack for access")
        if False:
            # model selection is temporarily disabled; we always use 4o-mini, which is excellent and cheap.
            st.selectbox("Model (4o mini highly recommended for cost effectiveness)", model_options, key='model')
        st.subheader("Learning Goals")
        goals = st.text_area("What are your learning goals? This will help the AI know what's most relevant to you.", key='goals')
        st.subheader("Current Skills Level")
        skills = st.text_area("Describe your experience level. The AI can use this to tailor its responses.", key='skills')
        st.subheader("Source Material")
        source_material = st.text_area("Enter the source material here. Using Firefox's reader view on a web page is recommended, then just copy+paste:", key='source_material')
        submit_source_material = st.form_submit_button("Submit")
        if submit_source_material:
            # reset the quiz to the first topic when regenerating materials, since the number and nature of the topics can change.
            st.session_state.active_topic_idx = 0
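# Assumed shape of each question dict returned by generate_questions, reconstructed from the key accesses in
# quiz_questions below (the actual JSON schema lives in generators.py and may contain more fields):
#   {
#       "questionTitle": "...",        # expander label
#       "answers": ["...", "..."],     # multiple-choice options
#       "answerIndex": 0,              # index of the correct option within "answers"
#       "reasoning": "...",            # shown after submitting, right or wrong
#       "context_relevance": "...",    # ties the question back to the user's goals/skills
#       "follow_up_knowledge": "...",  # shown when the answer is correct
#       "requisite_knowledge": "..."   # shown when the answer is incorrect
#   }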

def quiz_questions(questions):
    # render each quiz question and its answers in a collapsible expander, along with the additional blurbs generated for it.
    if st.session_state.blank_out_questions_nav_trick != 0:
        # this is a hack used when navigating between quiz topic sections to blank out the questions. otherwise streamlit keeps the
        # old questions in the UI in a disabled state while generating new ones, which looks bad and makes it hard to tell what is happening.
        # navigation works by setting blank_out_questions_nav_trick to -1 or 1 and rerunning the script. that blanks out the questions,
        # then the actual index is updated here, which causes new questions to be generated.
        st.session_state.active_topic_idx += st.session_state.blank_out_questions_nav_trick
        st.session_state.blank_out_questions_nav_trick = 0
        st.rerun()
        return
    for q_idx, question in enumerate(questions):
        with st.expander(question["questionTitle"]):
            options = question["answers"]
            correct_index = question["answerIndex"]
            with st.form(key=f"question_{q_idx}"):
                selected_answer = st.radio("Select an answer:", options, key=f"answer_{q_idx}")
                submitted = st.form_submit_button("Submit")
                if submitted:
                    if options.index(selected_answer) == correct_index:
                        st.success("Correct! 🟢")
                        st.markdown(f'**Why:** {question["reasoning"]}')
                        st.markdown(f'**Relevance to your goals and skills:** {question["context_relevance"]}')
                        st.markdown(f'**What to learn next:** {question["follow_up_knowledge"]}')
                    else:
                        st.error("Incorrect. 🔴")
                        st.markdown(f'**Why:** {question["reasoning"]}')
                        st.markdown(f'**Relevance to your goals and skills:** {question["context_relevance"]}')
                        st.markdown(f'**What to review to get this right:** {question["requisite_knowledge"]}')

def current_quiz_section(topics, learning_context):
    # render the current quiz section based on the active topic index. includes the title and the navigation buttons.
    if st.session_state.active_topic_idx >= len(topics):
        st.session_state.active_topic_idx = 0
    s_idx = st.session_state.active_topic_idx
    active_topic = topics[s_idx]
    col1, col2, col3 = st.columns([3, 1, 1])
    with col1:
        st.subheader(active_topic)
    with col2:
        if s_idx > 0:
            if st.button("Previous topic"):
                # see quiz_questions for an explanation of this hack
                st.session_state.blank_out_questions_nav_trick = -1
                st.rerun()
    with col3:
        if s_idx < len(topics) - 1:
            if st.button("Next topic"):
                st.session_state.blank_out_questions_nav_trick = 1
                st.rerun()
    source_material = st.session_state.source_material
    openai_api_key = st.session_state.openai_api_key
    model = st.session_state.model
    [questions, prompt, response] = generate_questions(for_key_topic=active_topic, learning_context=learning_context, source_material=source_material, model=model, _openai_api_key=openai_api_key)
    debug('questions prompt', prompt)
    debug('questions response', response)
    quiz_questions(questions)

def quiz():
    # the top-level quiz function that generates the learning context, key topics, and quiz sections+questions.
    st.subheader("Initial context for the quiz:")
    goals = st.session_state.goals
    skills = st.session_state.skills
    source_material = st.session_state.source_material
    openai_api_key = st.session_state.openai_api_key
    model = st.session_state.model
    st.markdown("First we generate a *learning context* document based on your learning goals and current skills. It asks the LLM to think through a learning trajectory for you with sub-goals, topics to focus on, and problems you might encounter:")
    [learning_context, prompt, response] = generate_learning_context(goals=goals, skills=skills, model=model, _openai_api_key=openai_api_key)
    debug('learning context prompt', prompt)
    debug('learning context response', response)
    st.expander("Learning Context generated :white_check_mark:").write(learning_context)
    st.markdown("Next, we generate a set of key topics from the source material you provided to break the quiz into relevant sections:")
    [topics, prompt, response] = generate_key_topics(source_material=source_material, model=model, _openai_api_key=openai_api_key)
    debug('key topics prompt', prompt)
    debug('key topics response', response)
    debug('key topics object', topics)
    st.expander("Key Topics generated :white_check_mark:").write(topics)
    st.markdown("Finally, we generate a set of questions based on each key topic, with each one contextualized for you personally depending on whether your answer was right or wrong:")
    st.header("Quiz:")
    current_quiz_section(topics, learning_context)

with col2:
    # simple check to see if all the form fields are filled in before generating the quiz.
    can_generate_quiz = not (st.session_state.goals == '' or st.session_state.skills == '' or st.session_state.source_material == '' or st.session_state.openai_api_key == '')
    if can_generate_quiz:
        quiz()
    else:
        st.warning("Please fill in all fields.")