
model agnostic approaches
Enea_Gore committed Jan 21, 2025
1 parent 7c844f1 commit c22b534
Showing 2 changed files with 20 additions and 14 deletions.
12 changes: 0 additions & 12 deletions llm_core/llm_core/utils/llm_utils.py
@@ -113,18 +113,6 @@ def get_simple_chat_prompt(
     system_message: str,
     human_message: str,
 ) -> ChatPromptTemplate:
-
-    sys = """
-You are an AI Tutor. You are tasked with grading a student submission based on this problem statement and grading instructions. You must not exceed the maximum number of points. Take time to think about which points in the grading instructions are relevant to the student's submission.
-Furthermore, if a piece of feedback is specific to a sentence in the student submission, specify this in the feedback. Also specify, when possible, which grading instruction you are referring to.
-# Problem statement
-{problem_statement}
-# Grading instructions
-{grading_instructions}
-Max points: {max_points}
-"""
-
     system_message_prompt = SystemMessagePromptTemplate.from_template(system_message)
     human_message_prompt = HumanMessagePromptTemplate.from_template(human_message)
     return ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
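
With the hardcoded grading prompt deleted, get_simple_chat_prompt becomes model- and task-agnostic: the caller now supplies both message templates. A minimal sketch of how a caller might use the refactored helper, assuming the import path follows the file layout (the template strings below are illustrative, not part of this commit):

from llm_core.utils.llm_utils import get_simple_chat_prompt

# Illustrative templates: the grading prompt now lives with the caller.
system_message = (
    "You are an AI tutor. Grade the student submission against the problem "
    "statement and grading instructions without exceeding {max_points} points.\n"
    "# Problem statement\n{problem_statement}\n"
    "# Grading instructions\n{grading_instructions}"
)
human_message = "{submission}"

chat_prompt = get_simple_chat_prompt(system_message, human_message)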
22 changes: 20 additions & 2 deletions llm_core/llm_core/utils/predict_and_parse.py
@@ -6,6 +6,8 @@
 from athena import get_experiment_environment
 from langchain_community.chat_models import ChatOllama  # type: ignore
 from langchain.output_parsers import PydanticOutputParser
+from langchain_openai import ChatOpenAI
+from langchain_core.runnables import RunnableSequence  # required by the fallback chain below

 T = TypeVar("T", bound=BaseModel)
 
 def isOllama(model: BaseLanguageModel) -> bool:
@@ -66,8 +68,24 @@ async def predict_and_parse(
     if isOllama(model):
         try:
             outputParser = PydanticOutputParser(pydantic_object=pydantic_object)
-            chain = chat_prompt | model | outputParser
-            return await chain.ainvoke(prompt_input, config={"tags": tags})
+            chain = chat_prompt | model
+            llm_output = await chain.ainvoke(prompt_input, config={"tags": tags})
+            try:
+                result = outputParser.parse(llm_output.content)
+                return result
+            except Exception:
+                # Parsing the raw Ollama output failed; fall back to an OpenAI
+                # model that reformats the text into the expected schema.
+                outputModel = ChatOpenAI(model="gpt-4o-mini")
+                structured_output_llm = outputModel.with_structured_output(pydantic_object, method="json_mode")
+                chat_prompt = ChatPromptTemplate.from_messages(
+                    [
+                        ("system", "Your only task is to format the following output into json:"),
+                        ("human", "{output}"),
+                    ])
+                chain = RunnableSequence(
+                    chat_prompt,
+                    structured_output_llm,
+                )
+                return await chain.ainvoke(input={"output": llm_output.content}, config={"tags": tags})
         except ValidationError as e:
             raise ValueError(f"Could not parse output: {e}") from e

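Taken together, the new Ollama branch is a two-stage parse: first try PydanticOutputParser on the raw completion, and only on failure pay for a second call to a structured-output model that coerces the text into the target schema. A self-contained sketch of the pattern, with an illustrative Feedback schema standing in for whatever pydantic_object the caller passes (the gpt-4o-mini model name matches the commit; everything else here is assumed for the example):

from pydantic import BaseModel
from langchain.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

class Feedback(BaseModel):  # illustrative schema
    title: str
    points: float

async def parse_with_fallback(raw_text: str) -> Feedback:
    parser = PydanticOutputParser(pydantic_object=Feedback)
    try:
        # Cheap path: the completion is already valid JSON for the schema.
        return parser.parse(raw_text)
    except Exception:
        # Fallback path: a second LLM reformats the free text into the schema.
        formatter = ChatOpenAI(model="gpt-4o-mini").with_structured_output(
            Feedback, method="json_mode"
        )
        prompt = ChatPromptTemplate.from_messages([
            ("system", "Your only task is to format the following output into json:"),
            ("human", "{output}"),
        ])
        return await (prompt | formatter).ainvoke({"output": raw_text})

The fallback trades an extra network call for robustness: local models often wrap their JSON in prose, while the json_mode call is constrained to emit a parseable object.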
