
Commit

Fix lint
LeonWehrhahn committed Sep 13, 2024
1 parent 132b041 commit 7678f60
Showing 16 changed files with 95 additions and 54 deletions.
@@ -50,7 +50,7 @@ async def suggest_feedback(exercise: Exercise, submission: Submission, is_graded
)

# If the submission is not graded (the student is requesting feedback), we reformulate the feedback so that it does not give away the solution
if is_graded == False:
if is_graded is False:
feedback = await filter_feedback(exercise_model, feedback, module_config.approach, module_config.debug)

return convert_to_athana_feedback_model(feedback, exercise_model, is_graded)
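Not part of the diff: a minimal sketch of the comparison idiom this hunk adopts. Linters such as pycodestyle (rule E712) flag equality comparisons against the singletons True/False; the values below are made up.

is_graded = False

# Before: if is_graded == False:   (flagged: equality comparison with a singleton)
# After:  if is_graded is False:   (identity check, as used in this commit)
if is_graded is False:
    print("reformulating feedback for the student")

# When the value is guaranteed to be a bool, "if not is_graded:" is the usual idiom.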
@@ -1,6 +1,13 @@
from typing import Dict, Any, List, Optional

class Element:
"""
Represents an element in a UML diagram.
This class encapsulates the properties and behavior of a UML element,
including its attributes and methods.
"""

def __init__(self, data: Dict[str, Any], element_dict: Optional[Dict[str, Any]] = None):
self.id: str = data.get('id', '')
self.type: str = data.get('type', '')
@@ -14,6 +21,9 @@ def __init__(self, data: Dict[str, Any], element_dict: Optional[Dict[str, Any]]
self.resolve_references(element_dict)

def resolve_references(self, element_dict: Dict[str, Any]):
"""
Resolve attribute and method references using the provided element dictionary.

The JSON data only stores the IDs of the elements that represent attributes and
methods. This method looks those IDs up in the provided element dictionary and
replaces the references with the actual attribute and method names.
"""
self.attributes = [element_dict[ref].get("name", "") for ref in self.attribute_refs if ref in element_dict]
self.methods = [element_dict[ref].get('name', '') for ref in self.method_refs if ref in element_dict]

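Not part of the diff: a minimal sketch, with hypothetical IDs and names, of how the reference lookup in resolve_references turns attribute IDs into names.

# Hypothetical element dictionary: element IDs map to element data (assumed shape).
element_dict = {
    "attr-1": {"name": "balance", "type": "ClassAttribute"},
    "meth-1": {"name": "deposit()", "type": "ClassMethod"},
}
attribute_refs = ["attr-1", "missing-id"]

# Same comprehension pattern as resolve_references: IDs not present in the dictionary are skipped.
attributes = [element_dict[ref].get("name", "") for ref in attribute_refs if ref in element_dict]
print(attributes)  # ['balance']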
@@ -28,10 +38,10 @@ def to_apollon(self) -> str:
if self.attributes or self.methods:
details = []
if self.attributes:
details.append(f" attributes:")
details.append(" attributes:")
details.extend(f" {attr}" for attr in self.attributes)
if self.methods:
details.append(f" methods:")
details.append(" methods:")
details.extend(f" {method}" for method in self.methods)
parts.append("{\n" + "\n".join(details) + "\n}")

@@ -1,6 +1,12 @@
from typing import Dict, Any, List, Optional

class Relation:
"""
Represents a relationship between elements in a UML diagram.
This class encapsulates the properties and behavior of a UML relationship,
including its type, source and target elements, and associated messages.
"""
def __init__(self, data: Dict[str, Any], element_dict: Optional[Dict[str, Any]], index: int):
self.id: str = data.get('id', '')
self.type: str = data.get('type', '')
@@ -15,6 +21,9 @@ def __init__(self, data: Dict[str, Any], element_dict: Optional[Dict[str, Any]],
self.resolve_references(element_dict)

def resolve_references(self, element_dict: Dict[str, Any]):
"""
Resolve the source and target element references using the provided element dictionary.

The JSON data only stores the IDs of the source and target elements. This method
looks those IDs up in the provided element dictionary and replaces the references
with the actual element names.
"""
if self.source['element'] in element_dict:
self.source['element'] = element_dict[self.source['element']].get("name", "")
if self.target['element'] in element_dict:
@@ -89,8 +98,8 @@ def get_relation_arrow(relation_type: str) -> str:

relation_type = relation_type.replace(" ", "").lower()

for key in arrow_map:
for key, value in arrow_map.items():
if relation_type.endswith(key):
return f"({relation_type}) {arrow_map[key]}"
return f"({relation_type}) {value}"

return f"-- {relation_type} --"
@@ -1,23 +1,30 @@
from typing import Dict, Any, List
from string import ascii_uppercase

from module_modeling_llm.apollon_transformer.parser.element import Element
from module_modeling_llm.apollon_transformer.parser.relation import Relation
from module_modeling_llm.helpers.serializers.parser.element import Element
from module_modeling_llm.helpers.serializers.parser.relation import Relation


class UMLParser:
"""
A parser for UML diagrams.
This class parses JSON data representing an Apollon UML diagram
and converts it into a mermaid-like textual representation.
"""

def __init__(self, json_data: Dict[str, Any]):
self.data = json_data
self.title = self.data['type']
self.data: Dict[str, Any] = json_data
self.title: str = self.data['type']
self.elements: List[Element] = []
self.relations: List[Relation] = []
self.owners: Dict[str, List[str]] = {}
self._parse()

def _parse(self):
name_counts = {}
referenced_ids : List[str] = []
name_suffix_counters = {}
def _parse(self) -> None:
name_count: Dict[str, int] = {}
referenced_ids: List[str] = []
name_suffix_counters: Dict[str, int] = {}

# Get all referenced attributes and methods
for element_data in self.data['elements'].values():
@@ -27,15 +34,15 @@ def _parse(self):
# Count occurrences of each name
for element_data in self.data['elements'].values():
name = element_data.get('name')
name_counts[name] = name_counts.get(name, 0) + 1
name_count[name] = name_count.get(name, 0) + 1
name_suffix_counters[name] = 0

# Filter elements and ensure unique names for duplicates
# This filters out all Elements that are referenced by any other Element, as they are attributes or methods
for element_data in self.data['elements'].values():
if element_data.get('id') not in referenced_ids:
name = element_data.get('name')
if name_counts[name] > 1:
if name_count[name] > 1:
suffix_index = name_suffix_counters[name]
element_data['name'] = f"{name}{ascii_uppercase[suffix_index]}"
name_suffix_counters[name] += 1
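Not part of the diff: a standalone illustration of the duplicate-name handling in this hunk, mirroring the logic in _parse. The class names are made up; duplicates receive suffixes A, B, ... from string.ascii_uppercase.

from string import ascii_uppercase

names = ["Account", "Account", "Customer"]  # hypothetical element names

name_count = {}
for name in names:
    name_count[name] = name_count.get(name, 0) + 1
name_suffix_counters = {name: 0 for name in name_count}

unique_names = []
for name in names:
    if name_count[name] > 1:
        suffix_index = name_suffix_counters[name]
        unique_names.append(f"{name}{ascii_uppercase[suffix_index]}")
        name_suffix_counters[name] += 1
    else:
        unique_names.append(name)

print(unique_names)  # ['AccountA', 'AccountB', 'Customer']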
@@ -60,7 +67,7 @@ def _parse(self):
self.owners[ownerName].append(element.name)

def to_apollon(self) -> str:
lines = [f"UML Diagram Type: {self.title}", ""]
lines: List[str] = [f"UML Diagram Type: {self.title}", ""]

if self.elements:
lines.append("@Elements:\n")
48 changes: 30 additions & 18 deletions modules/modeling/module_modeling_llm/module_modeling_llm/config.py
@@ -2,9 +2,11 @@

from athena import config_schema_provider
from module_modeling_llm.models import ModelConfigType, DefaultModelConfig
from module_modeling_llm.prompts.graded_feedback_prompt import graded_feedback_system_message, graded_feedback_human_message
from module_modeling_llm.prompts.filter_feedback_prompt import filter_feedback_system_message, filter_feedback_human_message
from module_modeling_llm.prompts.structured_grading_instructions_prompt import structured_grading_instructions_system_message, structured_grading_instructions_human_message
from module_modeling_llm.prompts import (
graded_feedback_prompt,
filter_feedback_prompt,
structured_grading_instructions_prompt
)

class GenerateSuggestionsPrompt(BaseModel):
"""
@@ -14,20 +16,30 @@ class GenerateSuggestionsPrompt(BaseModel):
_Note: **{problem_statement}**, **{example_solution}**, or **{grading_instructions}** might be omitted if the input
is too long._
"""
graded_feedback_system_message: str = Field(default=graded_feedback_system_message,
description="Message for priming AI behavior and instructing it what to do.")
graded_feedback_human_message: str = Field(default=graded_feedback_human_message,
description="Message from a human. The input on which the AI is supposed to act.")
filter_feedback_system_message: str = Field(default=filter_feedback_system_message,
description="Message for priming AI behavior for filtering ungraded feedback.")
filter_feedback_human_message: str = Field(default=filter_feedback_human_message,
description="Message for instructing AI to filter ungraded feedback.")
structured_grading_instructions_system_message : str = Field(default=structured_grading_instructions_system_message,
description="Message for instructing AI to structure the Problem Statement")
structured_grading_instructions_human_message : str = Field(default=structured_grading_instructions_human_message,
description="Message for instructing AI to filter ungraded feedback.")


graded_feedback_system_message: str = Field(
default=graded_feedback_prompt.graded_feedback_system_message,
description="Message for priming AI behavior and instructing it what to do."
)
graded_feedback_human_message: str = Field(
default=graded_feedback_prompt.graded_feedback_human_message,
description="Message from a human. The input on which the AI is supposed to act."
)
filter_feedback_system_message: str = Field(
default=filter_feedback_prompt.filter_feedback_system_message,
description="Message for priming AI behavior for filtering ungraded feedback."
)
filter_feedback_human_message: str = Field(
default=filter_feedback_prompt.filter_feedback_human_message,
description="Message for instructing AI to filter ungraded feedback."
)
structured_grading_instructions_system_message: str = Field(
default=structured_grading_instructions_prompt.structured_grading_instructions_system_message,
description="Message for instructing AI to structure the Problem Statement"
)
structured_grading_instructions_human_message: str = Field(
default=structured_grading_instructions_prompt.structured_grading_instructions_human_message,
description="Message for instructing AI to filter ungraded feedback."
)

class BasicApproachConfig(BaseModel):
"""This approach uses a LLM with a single prompt to generate feedback in a single step."""
@@ -38,4 +50,4 @@ class BasicApproachConfig(BaseModel):
@config_schema_provider
class Configuration(BaseModel):
debug: bool = Field(default=False, description="Enable debug mode.")
approach: BasicApproachConfig = Field(default=BasicApproachConfig())
approach: BasicApproachConfig = Field(default=BasicApproachConfig())
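Not part of the diff: a runnable sketch of the module-qualified defaults introduced above. SimpleNamespace stands in for an imported prompts module (in the real code, "from module_modeling_llm.prompts import graded_feedback_prompt"); referencing the constant through the module keeps the Field name distinct from the imported symbol.

from types import SimpleNamespace
from pydantic import BaseModel, Field

# Stand-in for the imported prompt module; the message text is a placeholder.
graded_feedback_prompt = SimpleNamespace(
    graded_feedback_system_message="You are a helpful grader."
)

class GenerateSuggestionsPrompt(BaseModel):
    graded_feedback_system_message: str = Field(
        default=graded_feedback_prompt.graded_feedback_system_message,
        description="Message for priming AI behavior and instructing it what to do.",
    )

print(GenerateSuggestionsPrompt().graded_feedback_system_message)  # You are a helpful grader.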
@@ -26,13 +26,13 @@ async def filter_feedback(
)

feedback_result = await predict_and_parse(
model=config.model.get_model(),
model=config.model.get_model(), # type: ignore[attr-defined]
chat_prompt=chat_prompt,
prompt_input=prompt_inputs.dict(),
pydantic_object=AssessmentModel,
tags=[
f"exercise-{exercise.exerciseId}-filter",
f"submission-{exercise.submissionId}-filter",
f"exercise-{exercise.exercise_id}-filter",
f"submission-{exercise.submission_id}-filter",
]
)

@@ -42,13 +42,13 @@ async def generate_suggestions(
("human", config.generate_suggestions_prompt.graded_feedback_human_message)])

feedback_result = await predict_and_parse(
model=config.model.get_model(),
model=config.model.get_model(), # type: ignore[attr-defined]
chat_prompt=chat_prompt,
prompt_input=prompt_inputs.dict(),
pydantic_object=AssessmentModel,
tags=[
f"exercise-{exercise_model.exerciseId}",
f"submission-{exercise_model.submissionId}",
f"exercise-{exercise_model.exercise_id}",
f"submission-{exercise_model.submission_id}",
]
)

@@ -35,13 +35,13 @@ async def get_structured_grading_instructions(
)

grading_instruction_result = await predict_and_parse(
model=config.model.get_model(),
model=config.model.get_model(), # type: ignore[attr-defined]
chat_prompt=chat_prompt,
prompt_input=prompt_inputs.dict(),
pydantic_object=StructuredGradingCriterion,
tags=[
f"exercise-{exercise_model.exerciseId}",
f"submission-{exercise_model.submissionId}",
f"exercise-{exercise_model.exercise_id}",
f"submission-{exercise_model.submission_id}",
]
)

@@ -2,8 +2,8 @@
from pydantic import BaseModel

class ExerciseModel(BaseModel):
submissionId: int
exerciseId: int
submission_id: int
exercise_id: int
transformed_submission: str
problem_statement: Optional[str] = None
max_points: float
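Not part of the diff: a minimal sketch of the camelCase-to-snake_case rename, trimmed to the two renamed fields. Construction sites (such as get_exercise_model later in this commit) and attribute accesses switch to the new names as well.

from pydantic import BaseModel

class ExerciseModel(BaseModel):
    submission_id: int  # was: submissionId
    exercise_id: int    # was: exerciseId

model = ExerciseModel(submission_id=7, exercise_id=42)  # example values
print(model.exercise_id)  # 42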
@@ -34,9 +34,9 @@ def _get_azure_openai_deployments() -> List[str]:
"api-key": os.environ["AZURE_OPENAI_API_KEY"]
}

models_response = requests.get(f"{base_url}/models?api-version=2023-03-15-preview", headers=headers)
models_response = requests.get(f"{base_url}/models?api-version=2023-03-15-preview", headers=headers, timeout=60)
models_data = models_response.json()["data"]
deployments_response = requests.get(f"{base_url}/deployments?api-version=2023-03-15-preview", headers=headers)
deployments_response = requests.get(f"{base_url}/deployments?api-version=2023-03-15-preview", headers=headers, timeout=60)
deployments_data = deployments_response.json()["data"]

# Check if deployment["model"] is a substring of model["id"], e.g. "gpt-4o" is a substring of "gpt-4o-2024-05-13"
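Not part of the diff: a sketch of the timeout pattern added above, with a placeholder URL and API key. Without a timeout a hung endpoint can block the request indefinitely; timeout=60 makes requests raise requests.exceptions.Timeout once the 60-second connect/read limit elapses (linters such as pylint flag requests calls without a timeout).

import requests

response = requests.get(
    "https://example.invalid/models?api-version=2023-03-15-preview",  # placeholder URL
    headers={"api-key": "<placeholder>"},
    timeout=60,
)
data = response.json()["data"]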
@@ -64,7 +64,7 @@ class OpenAIModelConfig(ModelConfig):

model_name: OpenAIModel = Field(default=default_openai_model, # type: ignore
description="The name of the model to use.")
max_tokens: PositiveInt = Field(4000, description="""\
max_tokens: PositiveInt = Field(1000, description="""\
The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion.
The total length of input tokens and generated tokens is limited by the model's context length. \
@@ -5,6 +5,7 @@ class FilterFeedbackInputs(BaseModel):
original_feedback: str
feedback_output_format: str


filter_feedback_system_message = """
Your task is to modify the given feedback. This feedback was originally created for tutors to help them grade student submissions. We want to use this same feedback for students before the due date of the assignment. However, we need to make sure that the feedback does not give away the solution. Modify the feedback so that it is appropriate for students.
@@ -13,6 +13,7 @@ class GradedFeedbackInputs(BaseModel):
uml_diagram_format: str
feedback_output_format: str


graded_feedback_system_message = """
You are an AI tutor for {submission_uml_type} modeling exercise assessment at a prestigious university.
@@ -10,6 +10,7 @@ class StructuredGradingInstructionsInputs(BaseModel):
example_solution: str
structured_instructions_output_format: str


structured_grading_instructions_system_message = """You are an AI tutor for {submission_uml_type} modeling exercise assessment at a prestigious university.
Create a structured grading instruction based on the given grading instructions (important), the solution diagram (if present) and the problem statement. The structured grading instruction should be highly detailed to ensure consistent grading across different tutors.
@@ -25,8 +25,8 @@ def convert_to_athana_feedback_model(
element_ids = [exercise_model.element_id_mapping[element] for element in (feedback.element_names or [])]

feedbacks.append(Feedback(
exercise_id=exercise_model.exerciseId,
submission_id=exercise_model.submissionId,
exercise_id=exercise_model.exercise_id,
submission_id=exercise_model.submission_id,
title=feedback.title,
description=feedback.description,
element_ids=element_ids,
@@ -12,8 +12,8 @@ def get_exercise_model(exercise: Exercise, submission: Submission) -> ExerciseModel:
transformed_submission, element_id_mapping, diagram_type = ApollonJSONTransformer.transform_json(submission.model)

return ExerciseModel(
submissionId=submission.id,
exerciseId=exercise.id,
submission_id=submission.id,
exercise_id=exercise.id,
transformed_submission=transformed_submission,
problem_statement=exercise.problem_statement,
max_points=exercise.max_points,
@@ -32,5 +32,5 @@ async def predict_and_parse(

try:
return await chain.ainvoke(prompt_input, config={"tags": tags})
except (ValidationError) as e:
raise ValueError(f"Could not parse output: {e}")
except ValidationError as e:
raise ValueError(f"Could not parse output: {e}") from e
