diff --git a/app/common/message_converters.py b/app/common/message_converters.py index 8a3ab52e..671dd565 100644 --- a/app/common/message_converters.py +++ b/app/common/message_converters.py @@ -26,6 +26,7 @@ def convert_iris_message_to_langchain_message( case _: raise ValueError(f"Unknown message role: {iris_message.sender}") + def convert_langchain_message_to_iris_message( base_message: BaseMessage, ) -> PyrisMessage: diff --git a/app/domain/__init__.py b/app/domain/__init__.py index 207f528d..2f56f3f3 100644 --- a/app/domain/__init__.py +++ b/app/domain/__init__.py @@ -1,6 +1,5 @@ from .error_response_dto import IrisErrorResponseDTO from .pipeline_execution_dto import PipelineExecutionDTO -from .pyris_message import PyrisMessage from .pipeline_execution_settings_dto import PipelineExecutionSettingsDTO from .chat.chat_pipeline_execution_dto import ChatPipelineExecutionDTO from .chat.chat_pipeline_execution_base_data_dto import ChatPipelineExecutionBaseDataDTO diff --git a/app/domain/chat/chat_pipeline_execution_base_data_dto.py b/app/domain/chat/chat_pipeline_execution_base_data_dto.py index 7ad8b0e7..e0677c76 100644 --- a/app/domain/chat/chat_pipeline_execution_base_data_dto.py +++ b/app/domain/chat/chat_pipeline_execution_base_data_dto.py @@ -2,7 +2,8 @@ from pydantic import Field, BaseModel -from app.domain import PyrisMessage, PipelineExecutionSettingsDTO +from app.domain import PipelineExecutionSettingsDTO +from app.domain.pyris_message import PyrisMessage from app.domain.data.user_dto import UserDTO from app.domain.status.stage_dto import StageDTO @@ -13,4 +14,4 @@ class ChatPipelineExecutionBaseDataDTO(BaseModel): settings: Optional[PipelineExecutionSettingsDTO] initial_stages: Optional[List[StageDTO]] = Field( default=None, alias="initialStages" - ) \ No newline at end of file + ) diff --git a/app/domain/chat/chat_pipeline_execution_dto.py b/app/domain/chat/chat_pipeline_execution_dto.py index 99c8d7c2..31fa7593 100644 --- a/app/domain/chat/chat_pipeline_execution_dto.py +++ b/app/domain/chat/chat_pipeline_execution_dto.py @@ -2,7 +2,8 @@ from pydantic import Field -from app.domain import PipelineExecutionDTO, PyrisMessage, PipelineExecutionSettingsDTO +from app.domain import PipelineExecutionDTO, PipelineExecutionSettingsDTO +from app.domain.pyris_message import PyrisMessage from app.domain.data.user_dto import UserDTO from app.domain.status.stage_dto import StageDTO diff --git a/app/domain/chat/course_chat/course_chat_pipeline_execution_dto.py b/app/domain/chat/course_chat/course_chat_pipeline_execution_dto.py index eecabdd4..7e3a2cfc 100644 --- a/app/domain/chat/course_chat/course_chat_pipeline_execution_dto.py +++ b/app/domain/chat/course_chat/course_chat_pipeline_execution_dto.py @@ -2,7 +2,6 @@ from pydantic import Field -from ..chat_pipeline_execution_base_data_dto import ChatPipelineExecutionBaseDataDTO from ..chat_pipeline_execution_dto import ChatPipelineExecutionDTO from ...data.extended_course_dto import ExtendedCourseDTO from ...data.metrics.competency_jol_dto import CompetencyJolDTO diff --git a/app/domain/chat/course_chat/course_chat_status_update_dto.py b/app/domain/chat/course_chat/course_chat_status_update_dto.py index 710a6f0e..3e54dd96 100644 --- a/app/domain/chat/course_chat/course_chat_status_update_dto.py +++ b/app/domain/chat/course_chat/course_chat_status_update_dto.py @@ -1,7 +1,8 @@ -from typing import Optional +from typing import Optional, List from app.domain.status.status_update_dto import StatusUpdateDTO class 
CourseChatStatusUpdateDTO(StatusUpdateDTO): result: Optional[str] = None + suggestions: List[str] = [] diff --git a/app/domain/chat/exercise_chat/exercise_chat_status_update_dto.py b/app/domain/chat/exercise_chat/exercise_chat_status_update_dto.py index a453dbd7..0c96342c 100644 --- a/app/domain/chat/exercise_chat/exercise_chat_status_update_dto.py +++ b/app/domain/chat/exercise_chat/exercise_chat_status_update_dto.py @@ -1,7 +1,8 @@ -from typing import Optional +from typing import Optional, List from app.domain.status.status_update_dto import StatusUpdateDTO class ExerciseChatStatusUpdateDTO(StatusUpdateDTO): result: Optional[str] = None + suggestions: List[str] = [] diff --git a/app/domain/chat/interaction_suggestion_dto.py b/app/domain/chat/interaction_suggestion_dto.py new file mode 100644 index 00000000..43e73acd --- /dev/null +++ b/app/domain/chat/interaction_suggestion_dto.py @@ -0,0 +1,10 @@ +from typing import Optional, List + +from pydantic import Field, BaseModel + +from app.domain import PyrisMessage + + +class InteractionSuggestionPipelineExecutionDTO(BaseModel): + chat_history: List[PyrisMessage] = Field(alias="chatHistory", default=[]) + last_message: Optional[str] = Field(alias="lastMessage", default=None) diff --git a/app/domain/data/competency_dto.py b/app/domain/data/competency_dto.py index 63a7e921..0e2c697c 100644 --- a/app/domain/data/competency_dto.py +++ b/app/domain/data/competency_dto.py @@ -19,7 +19,5 @@ class CompetencyDTO(BaseModel): title: Optional[str] = None description: Optional[str] = None taxonomy: Optional[CompetencyTaxonomy] = None - soft_due_date: Optional[datetime] = Field( - default=None, alias="softDueDate" - ) - optional: Optional[bool] = None \ No newline at end of file + soft_due_date: Optional[datetime] = Field(default=None, alias="softDueDate") + optional: Optional[bool] = None diff --git a/app/domain/data/exam_dto.py b/app/domain/data/exam_dto.py index 9ed31c1b..424bfaf1 100644 --- a/app/domain/data/exam_dto.py +++ b/app/domain/data/exam_dto.py @@ -10,6 +10,12 @@ class ExamDTO(BaseModel): is_text_exam: bool = Field(alias="isTextExam", default=False) start_date: Optional[datetime] = Field(alias="startDate", default=None) end_date: Optional[datetime] = Field(alias="endDate", default=None) - publish_results_date: Optional[datetime] = Field(alias="publishResultsDate", default=None) - exam_student_review_start: Optional[datetime] = Field(alias="examStudentReviewStart", default=None) - exam_student_review_end: Optional[datetime] = Field(alias="examStudentReviewEnd", default=None) + publish_results_date: Optional[datetime] = Field( + alias="publishResultsDate", default=None + ) + exam_student_review_start: Optional[datetime] = Field( + alias="examStudentReviewStart", default=None + ) + exam_student_review_end: Optional[datetime] = Field( + alias="examStudentReviewEnd", default=None + ) diff --git a/app/domain/data/exercise_with_submissions_dto.py b/app/domain/data/exercise_with_submissions_dto.py index 668e04ac..ee5eb4bf 100644 --- a/app/domain/data/exercise_with_submissions_dto.py +++ b/app/domain/data/exercise_with_submissions_dto.py @@ -39,11 +39,17 @@ class ExerciseWithSubmissionsDTO(BaseModel): mode: ExerciseMode = Field(alias="mode") max_points: Optional[float] = Field(alias="maxPoints", default=None) bonus_points: Optional[float] = Field(alias="bonusPoints", default=None) - difficulty_level: Optional[DifficultyLevel] = Field(alias="difficultyLevel", default=None) + difficulty_level: Optional[DifficultyLevel] = Field( + alias="difficultyLevel", 
default=None + ) release_date: Optional[datetime] = Field(alias="releaseDate", default=None) due_date: Optional[datetime] = Field(alias="dueDate", default=None) - inclusion_mode: Optional[IncludedInOverallScore] = Field(alias="inclusionMode", default=None) - presentation_score_enabled: Optional[bool] = Field(alias="presentationScoreEnabled", default=None) + inclusion_mode: Optional[IncludedInOverallScore] = Field( + alias="inclusionMode", default=None + ) + presentation_score_enabled: Optional[bool] = Field( + alias="presentationScoreEnabled", default=None + ) submissions: List[SimpleSubmissionDTO] = Field(default=[]) class Config: diff --git a/app/domain/data/extended_course_dto.py b/app/domain/data/extended_course_dto.py index 95b6466f..1382fb98 100644 --- a/app/domain/data/extended_course_dto.py +++ b/app/domain/data/extended_course_dto.py @@ -14,11 +14,17 @@ class ExtendedCourseDTO(BaseModel): description: Optional[str] = Field(alias="description", default=None) start_time: Optional[datetime] = Field(alias="startTime", default=None) end_time: Optional[datetime] = Field(alias="endTime", default=None) - default_programming_language: Optional[ProgrammingLanguage] = Field(alias="defaultProgrammingLanguage", default=None) + default_programming_language: Optional[ProgrammingLanguage] = Field( + alias="defaultProgrammingLanguage", default=None + ) max_complaints: Optional[int] = Field(alias="maxComplaints", default=None) max_team_complaints: Optional[int] = Field(alias="maxTeamComplaints", default=None) - max_complaint_time_days: Optional[int] = Field(alias="maxComplaintTimeDays", default=None) - max_request_more_feedback_time_days: Optional[int] = Field(alias="maxRequestMoreFeedbackTimeDays", default=None) + max_complaint_time_days: Optional[int] = Field( + alias="maxComplaintTimeDays", default=None + ) + max_request_more_feedback_time_days: Optional[int] = Field( + alias="maxRequestMoreFeedbackTimeDays", default=None + ) max_points: Optional[int] = Field(alias="maxPoints", default=None) presentation_score: Optional[int] = Field(alias="presentationScore", default=None) exercises: List[ExerciseWithSubmissionsDTO] = Field(alias="exercises", default=[]) diff --git a/app/domain/data/lecture_dto.py b/app/domain/data/lecture_dto.py index 520b3b76..223b5999 100644 --- a/app/domain/data/lecture_dto.py +++ b/app/domain/data/lecture_dto.py @@ -4,6 +4,7 @@ from app.domain.data.lecture_unit_dto import LectureUnitDTO + class PyrisLectureDTO(BaseModel): id: int = Field(alias="id") title: Optional[str] = Field(alias="title", default=None) diff --git a/app/domain/data/metrics/competency_information_dto.py b/app/domain/data/metrics/competency_information_dto.py index b1f09aa2..2c97450c 100644 --- a/app/domain/data/metrics/competency_information_dto.py +++ b/app/domain/data/metrics/competency_information_dto.py @@ -15,4 +15,4 @@ class CompetencyInformationDTO(BaseModel): mastery_threshold: Optional[int] = Field(None, alias="masteryThreshold") class Config: - populate_by_name = True \ No newline at end of file + populate_by_name = True diff --git a/app/domain/data/metrics/competency_student_metrics_dto.py b/app/domain/data/metrics/competency_student_metrics_dto.py index 0238cb4e..f2ee6a36 100644 --- a/app/domain/data/metrics/competency_student_metrics_dto.py +++ b/app/domain/data/metrics/competency_student_metrics_dto.py @@ -1,11 +1,13 @@ -from typing import Dict, Set, Optional +from typing import Dict, Set from pydantic import BaseModel, Field from app.domain.data.metrics.competency_information_dto import 
CompetencyInformationDTO from app.domain.data.metrics.competency_jol_dto import CompetencyJolDTO class CompetencyStudentMetricsDTO(BaseModel): - competency_information: Dict[int, CompetencyInformationDTO] = Field({}, alias="competencyInformation") + competency_information: Dict[int, CompetencyInformationDTO] = Field( + {}, alias="competencyInformation" + ) exercises: Dict[int, Set[int]] = Field({}) lecture_units: Dict[int, Set[int]] = Field({}, alias="lectureUnits") progress: Dict[int, float] = Field({}) diff --git a/app/domain/data/metrics/exercise_student_metrics_dto.py b/app/domain/data/metrics/exercise_student_metrics_dto.py index 2019aef4..ffa2924b 100644 --- a/app/domain/data/metrics/exercise_student_metrics_dto.py +++ b/app/domain/data/metrics/exercise_student_metrics_dto.py @@ -1,10 +1,12 @@ -from typing import Optional, Dict, Set +from typing import Dict, Set from pydantic import BaseModel, Field class ExerciseStudentMetricsDTO(BaseModel): average_score: Dict[int, float] = Field({}, alias="averageScore") score: Dict[int, float] = Field({}) - average_latest_submission: Dict[int, float] = Field({}, alias="averageLatestSubmission") + average_latest_submission: Dict[int, float] = Field( + {}, alias="averageLatestSubmission" + ) latest_submission: Dict[int, float] = Field({}, alias="latestSubmission") completed: Set[int] = Field({}) diff --git a/app/domain/data/metrics/lecture_unit_information_dto.py b/app/domain/data/metrics/lecture_unit_information_dto.py index ea068388..f79440fe 100644 --- a/app/domain/data/metrics/lecture_unit_information_dto.py +++ b/app/domain/data/metrics/lecture_unit_information_dto.py @@ -2,6 +2,7 @@ from pydantic import BaseModel, Field from datetime import datetime + class LectureUnitInformationDTO(BaseModel): id: Optional[int] = None name: Optional[str] = None @@ -9,4 +10,4 @@ class LectureUnitInformationDTO(BaseModel): type: Optional[str] = None class Config: - populate_by_name = True \ No newline at end of file + populate_by_name = True diff --git a/app/domain/data/metrics/lecture_unit_student_metrics_dto.py b/app/domain/data/metrics/lecture_unit_student_metrics_dto.py index 18e9bef7..1325d2f1 100644 --- a/app/domain/data/metrics/lecture_unit_student_metrics_dto.py +++ b/app/domain/data/metrics/lecture_unit_student_metrics_dto.py @@ -1,10 +1,14 @@ from typing import Dict, Set, Optional from pydantic import BaseModel, Field -from app.domain.data.metrics.lecture_unit_information_dto import LectureUnitInformationDTO +from app.domain.data.metrics.lecture_unit_information_dto import ( + LectureUnitInformationDTO, +) class LectureUnitStudentMetricsDTO(BaseModel): - lecture_unit_information: Dict[int, LectureUnitInformationDTO] = Field({}, alias="lectureUnitInformation") + lecture_unit_information: Dict[int, LectureUnitInformationDTO] = Field( + {}, alias="lectureUnitInformation" + ) completed: Optional[Set[int]] = None class Config: diff --git a/app/domain/data/metrics/student_metrics_dto.py b/app/domain/data/metrics/student_metrics_dto.py index 8e17e20a..150c5fc7 100644 --- a/app/domain/data/metrics/student_metrics_dto.py +++ b/app/domain/data/metrics/student_metrics_dto.py @@ -1,15 +1,26 @@ from typing import Optional from pydantic import Field, BaseModel -from app.domain.data.metrics.competency_student_metrics_dto import CompetencyStudentMetricsDTO -from app.domain.data.metrics.exercise_student_metrics_dto import ExerciseStudentMetricsDTO -from app.domain.data.metrics.lecture_unit_student_metrics_dto import LectureUnitStudentMetricsDTO +from 
app.domain.data.metrics.competency_student_metrics_dto import ( + CompetencyStudentMetricsDTO, +) +from app.domain.data.metrics.exercise_student_metrics_dto import ( + ExerciseStudentMetricsDTO, +) +from app.domain.data.metrics.lecture_unit_student_metrics_dto import ( + LectureUnitStudentMetricsDTO, +) class StudentMetricsDTO(BaseModel): - exercise_metrics: Optional[ExerciseStudentMetricsDTO] = Field(None, alias="exerciseMetrics") - lecture_unit_student_metrics_dto: Optional[LectureUnitStudentMetricsDTO] = Field(None, - alias="lectureUnitStudentMetricsDTO") - competency_metrics: Optional[CompetencyStudentMetricsDTO] = Field(None, alias="competencyMetrics") + exercise_metrics: Optional[ExerciseStudentMetricsDTO] = Field( + None, alias="exerciseMetrics" + ) + lecture_unit_student_metrics_dto: Optional[LectureUnitStudentMetricsDTO] = Field( + None, alias="lectureUnitStudentMetricsDTO" + ) + competency_metrics: Optional[CompetencyStudentMetricsDTO] = Field( + None, alias="competencyMetrics" + ) class Config: populate_by_name = True diff --git a/app/domain/data/programming_exercise_dto.py b/app/domain/data/programming_exercise_dto.py index d36e9c66..51e5e2d7 100644 --- a/app/domain/data/programming_exercise_dto.py +++ b/app/domain/data/programming_exercise_dto.py @@ -21,7 +21,9 @@ class ProgrammingLanguage(str, Enum): class ProgrammingExerciseDTO(BaseModel): id: int name: str - programming_language: Optional[str] = Field(alias="programmingLanguage", default=None) + programming_language: Optional[str] = Field( + alias="programmingLanguage", default=None + ) template_repository: Dict[str, str] = Field(alias="templateRepository", default={}) solution_repository: Dict[str, str] = Field(alias="solutionRepository", default={}) test_repository: Dict[str, str] = Field(alias="testRepository", default={}) diff --git a/app/domain/pipeline_execution_dto.py b/app/domain/pipeline_execution_dto.py index e27c7406..86299d40 100644 --- a/app/domain/pipeline_execution_dto.py +++ b/app/domain/pipeline_execution_dto.py @@ -1,9 +1,4 @@ -from typing import List, Optional - -from pydantic import BaseModel, Field - -from app.domain.pipeline_execution_settings_dto import PipelineExecutionSettingsDTO -from app.domain.status.stage_dto import StageDTO +from pydantic import BaseModel class PipelineExecutionDTO(BaseModel): diff --git a/app/domain/pipeline_execution_settings_dto.py b/app/domain/pipeline_execution_settings_dto.py index bd94ffd2..86242d23 100644 --- a/app/domain/pipeline_execution_settings_dto.py +++ b/app/domain/pipeline_execution_settings_dto.py @@ -5,5 +5,7 @@ class PipelineExecutionSettingsDTO(BaseModel): authentication_token: str = Field(alias="authenticationToken") - allowed_model_identifiers: Optional[List[str]] = Field(alias="allowedModelIdentifiers", default=[]) + allowed_model_identifiers: Optional[List[str]] = Field( + alias="allowedModelIdentifiers", default=[] + ) artemis_base_url: str = Field(alias="artemisBaseUrl") diff --git a/app/main.py b/app/main.py index 46a6c8e0..28203458 100644 --- a/app/main.py +++ b/app/main.py @@ -1,6 +1,4 @@ -from fastapi.exceptions import RequestValidationError from fastapi.responses import ORJSONResponse -from fastapi import FastAPI from starlette.background import BackgroundTask from starlette.responses import Response @@ -18,29 +16,39 @@ app = FastAPI(default_response_class=ORJSONResponse) + @app.exception_handler(RequestValidationError) async def validation_exception_handler(request: Request, exc: RequestValidationError): - exc_str = f'{exc}'.replace('\n', ' 
').replace(' ', ' ') + exc_str = f"{exc}".replace("\n", " ").replace(" ", " ") logging.error(f"{request}: {exc_str}") - content = {'status_code': 10422, 'message': exc_str, 'data': None} - return JSONResponse(content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY) + content = {"status_code": 10422, "message": exc_str, "data": None} + return JSONResponse( + content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY + ) + def log_info(req_body, res_body): logging.info(req_body) logging.info(res_body) -@app.middleware('http') + + +@app.middleware("http") async def some_middleware(request: Request, call_next): req_body = await request.body() response = await call_next(request) - res_body = b'' + res_body = b"" async for chunk in response.body_iterator: res_body += chunk task = BackgroundTask(log_info, req_body, res_body) - return Response(content=res_body, status_code=response.status_code, - headers=dict(response.headers), media_type=response.media_type, background=task) - + return Response( + content=res_body, + status_code=response.status_code, + headers=dict(response.headers), + media_type=response.media_type, + background=task, + ) app.include_router(health_router) diff --git a/app/pipeline/chat/course_chat_pipeline.py b/app/pipeline/chat/course_chat_pipeline.py index 5d6ad52b..1ec558ef 100644 --- a/app/pipeline/chat/course_chat_pipeline.py +++ b/app/pipeline/chat/course_chat_pipeline.py @@ -10,26 +10,34 @@ from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ( ChatPromptTemplate, - SystemMessagePromptTemplate, - AIMessagePromptTemplate, - MessagesPlaceholder, ) from langchain_core.runnables import Runnable from langchain_core.tools import tool +from .interaction_suggestion_pipeline import ( + InteractionSuggestionPipeline, +) from ...common import convert_iris_message_to_langchain_message from ...domain import PyrisMessage -from ...domain.data.exercise_with_submissions_dto import ExerciseWithSubmissionsDTO +from app.domain.chat.interaction_suggestion_dto import ( + InteractionSuggestionPipelineExecutionDTO, +) from ...llm import CapabilityRequestHandler, RequirementList from ..prompts.iris_course_chat_prompts import ( tell_iris_initial_system_prompt, - tell_begin_agent_prompt, tell_chat_history_exists_prompt, tell_no_chat_history_prompt, tell_format_reminder_prompt, - tell_begin_agent_jol_prompt + tell_begin_agent_prompt, + tell_chat_history_exists_prompt, + tell_no_chat_history_prompt, + tell_format_reminder_prompt, + tell_begin_agent_jol_prompt, ) from ..prompts.iris_course_chat_prompts_elicit import ( elicit_iris_initial_system_prompt, - elicit_begin_agent_prompt, elicit_chat_history_exists_prompt, elicit_no_chat_history_prompt, elicit_format_reminder_prompt, - elicit_begin_agent_jol_prompt + elicit_begin_agent_prompt, + elicit_chat_history_exists_prompt, + elicit_no_chat_history_prompt, + elicit_format_reminder_prompt, + elicit_begin_agent_jol_prompt, ) from ...domain import CourseChatPipelineExecutionDTO from ...web.status.status_update import ( @@ -53,11 +61,13 @@ def get_mastery(progress, confidence): weight = 2.0 / 3.0 return (1 - weight) * progress + weight * confidence + class CourseChatPipeline(Pipeline): """Course chat pipeline that answers course related questions from students.""" llm: IrisLangchainChatModel pipeline: Runnable + suggestion_pipeline: InteractionSuggestionPipeline callback: CourseChatStatusCallback prompt: ChatPromptTemplate variant: str @@ -70,7 +80,7 @@ def __init__(self, callback: 
CourseChatStatusCallback, variant: str = "default") # Set the langchain chat model request_handler = CapabilityRequestHandler( requirements=RequirementList( - gpt_version_equivalent=4.5, + gpt_version_equivalent=4, context_length=16385, json_mode=True, ) @@ -83,6 +93,8 @@ def __init__(self, callback: CourseChatStatusCallback, variant: str = "default") ) self.callback = callback + self.suggestion_pipeline = InteractionSuggestionPipeline(variant="course") + # Create the pipeline self.pipeline = self.llm | StrOutputParser() @@ -100,16 +112,19 @@ def __call__(self, dto: CourseChatPipelineExecutionDTO, **kwargs): """ used_tools = [] + # Define tools @tool def get_exercise_list() -> list[dict]: """ Get the list of exercises in the course. - Use this if the student asks you about an exercise. Note: The exercise contains a list of submissions (timestamp and score) of this student so you + Use this if the student asks you about an exercise. + Note: The exercise contains a list of submissions (timestamp and score) of this student so you can provide additional context regarding their progress and tendencies over time. Also, ensure to use the provided current date and time and compare it to the start date and due date etc. Do not recommend that the student should work on exercises with a past due date. - The submissions array tells you about the status of the student in this exercise: You see when the student submitted the exercise and what score they got. + The submissions array tells you about the status of the student in this exercise: + You see when the student submitted the exercise and what score they got. A 100% score means the student solved the exercise correctly and completed it. """ used_tools.append("get_exercise_list") @@ -117,11 +132,12 @@ def get_exercise_list() -> list[dict]: exercises = [] for exercise in dto.course.exercises: exercise_dict = exercise.dict() - exercise_dict["due_date_over"] = exercise.due_date < current_time if exercise.due_date else None + exercise_dict["due_date_over"] = ( + exercise.due_date < current_time if exercise.due_date else None + ) exercises.append(exercise_dict) return exercises - @tool def get_course_details() -> dict: """ @@ -134,7 +150,8 @@ def get_course_details() -> dict: dto.course.name if dto.course else "No course provided" ), "course_description": ( - dto.course.description if dto.course and dto.course.description + dto.course.description + if dto.course and dto.course.description else "No course description provided" ), "programming_language": ( @@ -155,7 +172,9 @@ def get_course_details() -> dict: } @tool - def get_student_exercise_metrics(exercise_ids: typing.List[int]) -> Union[dict[int, dict], str]: + def get_student_exercise_metrics( + exercise_ids: typing.List[int], + ) -> Union[dict[int, dict], str]: """ Get the student exercise metrics for the given exercises. Important: You have to pass the correct exercise ids here. If you don't know it, @@ -173,15 +192,22 @@ def get_student_exercise_metrics(exercise_ids: typing.List[int]) -> Union[dict[i if not dto.metrics or not dto.metrics.exercise_metrics: return "No data available!! Do not requery." 
metrics = dto.metrics.exercise_metrics - if metrics.average_score and any(exercise_id in metrics.average_score for exercise_id in exercise_ids): + if metrics.average_score and any( + exercise_id in metrics.average_score for exercise_id in exercise_ids + ): return { exercise_id: { "global_average_score": metrics.average_score[exercise_id], "score_of_student": metrics.score.get(exercise_id, None), - "global_average_latest_submission": metrics.average_latest_submission.get(exercise_id, None), - "latest_submission_of_student": metrics.latest_submission.get(exercise_id, None), + "global_average_latest_submission": metrics.average_latest_submission.get( + exercise_id, None + ), + "latest_submission_of_student": metrics.latest_submission.get( + exercise_id, None + ), } - for exercise_id in exercise_ids if exercise_id in metrics.average_score + for exercise_id in exercise_ids + if exercise_id in metrics.average_score } else: return "No data available! Do not requery." @@ -191,28 +217,42 @@ def get_competency_list() -> list: """ Get the list of competencies in the course. Exercises might be associated with competencies. A competency is a skill or knowledge that a student - should have after completing the course, and instructors may add lectures and exercises to these competencies. + should have after completing the course, and instructors may add lectures and exercises + to these competencies. You can use this if the students asks you about a competency, or if you want to provide additional context regarding their progress overall or in a specific area. - A competency has the following attributes: name, description, taxonomy, soft due date, optional, and mastery threshold. - The response may include metrics for each competency, such as progress and confidence (0%-100%). These are system-generated. - The judgment of learning (JOL) values indicate the self-reported confidence by the student (0-5, 5 star). The object - describing it also indicates the system-computed confidence at the time when the student added their JoL assessment. + A competency has the following attributes: name, description, taxonomy, soft due date, optional, + and mastery threshold. + The response may include metrics for each competency, such as progress and confidence (0%-100%). + These are system-generated. + The judgment of learning (JOL) values indicate the self-reported confidence by the student (0-5, 5 star). + The object describing it also indicates the system-computed confidence at the time when the student + added their JoL assessment. 
""" used_tools.append("get_competency_list") if not dto.metrics or not dto.metrics.competency_metrics: return dto.course.competencies competency_metrics = dto.metrics.competency_metrics weight = 2.0 / 3.0 - return [{ - "info": competency_metrics.competency_information.get(comp, None), - "exercise_ids": competency_metrics.exercises.get(comp, []), - "progress": competency_metrics.progress.get(comp, 0), - "confidence": competency_metrics.confidence.get(comp, 0), - "mastery": ((1 - weight) * competency_metrics.progress.get(comp, 0) - + weight * competency_metrics.confidence.get(comp, 0)), - "judgment_of_learning": competency_metrics.jol_values.get[comp].json() if competency_metrics.jol_values and comp in competency_metrics.jol_values else None, - } for comp in competency_metrics.competency_information] + return [ + { + "info": competency_metrics.competency_information.get(comp, None), + "exercise_ids": competency_metrics.exercises.get(comp, []), + "progress": competency_metrics.progress.get(comp, 0), + "confidence": competency_metrics.confidence.get(comp, 0), + "mastery": ( + (1 - weight) * competency_metrics.progress.get(comp, 0) + + weight * competency_metrics.confidence.get(comp, 0) + ), + "judgment_of_learning": ( + competency_metrics.jol_values.get[comp].json() + if competency_metrics.jol_values + and comp in competency_metrics.jol_values + else None + ), + } + for comp in competency_metrics.competency_information + ] if dto.user.id % 3 < 2: iris_initial_system_prompt = tell_iris_initial_system_prompt @@ -232,49 +272,89 @@ def get_competency_list() -> list: try: logger.info("Running course chat pipeline...") history: List[PyrisMessage] = dto.chat_history[-5:] or [] - query: Optional[PyrisMessage] = (dto.chat_history[-1] if dto.chat_history else None) + query: Optional[PyrisMessage] = ( + dto.chat_history[-1] if dto.chat_history else None + ) # Set up the initial prompt - initial_prompt_with_date = iris_initial_system_prompt.replace("{current_date}", - datetime.now(tz=pytz.UTC).strftime( - "%Y-%m-%d %H:%M:%S")) + initial_prompt_with_date = iris_initial_system_prompt.replace( + "{current_date}", + datetime.now(tz=pytz.UTC).strftime("%Y-%m-%d %H:%M:%S"), + ) params = {} if self.variant == "jol": - comp = next((c for c in dto.course.competencies if c.id == dto.competency_jol.competency_id), None) + comp = next( + ( + c + for c in dto.course.competencies + if c.id == dto.competency_jol.competency_id + ), + None, + ) agent_prompt = begin_agent_jol_prompt params = { - "jol": json.dumps({ - "value": dto.competency_jol.jol_value, - "competency_mastery": get_mastery(dto.competency_jol.competency_progress, dto.competency_jol.competency_confidence), - }), + "jol": json.dumps( + { + "value": dto.competency_jol.jol_value, + "competency_mastery": get_mastery( + dto.competency_jol.competency_progress, + dto.competency_jol.competency_confidence, + ), + } + ), "competency": comp.json(), } else: - agent_prompt = begin_agent_prompt if query is not None else no_chat_history_prompt + agent_prompt = ( + begin_agent_prompt if query is not None else no_chat_history_prompt + ) params = { - "course_name": dto.course.name if dto.course else "", + "course_name": ( + dto.course.name if dto.course else "" + ), } if query is not None: # Add the conversation to the prompt - chat_history_messages = [convert_iris_message_to_langchain_message(message) for message in history] + chat_history_messages = [ + convert_iris_message_to_langchain_message(message) + for message in history + ] self.prompt = 
ChatPromptTemplate.from_messages( [ - ("system", initial_prompt_with_date + "\n" + chat_history_exists_prompt + "\n" + agent_prompt), + ( + "system", + initial_prompt_with_date + + "\n" + + chat_history_exists_prompt + + "\n" + + agent_prompt, + ), *chat_history_messages, - ("system", format_reminder_prompt) + ("system", format_reminder_prompt), ] ) else: self.prompt = ChatPromptTemplate.from_messages( [ - ("system", initial_prompt_with_date + "\n" + - agent_prompt + "\n" + format_reminder_prompt), + ( + "system", + initial_prompt_with_date + + "\n" + + agent_prompt + + "\n" + + format_reminder_prompt, + ), ] ) - tools = [get_course_details, get_exercise_list, get_student_exercise_metrics, get_competency_list] + tools = [ + get_course_details, + get_exercise_list, + get_student_exercise_metrics, + get_competency_list, + ] agent = create_structured_chat_agent( llm=self.llm, tools=tools, prompt=self.prompt ) @@ -296,15 +376,34 @@ def get_competency_list() -> list: self.callback.in_progress("Reading course details ...") elif action.tool == "get_competency_list": self.callback.in_progress("Reading competency list ...") - elif step['output']: - out = step['output'] + elif step["output"]: + out = step["output"] print(out) - self.callback.done(None, final_result=out) + suggestions = None + try: + if out: + suggestion_dto = InteractionSuggestionPipelineExecutionDTO( + chat_history=history, + last_message=out, + ) + suggestions = self.suggestion_pipeline(suggestion_dto) + except Exception as e: + logger.error( + "An error occurred while running the course chat interaction suggestion pipeline", + exc_info=e, + ) + traceback.print_exc() + + self.callback.done(None, final_result=out, suggestions=suggestions) except Exception as e: - logger.error(f"An error occurred while running the course chat pipeline", exc_info=e) + logger.error( + "An error occurred while running the course chat pipeline", exc_info=e + ) traceback.print_exc() - self.callback.error("An error occurred while running the course chat pipeline.") + self.callback.error( + "An error occurred while running the course chat pipeline." + ) def datetime_to_string(dt: Optional[datetime]) -> str: diff --git a/app/pipeline/chat/exercise_chat_pipeline.py b/app/pipeline/chat/exercise_chat_pipeline.py index 7ac432d6..58caafa8 100644 --- a/app/pipeline/chat/exercise_chat_pipeline.py +++ b/app/pipeline/chat/exercise_chat_pipeline.py @@ -1,23 +1,23 @@ import logging -import os -import threading import traceback from typing import List, Dict -from langchain_core.output_parsers import StrOutputParser, PydanticOutputParser +from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ( ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, - AIMessagePromptTemplate, - PromptTemplate, ) from langchain_core.runnables import Runnable from langsmith import traceable from weaviate.collections.classes.filters import Filter +from .interaction_suggestion_pipeline import InteractionSuggestionPipeline from ...common import convert_iris_message_to_langchain_message from ...domain import PyrisMessage +from ...domain.chat.interaction_suggestion_dto import ( + InteractionSuggestionPipelineExecutionDTO, +) from ...llm import CapabilityRequestHandler, RequirementList from ...domain.data.build_log_entry import BuildLogEntryDTO from ...domain.data.feedback_dto import FeedbackDTO @@ -41,12 +41,13 @@ class ExerciseChatPipeline(Pipeline): - """Exercise chat pipeline that answers exercises related questions from students. 
""" + """Exercise chat pipeline that answers exercises related questions from students.""" llm: IrisLangchainChatModel pipeline: Runnable callback: ExerciseChatStatusCallback file_selector_pipeline: FileSelectorPipeline + suggestion_pipeline: InteractionSuggestionPipeline prompt: ChatPromptTemplate def __init__(self, callback: ExerciseChatStatusCallback): @@ -67,6 +68,7 @@ def __init__(self, callback: ExerciseChatStatusCallback): # Create the pipelines self.file_selector_pipeline = FileSelectorPipeline() self.pipeline = self.llm | StrOutputParser() + self.suggestion_pipeline = InteractionSuggestionPipeline(variant="exercise") def __repr__(self): return f"{self.__class__.__name__}(llm={self.llm})" @@ -197,7 +199,11 @@ def _run_tutor_chat_pipeline( chat_history=history, question=query, repository=repository, - feedbacks=(submission.latest_result.feedbacks if submission and submission.latest_result else []) + feedbacks=( + submission.latest_result.feedbacks + if submission and submission.latest_result + else [] + ), ) self.callback.done() except Exception as e: @@ -245,7 +251,11 @@ def _run_tutor_chat_pipeline( ) self.prompt = ChatPromptTemplate.from_messages(prompt_val) try: - response_draft = (self.prompt | self.pipeline).with_config({"run_name": "Response Drafting"}).invoke({}) + response_draft = ( + (self.prompt | self.pipeline) + .with_config({"run_name": "Response Drafting"}) + .invoke({}) + ) self.prompt = ChatPromptTemplate.from_messages( [ SystemMessagePromptTemplate.from_template(guide_system_prompt), @@ -254,7 +264,11 @@ def _run_tutor_chat_pipeline( prompt_val = self.prompt.format_messages(response=response_draft) self.prompt = ChatPromptTemplate.from_messages(prompt_val) - guide_response = (self.prompt | self.pipeline).with_config({"run_name": "Response Refining"}).invoke({}) + guide_response = ( + (self.prompt | self.pipeline) + .with_config({"run_name": "Response Refining"}) + .invoke({}) + ) if "!ok!" in guide_response: print("Response is ok and not rewritten!!!") @@ -262,6 +276,24 @@ def _run_tutor_chat_pipeline( else: print("Response is rewritten.") self.exercise_chat_response = guide_response + self.suggestions = None + try: + if self.exercise_chat_response: + suggestion_dto = InteractionSuggestionPipelineExecutionDTO( + chat_history=history, + last_message=self.exercise_chat_response, + ) + suggestions = self.suggestion_pipeline(suggestion_dto) + logger.info( + f"Generated suggestions from interaction suggestion pipeline: {suggestions}" + ) + self.suggestions = suggestions + except Exception as e: + logger.error( + "An error occurred while running the course chat interaction suggestion pipeline", + exc_info=e, + ) + traceback.print_exc() except Exception as e: self.callback.error(f"Failed to create response: {e}") # print stack trace diff --git a/app/pipeline/chat/file_selector_pipeline.py b/app/pipeline/chat/file_selector_pipeline.py index 4b0e222b..87f92288 100644 --- a/app/pipeline/chat/file_selector_pipeline.py +++ b/app/pipeline/chat/file_selector_pipeline.py @@ -3,7 +3,7 @@ from typing import Dict, Optional, List from langchain.output_parsers import PydanticOutputParser -from langchain_core.prompts import PromptTemplate, ChatPromptTemplate +from langchain_core.prompts import PromptTemplate from langchain_core.runnables import Runnable from langsmith import traceable from pydantic import BaseModel @@ -96,17 +96,29 @@ def __call__( logger.info("Running file selector pipeline...") file_list = "\n".join(repository.keys()) - feedback_list = "\n".join(["Case: {}. 
Credits: {}. Info: {}".format( - feedback.test_case_name, - feedback.credits, feedback.text) - for feedback in feedbacks]) if feedbacks else "No feedbacks." + feedback_list = ( + "\n".join( + [ + "Case: {}. Credits: {}. Info: {}".format( + feedback.test_case_name, feedback.credits, feedback.text + ) + for feedback in feedbacks + ] + ) + if feedbacks + else "No feedbacks." + ) chat_history_list = "\n".join([str(message) for message in chat_history]) - response = (self.default_prompt | self.pipeline).with_config({"run_name": "File Selector Prompt"}).invoke( - { - "file_names": file_list, - "feedbacks": feedback_list, - "chat_history": chat_history_list, - "question": str(question), - } + response = ( + (self.default_prompt | self.pipeline) + .with_config({"run_name": "File Selector Prompt"}) + .invoke( + { + "file_names": file_list, + "feedbacks": feedback_list, + "chat_history": chat_history_list, + "question": str(question), + } + ) ) return response.selected_files diff --git a/app/pipeline/chat/interaction_suggestion_pipeline.py b/app/pipeline/chat/interaction_suggestion_pipeline.py new file mode 100644 index 00000000..6c722038 --- /dev/null +++ b/app/pipeline/chat/interaction_suggestion_pipeline.py @@ -0,0 +1,173 @@ +import logging +import traceback +from datetime import datetime +from typing import List, Optional + +from langchain_core.messages import AIMessage +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.prompts import ( + ChatPromptTemplate, +) +from langchain_core.runnables import Runnable +from pydantic.v1 import Field, BaseModel + +from ...common import convert_iris_message_to_langchain_message +from ...domain import PyrisMessage +from app.domain.chat.interaction_suggestion_dto import ( + InteractionSuggestionPipelineExecutionDTO, +) +from ...llm import CapabilityRequestHandler, RequirementList +from ..prompts.iris_interaction_suggestion_prompts import ( + course_chat_begin_prompt, + iris_course_suggestion_initial_system_prompt, + course_chat_history_exists_prompt, + no_course_chat_history_prompt, + iris_exercise_suggestion_initial_system_prompt, + exercise_chat_history_exists_prompt, + no_exercise_chat_history_prompt, + exercise_chat_begin_prompt, + iris_default_suggestion_initial_system_prompt, + default_chat_history_exists_prompt, + no_default_chat_history_prompt, + default_chat_begin_prompt, +) + +from ...llm import CompletionArguments +from ...llm.langchain import IrisLangchainChatModel + +from ..pipeline import Pipeline + +logger = logging.getLogger(__name__) + + +class Questions(BaseModel): + questions: List[str] = Field(description="questions that students may ask") + + +class InteractionSuggestionPipeline(Pipeline): + """Course chat pipeline that answers course related questions from students.""" + + llm: IrisLangchainChatModel + pipeline: Runnable + prompt: ChatPromptTemplate + variant: str + + def __init__(self, variant: str = "default"): + super().__init__(implementation_id="interaction_suggestion_pipeline") + + self.variant = variant + + # Set the langchain chat model + request_handler = CapabilityRequestHandler( + requirements=RequirementList( + gpt_version_equivalent=4, + context_length=16385, + json_mode=True, + ) + ) + completion_args = CompletionArguments( + temperature=0.6, max_tokens=2000, response_format="JSON" + ) + self.llm = IrisLangchainChatModel( + request_handler=request_handler, completion_args=completion_args + ) + + # Create the pipeline + self.pipeline = self.llm | JsonOutputParser(pydantic_object=Questions) + + def 
__repr__(self): + return f"{self.__class__.__name__}(llm={self.llm})" + + def __str__(self): + return f"{self.__class__.__name__}(llm={self.llm})" + + def __call__( + self, dto: InteractionSuggestionPipelineExecutionDTO, **kwargs + ) -> list[str]: + """ + Runs the pipeline + :param dto: The pipeline execution data transfer object + :param kwargs: The keyword arguments + + """ + iris_suggestion_initial_system_prompt = ( + iris_default_suggestion_initial_system_prompt + ) + chat_history_exists_prompt = default_chat_history_exists_prompt + no_chat_history_prompt = no_default_chat_history_prompt + chat_begin_prompt = default_chat_begin_prompt + + if self.variant == "course": + iris_suggestion_initial_system_prompt = ( + iris_course_suggestion_initial_system_prompt + ) + chat_history_exists_prompt = course_chat_history_exists_prompt + no_chat_history_prompt = no_course_chat_history_prompt + chat_begin_prompt = course_chat_begin_prompt + elif self.variant == "exercise": + iris_suggestion_initial_system_prompt = ( + iris_exercise_suggestion_initial_system_prompt + ) + chat_history_exists_prompt = exercise_chat_history_exists_prompt + no_chat_history_prompt = no_exercise_chat_history_prompt + chat_begin_prompt = exercise_chat_begin_prompt + + try: + logger.info("Running course interaction suggestion pipeline...") + + history: List[PyrisMessage] = dto.chat_history or [] + query: Optional[PyrisMessage] = ( + dto.chat_history[-1] if dto.chat_history else None + ) + + if query is not None: + # Add the conversation to the prompt + chat_history_messages = [ + convert_iris_message_to_langchain_message(message) + for message in history + ] + if dto.last_message: + logger.info(f"Last message: {dto.last_message}") + last_message = AIMessage(content=dto.last_message) + chat_history_messages.append(last_message) + + self.prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + iris_suggestion_initial_system_prompt + + "\n" + + chat_history_exists_prompt, + ), + *chat_history_messages, + ("system", chat_begin_prompt), + ] + ) + else: + self.prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + iris_suggestion_initial_system_prompt + + "\n" + + no_chat_history_prompt + + "\n" + + chat_begin_prompt, + ), + ] + ) + response: Questions = (self.prompt | self.pipeline).invoke({}) + return response["questions"] + except Exception as e: + logger.error( + "An error occurred while running the course chat pipeline", exc_info=e + ) + traceback.print_exc() + return [] + + +def datetime_to_string(dt: Optional[datetime]) -> str: + if dt is None: + return "No date provided" + else: + return dt.strftime("%Y-%m-%d %H:%M:%S") diff --git a/app/pipeline/prompts/iris_course_chat_prompts_elicit.py b/app/pipeline/prompts/iris_course_chat_prompts_elicit.py index 6b614713..3da112ca 100644 --- a/app/pipeline/prompts/iris_course_chat_prompts_elicit.py +++ b/app/pipeline/prompts/iris_course_chat_prompts_elicit.py @@ -17,7 +17,7 @@ You can ask about things like the following: - what they learned through exercises and materials recently and what parts they found new and challenging - which kind of task they are struggling with the most -- What the graph about their timliness says about their organization +- What the graph about their timeliness says about their organization - if they have seen how they compare to the rest of the class and what it tells them - if they have recently taken time to look at the Analytics to their right and which patterns they can discover in their behavior and if they are effective or negative 
- their time spent or their performance and ask about plan for the upcoming week regarding this course diff --git a/app/pipeline/prompts/iris_exercise_chat_prompts.py b/app/pipeline/prompts/iris_exercise_chat_prompts.py index eede95da..6ab007bb 100644 --- a/app/pipeline/prompts/iris_exercise_chat_prompts.py +++ b/app/pipeline/prompts/iris_exercise_chat_prompts.py @@ -7,7 +7,7 @@ look at. An excellent educator does no work for the student. Never respond with code of the exercise! -Do not write code that fixes or improves functionality in the student's files! That is their job. +Do not write code that fixes or improves functionality in the student's files! That is their job. The goal is that they learn something from doing the task, and if you do it for them, they won't learn. You can give a single subtle clue or best practice to move the student's attention to an aspect of his problem or task, so they can find a solution on their own. @@ -54,13 +54,20 @@ something else? Q: Can you explain the Quick Sort algorithm to me? Maybe you can give me an example? -A: Quick Sort is a divide-and-conquer algorithm for sorting that works by selecting a 'pivot' element from the array and partitioning the other elements into two sub-arrays, according to whether they are less than or greater than the pivot. The sub-arrays are then recursively sorted. -For example, if we have an array ``[9, 7, 5, 11, 12, 2, 14, 3, 10, 6]``, we could choose 10 as our pivot. We then split the array into elements less than 10 ``[9, 7, 5, 2, 3, 6]`` and elements greater than 10 ``[11, 12, 14]``. We then recursively apply the same process to these two sub-arrays. -Remember, the choice of the pivot can greatly affect the efficiency of Quick Sort, but that's a more advanced topic. For now, understanding the basic process is a great start! -Now, think about how you could apply this algorithm to the task you're working on. Do you see any similarities or differences? +A: Quick Sort is a divide-and-conquer algorithm for sorting that works by selecting a 'pivot' element from the array +and partitioning the other elements into two sub-arrays, according to whether they are less than +or greater than the pivot. The sub-arrays are then recursively sorted. +For example, if we have an array ``[9, 7, 5, 11, 12, 2, 14, 3, 10, 6]``, we could choose 10 as our pivot. +We then split the array into elements less than 10 ``[9, 7, 5, 2, 3, 6]`` and elements greater than 10 ``[11, 12, 14]``. +We then recursively apply the same process to these two sub-arrays. +Remember, the choice of the pivot can greatly affect the efficiency of Quick Sort, but that's a more advanced topic. +For now, understanding the basic process is a great start! +Now, think about how you could apply this algorithm to the task you're working on. +Do you see any similarities or differences? Q: Can you show me the code for the Quick Sort algorithm? -A: I am sorry, but I cannot provide you with the code for the Quick Sort algorithm. However, I can help you understand the algorithm better. +A: I am sorry, but I cannot provide you with the code for the Quick Sort algorithm. +However, I can help you understand the algorithm better. Q: Danke für deine Hilfe A: Gerne! Wenn du weitere Fragen hast, kannst du mich gerne fragen. Ich bin hier, um zu helfen! 
diff --git a/app/pipeline/prompts/iris_interaction_suggestion_prompts.py b/app/pipeline/prompts/iris_interaction_suggestion_prompts.py new file mode 100644 index 00000000..999daac9 --- /dev/null +++ b/app/pipeline/prompts/iris_interaction_suggestion_prompts.py @@ -0,0 +1,190 @@ +iris_course_suggestion_initial_system_prompt = """ +Your main task is to help students come up with good questions they can ask as conversation starters, +so that they can gain insights into their learning progress and strategies. +You can use the current chat history and also observations about how their timeliness in tasks, time of engagement, +performance and progress on the defined competencies is developing to engage them. + +These questions should be framed as if a student is asking a human tutor. + +The students have access to the following metrics: +- Time spent on the tasks +- Performance on the tasks +- Progress on the defined competencies +- Mastery of the defined competencies +- The judgment of learning (JOL) values +- Global average score for each exercise +- Score the student received for each exercise +- Latest submission date for each exercise +- Global average latest submission date for each exercise + +Some useful definitions: +- Time spent: The total time spent on the tasks +- Performance: The score the student received for each exercise +- Progress: The progress on the defined competencies +- Mastery: The mastery of the defined competencies, which is a measure of how well the student has learned the material +- Judgment of learning (JOL): The student's self-reported judgment of how well they have learned the material +- Competencies: A competency is a skill or knowledge that a student should have after completing the course, +and instructors may add lectures and exercises to these competencies. +- Global average score: The average score of all students for each exercise +- Latest submission date: The date of the latest submission for each exercise +- Global average latest submission date: The average latest submission date for each exercise + +Here are some example questions you can generate: + +Q: How can I improve my performance in the course? +Q: What's the correlation between my time investment and scores? +Q: What are the most important things I should focus on to succeed in the course? +Q: What insights can my past activity offer for improving my current performance? +Q: Analyze my scores – where should I focus next? +Q: Suggest targeted practices based on my time spent +Q: How can I improve my mastery of the competencies? + +Respond with the following json blob: +``` +{{ + "questions": [ + "What insights can my past activity offer for improving my current performance?", + "What are the most important things I should focus on to succeed in the course?" + ], +}} +``` +Generate EXACTLY two questions and keep the questions CONCISE. +""" + +iris_exercise_suggestion_initial_system_prompt = """ +Your main task is to help students come up with good questions they can ask as conversation starters, +so that they can ask for help with their current programming exercise. +You can use the current chat history and also observations about their progress in the exercise so far to engage them. + +These questions should be framed as if a student is asking a human tutor. + +Here are some example questions you can generate: + +Q: How can I fix the error in my code? +Q: How can I improve the performance of my code? +Q: What are the best practices for solving this exercise? 
+Q: What kind of strategies can I use to solve this exercise? +Q: Analyze my code – where should I focus next? +Q: What suggestions do you have for improving my code? +Q: What is currently missing in my code? + +Respond with the following json blob: +``` +{{ + "questions": [ + "How can I fix the error in my code?", + "What are the best practices for solving this exercise?" + ], +}} +``` +Generate EXACTLY TWO questions. +""" + +iris_default_suggestion_initial_system_prompt = """ +Your main task is to help students come up with good questions they can ask as conversation starters, +so that they can engage in a conversation with a human tutor. +You can use the current chat history so far to engage them. + +Here are some example questions you can generate: + +Q: What are the alternatives for solving this problem? +Q: Tell me more about this. +Q: What should I focus on next? +Q: What do you suggest next? +Q: What are the best practices for solving this problem? + +Respond with the following json blob: +``` +{{ + "questions": [ + "Tell me more about this.", + "What do you suggest next?" + ], +}} +``` +Generate EXACTLY two questions and keep the questions CONCISE. +""" + +default_chat_history_exists_prompt = """ +The following messages represent the chat history of your conversation with the student so far. +Use it to generate questions that are consistent with the conversation. +The questions should be engaging and insightful so that the student continues to engage in the conversation. +Avoid repeating or reusing previous questions or messages; always in all circumstances craft new and original questions. +Never re-use any questions that are already asked. Instead, always write new and original questions. +""" + +course_chat_history_exists_prompt = """ +The following messages represent the chat history of your conversation with the student so far. +Use it to generate questions that are consistent with the conversation and informed by the student's progress. +The questions should be engaging and insightful so that the student continues to engage in the conversation. +Avoid repeating or reusing previous questions or messages; always in all circumstances craft new and original questions. +Never re-use any questions that are already asked. Instead, always write new and original questions. +""" + +exercise_chat_history_exists_prompt = """ +The following messages represent the chat history of your conversation with the student so far. +Use it to generate questions that are consistent with the conversation and informed by the student's progress +in the exercise. +The questions should be engaging and insightful so that the student continues to engage in the conversation. +Avoid repeating or reusing previous questions or messages; always in all circumstances craft new and original questions. +Never re-use any questions that are already asked. Instead, always write new and original questions. +""" + +no_course_chat_history_prompt = """ +The conversation with the student has not yet started. They have not asked any questions yet. +It is your task to generate questions that can initiate the conversation. +Check the data for anything useful to come up with questions that a student might ask to engage in a conversation. +It should trigger the student to engage in a conversation about their progress in the course. +Think of a question that a student visiting the dashboard would likely ask a human tutor +to get insights into their learning progress and strategies. 
+""" + +no_exercise_chat_history_prompt = """ +The conversation with the student has not yet started. They have not asked any questions yet. +It is your task to generate questions that can initiate the conversation. +Check the data for anything useful to come up with questions that a student might ask to engage in a conversation. +It should trigger the student to engage in a conversation about their progress in the exercise. +Think of a question that a student visiting the dashboard would likely ask a human tutor +to get help solving the programming exercise. +""" + +no_default_chat_history_prompt = """ +The conversation with the student has not yet started. They have not asked any questions yet. +It is your task to generate questions that can initiate the conversation. +Check the data for anything useful to come up with questions that a student might ask to engage in a conversation. +It should trigger the student to engage in a conversation with a human tutor. +""" + +course_system_prompt = """ +These are the details about the course: +- Course name: {course_name} +- Course description: {course_description} +- Default programming language: {programming_language} +- Course start date: {course_start_date} +- Course end date: {course_end_date} +""" + +course_chat_begin_prompt = """ +Now, generate questions that a student might ask a human tutor to get insights into their learning progress +and strategies. +Remember, you only generate questions, not answers. These questions should be framed +as if a student is asking a human tutor. The questions will later be used by the student to engage in a conversation +with the tutor. +Generate EXACTLY two questions and keep the questions CONCISE. +""" + +exercise_chat_begin_prompt = """ +Now, generate questions that a student might ask a human tutor to get help with their current programming exercise. +Remember, you only generate questions, not answers. These questions should be framed +as if a student is asking a human tutor. The questions will later be used by the student to engage in a conversation +with the tutor about the exercise. +Generate EXACTLY two questions. +""" + +default_chat_begin_prompt = """ +Now, generate questions that a student might ask a human tutor to engage in a conversation. +Remember, you only generate questions, not answers. These questions should be framed +as if a student is asking a human tutor. The questions will later be used by the student to engage +in a conversation with the tutor. +Generate EXACTLY two questions. 
+""" diff --git a/app/web/routers/pipelines.py b/app/web/routers/pipelines.py index 64afd303..dad8cba1 100644 --- a/app/web/routers/pipelines.py +++ b/app/web/routers/pipelines.py @@ -1,17 +1,17 @@ import logging import traceback from threading import Thread -from urllib.request import Request -from fastapi import APIRouter, status, Response, Depends, FastAPI -from fastapi.exceptions import RequestValidationError -from starlette.responses import JSONResponse +from fastapi import APIRouter, status, Response, Depends from app.domain import ( - ExerciseChatPipelineExecutionDTO, + ExerciseChatPipelineExecutionDTO, CourseChatPipelineExecutionDTO, ) -from app.web.status.status_update import ExerciseChatStatusCallback, CourseChatStatusCallback +from app.web.status.status_update import ( + ExerciseChatStatusCallback, + CourseChatStatusCallback, +) from app.pipeline.chat.course_chat_pipeline import CourseChatPipeline from app.pipeline.chat.exercise_chat_pipeline import ExerciseChatPipeline from app.dependencies import TokenValidator @@ -38,7 +38,8 @@ def run_exercise_chat_pipeline_worker(dto: ExerciseChatPipelineExecutionDTO): except Exception as e: logger.error(f"Error running exercise chat pipeline: {e}") logger.error(traceback.format_exc()) - callback.error('Fatal error.') + callback.error("Fatal error.") + @router.post( "/tutor-chat/{variant}/run", @@ -68,8 +69,7 @@ def run_course_chat_pipeline_worker(dto, variant): except Exception as e: logger.error(f"Error running exercise chat pipeline: {e}") logger.error(traceback.format_exc()) - callback.error('Fatal error.') - + callback.error("Fatal error.") @router.post( diff --git a/app/web/status/status_update.py b/app/web/status/status_update.py index db5e9c3e..53c58485 100644 --- a/app/web/status/status_update.py +++ b/app/web/status/status_update.py @@ -1,5 +1,4 @@ from typing import Optional, List -from abc import ABC import requests from abc import ABC @@ -9,7 +8,9 @@ ) from ...domain.status.stage_state_dto import StageStateEnum from ...domain.status.stage_dto import StageDTO -from ...domain.chat.exercise_chat.exercise_chat_status_update_dto import ExerciseChatStatusUpdateDTO +from ...domain.chat.exercise_chat.exercise_chat_status_update_dto import ( + ExerciseChatStatusUpdateDTO, +) from ...domain.status.status_update_dto import StatusUpdateDTO import logging @@ -44,6 +45,7 @@ def __init__( def on_status_update(self): """Send a status update to the Artemis API.""" try: + print(self.status.dict(by_alias=True)) requests.post( self.url, headers={ @@ -78,9 +80,17 @@ def in_progress(self, message: Optional[str] = None): self.stage.message = message self.on_status_update() else: - raise ValueError("Invalid state transition to in_progress. current state is ", self.stage.state) + raise ValueError( + "Invalid state transition to in_progress. current state is ", + self.stage.state, + ) - def done(self, message: Optional[str] = None, final_result: Optional[str] = None): + def done( + self, + message: Optional[str] = None, + final_result: Optional[str] = None, + suggestions: Optional[List[str]] = None, + ): """ Transition the current stage to DONE and update the status. If there is a next stage, set the current @@ -94,9 +104,12 @@ def done(self, message: Optional[str] = None, final_result: Optional[str] = None self.stage = next_stage else: self.status.result = final_result + self.status.suggestions = suggestions self.on_status_update() else: - raise ValueError("Invalid state transition to done. 
current state is ", self.stage.state) + raise ValueError( + "Invalid state transition to done. current state is ", self.stage.state + ) def error(self, message: str): """