Skip to content
This repository has been archived by the owner on Dec 8, 2024. It is now read-only.

Commit

Permalink
docs: conform to *actual* Google-style docstrings
Browse files Browse the repository at this point in the history
  • Loading branch information
LimaoC committed Aug 18, 2024
1 parent 71c52c5 commit f288e16
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 20 deletions.
3 changes: 1 addition & 2 deletions client/models/pose_detection/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,7 @@


def is_camera_aligned(pose_landmark_result: PoseLandmarkerResult) -> np.bool_:
"""
Checks whether the camera is aligned to capture the person's side view.
"""Checks whether the camera is aligned to capture the person's side view.
Args:
pose_landmark_result: Landmarker result as returned by a
Expand Down
26 changes: 15 additions & 11 deletions client/models/pose_detection/classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,23 +13,25 @@


def posture_angle(p1: Landmark, p2: Landmark) -> np.float64:
"""
Returns the angle (in degrees) between P2 and P3, where P3 is a point on the
vertical axis of P1 (i.e. its x coordinate is the same as P1's), and is the "ideal"
location of the P2 landmark for good posture.
"""Calculates the neck or torso posture angle (in degrees).
In particular, this calculates the angle (in degrees) between p2 and p3, where p3
is a point on the vertical axis of p1 (i.e. same x coordinate as p1), and
represents the "ideal" location of the p2 landmark for good posture.
The y coordinate of p3 is irrelevant but for simplicity we set it to zero.
The y coordinate of P3 is irrelevant but for simplicity we set it to zero.
For neck posture, take p1 to be the shoulder, p2 to be the ear. For torso posture,
take p1 to be the hip, p2 to be the shoulder.
For a neck inclination calculation, take P1 to be the shoulder location and pivot
point, and P2 to be the ear location. For a torso inclination calculation, take P1
to be the hip location and pivot point, and P2 to be the hip location.
REF: https://learnopencv.com/wp-content/uploads/2022/03/MediaPipe-pose-neckline-inclination.jpg
Args:
p1: Landmark for P1 as described above
p2: Landmark for P2 as described above
Returns:
Angle (in degrees) between P2 and P3
Neck or torso posture angle (in degrees)
"""
x1, y1 = p1.x, p1.y
x2, y2 = p2.x, p2.y
Expand All @@ -38,8 +40,7 @@ def posture_angle(p1: Landmark, p2: Landmark) -> np.float64:


def posture_classify(pose_landmark_result: PoseLandmarkerResult) -> np.bool_:
"""
Returns whether the pose in the image has good or bad posture.
"""Classifies the pose in the image as either good or bad posture.
Note: The camera should be aligned to capture the person's side view; the output
may not be accurate otherwise. See `is_camera_aligned()`.
Expand All @@ -49,6 +50,9 @@ def posture_classify(pose_landmark_result: PoseLandmarkerResult) -> np.bool_:
Args:
pose_landmark_result: Landmarker result as returned by a
mediapipe.tasks.vision.PoseLandmarker
Returns:
True if the pose has good posture, False otherwise
"""
landmarks: list[list[Landmark]] = pose_landmark_result.pose_world_landmarks

Expand Down
8 changes: 5 additions & 3 deletions client/models/pose_detection/landmarking.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,10 @@

@dataclass
class AnnotatedImage:
"""
Represents mutable annoted image through data attribute. Can be used to set
annotated image within a callback asynchronously without raising an error.
"""Represents mutable annotated image through data attribute.
Can be used to set annotated image within a callback asynchronously without raising
an error.
"""

data: Optional[np.ndarray] = None
Expand Down Expand Up @@ -62,6 +63,7 @@ def display_landmarking(
annotated_image: AnnotatedImage,
) -> None:
"""Mutates annotated image to contain visualization of detected landmarks.
Also prints debugging info to the standard output.
Args:
Expand Down
6 changes: 2 additions & 4 deletions client/models/pose_detection/routines.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,7 @@ def __init__(
self.video_capture = cv2.VideoCapture(0)

def track_posture(self) -> None:
"""
Get frame from video capture device and process with pose model, then posture
"""Get frame from video capture device and process with pose model, then posture
algorithm. Print debugging info and display landmark annotated frame.
"""
success, frame = self.video_capture.read()
Expand All @@ -61,8 +60,7 @@ def __exit__(self, unused_exc_type, unused_exc_value, unused_traceback) -> None:


def create_debug_posture_tracker() -> DebugPostureTracker:
"""
Handles config of livestreamed input and model loading.
"""Handles config of livestreamed input and model loading.
Returns:
Tracker object which acts as context manager.
Expand Down

0 comments on commit f288e16

Please sign in to comment.