feat: added face detection to images #129

Merged · 2 commits · Jun 22, 2024
118 changes: 114 additions & 4 deletions examples/facelandmarkdetection/src/Photo.tsx
@@ -1,21 +1,122 @@
 import type { BottomTabScreenProps } from "@react-navigation/bottom-tabs";
-import React from "react";
+import React, { useState, useCallback } from "react";
 import { View, Text, Pressable, StyleSheet, Image } from "react-native";
 import type { RootTabParamList } from "./navigation";
 import ImagePicker from "react-native-image-crop-picker";
 import { objectDetectionOnImage, type Dims } from "react-native-mediapipe";
 import { useSettings } from "./app-settings";
+import {
+  faceLandmarkDetectionModuleConstants,
+  useFaceLandmarkDetection,
+  type FaceLandmarksModuleConstants,
+  RunningMode,
+  type FaceLandmarkDetectionResultBundle,
+} from "react-native-mediapipe";
+import { FaceDrawFrame, convertLandmarksToSegments, type FaceSegment } from "./Drawing";
 
 type Props = BottomTabScreenProps<RootTabParamList, "Photo">;
 
 const PHOTO_SIZE: Dims = { width: 300, height: 400 };
 
 export const Photo: React.FC<Props> = () => {
-  const [screenState, setScreenState] = React.useState<
+  const [screenState, setScreenState] = useState<
     "initial" | "selecting" | "inferring" | "completed" | "error"
   >("initial");
   const { settings } = useSettings();
-  const [imagePath, setImagePath] = React.useState<string>();
+  const [imagePath, setImagePath] = useState<string>();
+  const [faceLandmarks] = useState<FaceLandmarksModuleConstants["knownLandmarks"]>(
+    faceLandmarkDetectionModuleConstants().knownLandmarks
+  );
+  const [faceSegments, setFaceSegments] = useState<FaceSegment[]>([]);
+
+  const onFaceDetectionResults = useCallback((
+    results: FaceLandmarkDetectionResultBundle,
+    viewSize: Dims,
+    mirrored: boolean
+  ) => {
+    if (results.results.length === 0) {
+      setFaceSegments([]);
+      return;
+    }
+    const firstResult = results.results[0];
+    const segments = firstResult.faceLandmarks.length > 0
+      ? [
+          ...convertLandmarksToSegments(
+            firstResult.faceLandmarks[0],
+            faceLandmarks.lips,
+            "FireBrick",
+            {
+              width: results.inputImageWidth,
+              height: results.inputImageHeight,
+            },
+            { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
+            mirrored
+          ),
+          ...convertLandmarksToSegments(
+            firstResult.faceLandmarks[0],
+            faceLandmarks.leftEye,
+            "ForestGreen",
+            {
+              width: results.inputImageWidth,
+              height: results.inputImageHeight,
+            },
+            { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
+            mirrored
+          ),
+          ...convertLandmarksToSegments(
+            firstResult.faceLandmarks[0],
+            faceLandmarks.rightEye,
+            "ForestGreen",
+            {
+              width: results.inputImageWidth,
+              height: results.inputImageHeight,
+            },
+            { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
+            mirrored
+          ),
+          ...convertLandmarksToSegments(
+            firstResult.faceLandmarks[0],
+            faceLandmarks.leftEyebrow,
+            "Coral",
+            {
+              width: results.inputImageWidth,
+              height: results.inputImageHeight,
+            },
+            { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
+            mirrored
+          ),
+          ...convertLandmarksToSegments(
+            firstResult.faceLandmarks[0],
+            faceLandmarks.rightEyebrow,
+            "Coral",
+            {
+              width: results.inputImageWidth,
+              height: results.inputImageHeight,
+            },
+            { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
+            mirrored
+          ),
+        ]
+      : [];
+
+    console.log(JSON.stringify({ infTime: results.inferenceTime }));
+    setFaceSegments(segments);
+  }, [faceLandmarks]);
+
+  const onFaceDetectionError = useCallback((error: unknown) => {
+    console.error(`onError: ${error}`);
+  }, []);
+
+  const faceDetection = useFaceLandmarkDetection(
+    onFaceDetectionResults,
+    onFaceDetectionError,
+    RunningMode.IMAGE,
+    "face_landmarker.task",
+    {
+      delegate: settings.processor,
+    }
+  );
+
   const onClickSelectPhoto = async () => {
     setScreenState("selecting");
     try {
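
Note (editor's sketch, not part of this PR): the five convertLandmarksToSegments calls in onFaceDetectionResults above differ only in the landmark group and the stroke color. Assuming only the signature already used in this diff, the same segments could be built table-driven:

  // Table of landmark groups and colors, taken verbatim from the calls above.
  const FEATURE_COLORS: ReadonlyArray<
    [keyof FaceLandmarksModuleConstants["knownLandmarks"], string]
  > = [
    ["lips", "FireBrick"],
    ["leftEye", "ForestGreen"],
    ["rightEye", "ForestGreen"],
    ["leftEyebrow", "Coral"],
    ["rightEyebrow", "Coral"],
  ];

  // Flatten one segment list per facial feature into a single array.
  const segments = firstResult.faceLandmarks.length > 0
    ? FEATURE_COLORS.flatMap(([group, color]) =>
        convertLandmarksToSegments(
          firstResult.faceLandmarks[0],
          faceLandmarks[group],
          color,
          { width: results.inputImageWidth, height: results.inputImageHeight },
          { width: PHOTO_SIZE.width, height: PHOTO_SIZE.height },
          mirrored
        )
      )
    : [];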
@@ -24,6 +125,7 @@
         width: PHOTO_SIZE.width,
         height: PHOTO_SIZE.height,
       });
+
       const results = await objectDetectionOnImage(
         image.path,
         `${settings.model}.tflite`
@@ -39,6 +141,9 @@
           })),
         })
       );
+
+      faceDetection.detect(image.path);
+
Check failure on line 145 in examples/facelandmarkdetection/src/Photo.tsx (GitHub Actions / lint): Property 'detect' does not exist on type 'MediaPipeSolution'.
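Note (editor's sketch, not part of this PR): the lint failure above means the value returned by useFaceLandmarkDetection is typed as MediaPipeSolution, which does not declare a detect method. Assuming the method does exist at runtime, a narrowly scoped cast could unblock the build until the library's types catch up; ImageDetector here is a hypothetical local type, not a react-native-mediapipe export.

  // Hypothetical local type: assumes detect(imagePath) exists at runtime even
  // though MediaPipeSolution's declared type does not include it.
  type ImageDetector = { detect: (imagePath: string) => void };
  (faceDetection as unknown as ImageDetector).detect(image.path);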

       setImagePath(image.path);
       setScreenState("completed");
     } catch (e) {
@@ -47,7 +152,6 @@
     }
   };
 
-  // TODO : implement face landmark rendering
   return (
     <View style={styles.root}>
       {screenState === "initial" && (
@@ -59,6 +163,11 @@
         <>
           <View style={styles.photoContainer}>
             <Image source={{ uri: imagePath }} style={styles.photo} />
+            <FaceDrawFrame
+              style={StyleSheet.absoluteFill}
+              facePoints={[]}
+              faceSegments={faceSegments}
+            />
           </View>
           <Pressable style={styles.selectButton} onPress={onClickSelectPhoto}>
             <Text style={styles.selectButtonText}>Select a new photo</Text>
@@ -85,3 +194,4 @@
   photo: { position: "absolute", top: 0, left: 0, right: 0, bottom: 0 },
   errorText: { fontSize: 30, color: "red" },
 });
+