forked from google-ai-edge/mediapipe

Commit 33d683c

Project import generated by Copybara.
GitOrigin-RevId: 373e3ac1e5839befd95bf7d73ceff3c5f1171969

MediaPipe Team authored and jqtang committed Oct 6, 2021
1 parent 137e1cc

Showing 153 changed files with 7,868 additions and 1,346 deletions.
docs/solutions/face_detection.md
@@ -121,12 +121,10 @@ with mp_face_detection.FaceDetection(
       # If loading a video, use 'break' instead of 'continue'.
       continue

-    # Flip the image horizontally for a later selfie-view display, and convert
-    # the BGR image to RGB.
-    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
     # To improve performance, optionally mark the image as not writeable to
     # pass by reference.
     image.flags.writeable = False
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
     results = face_detection.process(image)

     # Draw the face detection annotations on the image.
@@ -135,7 +133,8 @@ with mp_face_detection.FaceDetection(
     if results.detections:
       for detection in results.detections:
         mp_drawing.draw_detection(image, detection)
-    cv2.imshow('MediaPipe Face Detection', image)
+    # Flip the image horizontally for a selfie-view display.
+    cv2.imshow('MediaPipe Face Detection', cv2.flip(image, 1))
     if cv2.waitKey(5) & 0xFF == 27:
       break
 cap.release()
@@ -200,7 +199,7 @@ const faceDetection = new FaceDetection({locateFile: (file) => {
   return `https://cdn.jsdelivr.net/npm/@mediapipe/face_detection/${file}`;
 }});
 faceDetection.setOptions({
-  modelSelection: 0
+  modelSelection: 0,
+  minDetectionConfidence: 0.5
 });
 faceDetection.onResults(onResults);
@@ -216,6 +215,194 @@ camera.start();
 </script>
 ```

(All remaining lines in this hunk are additions: the new Android Solution API section below.)
### Android Solution API

Please first follow the general
[instructions](../getting_started/android_solutions.md#integrate-mediapipe-android-solutions-api)
to add the MediaPipe Gradle dependencies, then try the Face Detection Solution API
in the companion
[example Android Studio project](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/solutions/facedetection)
following
[these instructions](../getting_started/android_solutions.md#build-solution-example-apps-in-android-studio),
and learn more in the usage examples below.

Supported configuration options:

* [staticImageMode](#static_image_mode)
* [modelSelection](#model_selection)
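Both options map onto `FaceDetectionOptions` through its builder, as the examples below show. A minimal configuration sketch (the option semantics follow the solution's documentation: `0` selects the short-range model for faces within roughly 2 meters, `1` the full-range model for faces within roughly 5 meters):

```java
// A minimal options sketch; both setters appear in the usage examples below.
FaceDetectionOptions options =
    FaceDetectionOptions.builder()
        // true for static images; false for a video or camera stream.
        .setStaticImageMode(false)
        // 0: short-range model (~2 m); 1: full-range model (~5 m).
        .setModelSelection(0)
        .build();
```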
#### Camera Input

```java
// For camera input and result rendering with OpenGL.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(false)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error: " + message));

// Initializes a new CameraInput instance and connects it to MediaPipe Face Detection Solution.
CameraInput cameraInput = new CameraInput(this);
cameraInput.setNewFrameListener(
    textureFrame -> faceDetection.send(textureFrame));

// Initializes a new GlSurfaceView with a ResultGlRenderer<FaceDetectionResult> instance
// that provides the interfaces to run user-defined OpenGL rendering code.
// See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultGlRenderer.java
// as an example.
SolutionGlSurfaceView<FaceDetectionResult> glSurfaceView =
    new SolutionGlSurfaceView<>(
        this, faceDetection.getGlContext(), faceDetection.getGlMajorVersion());
glSurfaceView.setSolutionResultRenderer(new FaceDetectionResultGlRenderer());
glSurfaceView.setRenderInputImage(true);
faceDetection.setResultListener(
    faceDetectionResult -> {
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip normalized coordinates (value range: [0, 1]): x=%f, y=%f",
              noseTip.getX(), noseTip.getY()));
      // Request GL rendering.
      glSurfaceView.setRenderData(faceDetectionResult);
      glSurfaceView.requestRender();
    });

// The runnable to start camera after the GLSurfaceView is attached.
glSurfaceView.post(
    () ->
        cameraInput.start(
            this,
            faceDetection.getGlContext(),
            CameraInput.CameraFacing.FRONT,
            glSurfaceView.getWidth(),
            glSurfaceView.getHeight()));
```
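Note that the listener above reads the keypoints of face 0 unconditionally; frames with no face in view produce an empty detection list. A guard sketch, assuming the result exposes its detections through a `multiFaceDetections()` accessor as in the companion example app (treat that accessor name as an assumption):

```java
faceDetection.setResultListener(
    faceDetectionResult -> {
      // Skip logging and rendering when no face was detected in this frame.
      // multiFaceDetections() is assumed from the example app's result class.
      if (faceDetectionResult.multiFaceDetections().isEmpty()) {
        return;
      }
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(TAG, String.format("Nose tip: x=%f, y=%f", noseTip.getX(), noseTip.getY()));
      glSurfaceView.setRenderData(faceDetectionResult);
      glSurfaceView.requestRender();
    });
```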

#### Image Input

```java
// For reading images from gallery and drawing the output in an ImageView.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(true)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);

// Connects MediaPipe Face Detection Solution to the user-defined ImageView
// instance that allows users to have the custom drawing of the output landmarks
// on it. See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultImageView.java
// as an example.
FaceDetectionResultImageView imageView = new FaceDetectionResultImageView(this);
faceDetection.setResultListener(
    faceDetectionResult -> {
      int width = faceDetectionResult.inputBitmap().getWidth();
      int height = faceDetectionResult.inputBitmap().getHeight();
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip coordinates (pixel values): x=%f, y=%f",
              noseTip.getX() * width, noseTip.getY() * height));
      // Request canvas drawing.
      imageView.setFaceDetectionResult(faceDetectionResult);
      runOnUiThread(() -> imageView.update());
    });
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error: " + message));

// ActivityResultLauncher to get an image from the gallery as Bitmap.
ActivityResultLauncher<Intent> imageGetter =
    registerForActivityResult(
        new ActivityResultContracts.StartActivityForResult(),
        result -> {
          Intent resultIntent = result.getData();
          if (resultIntent != null && result.getResultCode() == RESULT_OK) {
            Bitmap bitmap = null;
            try {
              bitmap =
                  MediaStore.Images.Media.getBitmap(
                      this.getContentResolver(), resultIntent.getData());
              // Please also rotate the Bitmap based on its orientation.
            } catch (IOException e) {
              Log.e(TAG, "Bitmap reading error: " + e);
            }
            if (bitmap != null) {
              faceDetection.send(bitmap);
            }
          }
        });
Intent gallery = new Intent(
    Intent.ACTION_PICK, MediaStore.Images.Media.INTERNAL_CONTENT_URI);
imageGetter.launch(gallery);
```
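The `// Please also rotate the Bitmap based on its orientation.` comment is left to the reader. A minimal sketch of that step using androidx's `ExifInterface` (the helper name `rotateByExif` is hypothetical, not part of the Solution API); the stream can come from `getContentResolver().openInputStream(resultIntent.getData())` before decoding the Bitmap:

```java
import android.graphics.Bitmap;
import android.graphics.Matrix;
import androidx.exifinterface.media.ExifInterface;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical helper: rotates a gallery Bitmap upright based on its EXIF
// orientation tag so that face detection sees the face the right way up.
static Bitmap rotateByExif(InputStream imageStream, Bitmap bitmap) throws IOException {
  ExifInterface exif = new ExifInterface(imageStream);
  int orientation =
      exif.getAttributeInt(ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_NORMAL);
  int degrees;
  switch (orientation) {
    case ExifInterface.ORIENTATION_ROTATE_90:
      degrees = 90;
      break;
    case ExifInterface.ORIENTATION_ROTATE_180:
      degrees = 180;
      break;
    case ExifInterface.ORIENTATION_ROTATE_270:
      degrees = 270;
      break;
    default:
      return bitmap;  // Already upright; nothing to do.
  }
  Matrix matrix = new Matrix();
  matrix.postRotate(degrees);
  return Bitmap.createBitmap(
      bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
}
```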

#### Video Input

```java
// For video input and result rendering with OpenGL.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(false)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error: " + message));

// Initializes a new VideoInput instance and connects it to MediaPipe Face Detection Solution.
VideoInput videoInput = new VideoInput(this);
videoInput.setNewFrameListener(
    textureFrame -> faceDetection.send(textureFrame));

// Initializes a new GlSurfaceView with a ResultGlRenderer<FaceDetectionResult> instance
// that provides the interfaces to run user-defined OpenGL rendering code.
// See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultGlRenderer.java
// as an example.
SolutionGlSurfaceView<FaceDetectionResult> glSurfaceView =
    new SolutionGlSurfaceView<>(
        this, faceDetection.getGlContext(), faceDetection.getGlMajorVersion());
glSurfaceView.setSolutionResultRenderer(new FaceDetectionResultGlRenderer());
glSurfaceView.setRenderInputImage(true);

faceDetection.setResultListener(
    faceDetectionResult -> {
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip normalized coordinates (value range: [0, 1]): x=%f, y=%f",
              noseTip.getX(), noseTip.getY()));
      // Request GL rendering.
      glSurfaceView.setRenderData(faceDetectionResult);
      glSurfaceView.requestRender();
    });

ActivityResultLauncher<Intent> videoGetter =
    registerForActivityResult(
        new ActivityResultContracts.StartActivityForResult(),
        result -> {
          Intent resultIntent = result.getData();
          if (resultIntent != null) {
            if (result.getResultCode() == RESULT_OK) {
              glSurfaceView.post(
                  () ->
                      videoInput.start(
                          this,
                          resultIntent.getData(),
                          faceDetection.getGlContext(),
                          glSurfaceView.getWidth(),
                          glSurfaceView.getHeight()));
            }
          }
        });
Intent gallery =
    new Intent(Intent.ACTION_PICK, MediaStore.Video.Media.INTERNAL_CONTENT_URI);
videoGetter.launch(gallery);
```
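When the activity pauses, the camera and video pipelines above should be shut down so the camera and decoder are released. A teardown sketch mirroring the companion example app's cleanup, assuming the inputs, GL view, and solution are stored as activity fields (the `close()` methods follow that project and should be treated as assumptions, not a documented API):

```java
// Teardown sketch: release input sources and the solution itself.
// close() on CameraInput/VideoInput/FaceDetection follows the example app.
@Override
protected void onPause() {
  super.onPause();
  if (cameraInput != null) {
    cameraInput.setNewFrameListener(null);
    cameraInput.close();
  }
  if (videoInput != null) {
    videoInput.setNewFrameListener(null);
    videoInput.close();
  }
  if (glSurfaceView != null) {
    glSurfaceView.setVisibility(View.GONE);
  }
  if (faceDetection != null) {
    faceDetection.close();
  }
}
```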

## Example Apps

Please first see general instructions for