Commit 33d683c
Project import generated by Copybara.
GitOrigin-RevId: 373e3ac1e5839befd95bf7d73ceff3c5f1171969
MediaPipe Team authored and jqtang committed Oct 6, 2021
1 parent 137e1cc commit 33d683c
Showing 153 changed files with 7,868 additions and 1,346 deletions.
15 changes: 9 additions & 6 deletions WORKSPACE
@@ -16,11 +16,11 @@ bazel_skylib_workspace()
load("@bazel_skylib//lib:versions.bzl", "versions")
versions.check(minimum_bazel_version = "3.7.2")

-# ABSL cpp library lts_2020_09_23
+# ABSL cpp library lts_2021_03_24, patch 2.
http_archive(
    name = "com_google_absl",
    urls = [
-        "https://github.com/abseil/abseil-cpp/archive/20200923.tar.gz",
+        "https://github.com/abseil/abseil-cpp/archive/refs/tags/20210324.2.tar.gz",
    ],
    # Remove after https://github.com/abseil/abseil-cpp/issues/326 is solved.
    patches = [
@@ -29,8 +29,8 @@ http_archive(
    patch_args = [
        "-p1",
    ],
-    strip_prefix = "abseil-cpp-20200923",
-    sha256 = "b3744a4f7a249d5eaf2309daad597631ce77ea62e0fc6abffbab4b4c3dc0fc08"
+    strip_prefix = "abseil-cpp-20210324.2",
+    sha256 = "59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f"
)

http_archive(
@@ -333,6 +333,7 @@ maven_install(
        "androidx.concurrent:concurrent-futures:1.0.0-alpha03",
        "androidx.lifecycle:lifecycle-common:2.3.1",
        "androidx.activity:activity:1.2.2",
+        "androidx.exifinterface:exifinterface:1.3.3",
        "androidx.fragment:fragment:1.3.4",
        "androidx.annotation:annotation:aar:1.1.0",
        "androidx.appcompat:appcompat:aar:1.1.0-rc01",
@@ -349,8 +350,8 @@ maven_install(
        "com.google.auto.value:auto-value:1.8.1",
        "com.google.auto.value:auto-value-annotations:1.8.1",
        "com.google.code.findbugs:jsr305:latest.release",
-        "com.google.flogger:flogger-system-backend:latest.release",
-        "com.google.flogger:flogger:latest.release",
+        "com.google.flogger:flogger-system-backend:0.6",
+        "com.google.flogger:flogger:0.6",
        "com.google.guava:guava:27.0.1-android",
        "com.google.guava:listenablefuture:1.0",
        "junit:junit:4.12",
@@ -389,6 +390,8 @@ http_archive(
    patches = [
        "@//third_party:org_tensorflow_compatibility_fixes.diff",
        "@//third_party:org_tensorflow_objc_cxx17.diff",
+        # Diff is generated with a script, don't update it manually.
+        "@//third_party:org_tensorflow_custom_ops.diff",
    ],
    patch_args = [
        "-p1",
16 changes: 9 additions & 7 deletions docs/getting_started/android_solutions.md
@@ -26,15 +26,17 @@ the following into the project's Gradle dependencies:

```
dependencies {
-    // MediaPipe solution-core is the foundation of any MediaPipe solutions.
+    // MediaPipe solution-core is the foundation of any MediaPipe Solutions.
    implementation 'com.google.mediapipe:solution-core:latest.release'
-    // Optional: MediaPipe Hands solution.
-    implementation 'com.google.mediapipe:hands:latest.release'
-    // Optional: MediaPipe FaceMesh solution.
+    // Optional: MediaPipe Face Detection Solution.
+    implementation 'com.google.mediapipe:facedetection:latest.release'
+    // Optional: MediaPipe Face Mesh Solution.
    implementation 'com.google.mediapipe:facemesh:latest.release'
+    // Optional: MediaPipe Hands Solution.
+    implementation 'com.google.mediapipe:hands:latest.release'
    // MediaPipe deps
-    implementation 'com.google.flogger:flogger:latest.release'
-    implementation 'com.google.flogger:flogger-system-backend:latest.release'
+    implementation 'com.google.flogger:flogger:0.6'
+    implementation 'com.google.flogger:flogger-system-backend:0.6'
    implementation 'com.google.guava:guava:27.0.1-android'
    implementation 'com.google.protobuf:protobuf-java:3.11.4'
    // CameraX core library
@@ -45,7 +47,7 @@ dependencies {
}
```

-See the detailed solutions API usage examples for different use cases in the
+See the detailed solution APIs usage examples for different use cases in the
solution example apps'
[source code](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/solutions).
If the prebuilt maven packages are not sufficient, building the MediaPipe
2 changes: 1 addition & 1 deletion docs/getting_started/faq.md
@@ -103,7 +103,7 @@ monotonically increasing timestamps. By convention, realtime calculators and
graphs use the recording time or the presentation time as the timestamp for each
packet, with each timestamp representing microseconds since
`Jan/1/1970:00:00:00`. This allows packets from various sources to be processed
-in a gloablly consistent order.
+in a globally consistent order.
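
As a worked example of this convention (plain epoch arithmetic, not any MediaPipe API), a Java sketch that derives such a microsecond timestamp:

```java
import java.time.Instant;

// Microseconds since Jan/1/1970:00:00:00 (UTC), per the convention above.
Instant now = Instant.now();
long timestampUs = now.getEpochSecond() * 1_000_000L + now.getNano() / 1_000L;
```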

Normally for offline processing, every input packet is processed and processing
continues as long as necessary. For online processing, it is often necessary to
Binary file added docs/images/attention_mesh_architecture.png
197 changes: 192 additions & 5 deletions docs/solutions/face_detection.md
@@ -121,12 +121,10 @@ with mp_face_detection.FaceDetection(
      # If loading a video, use 'break' instead of 'continue'.
      continue

-    # Flip the image horizontally for a later selfie-view display, and convert
-    # the BGR image to RGB.
-    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    # To improve performance, optionally mark the image as not writeable to
    # pass by reference.
    image.flags.writeable = False
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = face_detection.process(image)

    # Draw the face detection annotations on the image.
@@ -135,7 +133,8 @@
    if results.detections:
      for detection in results.detections:
        mp_drawing.draw_detection(image, detection)
-    cv2.imshow('MediaPipe Face Detection', image)
+    # Flip the image horizontally for a selfie-view display.
+    cv2.imshow('MediaPipe Face Detection', cv2.flip(image, 1))
    if cv2.waitKey(5) & 0xFF == 27:
      break
cap.release()
@@ -200,7 +199,7 @@ const faceDetection = new FaceDetection({locateFile: (file) => {
  return `https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/${file}`;
}});
faceDetection.setOptions({
-  modelSelection: 0
+  modelSelection: 0,
+  minDetectionConfidence: 0.5
});
faceDetection.onResults(onResults);
@@ -216,6 +215,194 @@ camera.start();
</script>
```

### Android Solution API

Please first follow the general
[instructions](../getting_started/android_solutions.md#integrate-mediapipe-android-solutions-api)
to add the MediaPipe Gradle dependencies, then try the Face Detection Solution API
in the companion
[example Android Studio project](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/solutions/facedetection)
following
[these instructions](../getting_started/android_solutions.md#build-solution-example-apps-in-android-studio),
and learn more in the usage examples below.

Supported configuration options (see the sketch after this list):

* [staticImageMode](#static_image_mode)
* [modelSelection](#model_selection)
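
A minimal sketch of setting both options together; the option semantics are assumed from the corresponding Python and JavaScript APIs above, so treat the comments as assumptions rather than documented behavior:

```java
FaceDetectionOptions options =
    FaceDetectionOptions.builder()
        // False: treat incoming frames as a continuous video stream.
        .setStaticImageMode(false)
        // 0: short-range model (faces within ~2 meters); 1: full-range model.
        .setModelSelection(0)
        .build();
```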

#### Camera Input

```java
// For camera input and result rendering with OpenGL.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(false)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error:" + message));

// Initializes a new CameraInput instance and connects it to MediaPipe Face Detection Solution.
CameraInput cameraInput = new CameraInput(this);
cameraInput.setNewFrameListener(
    textureFrame -> faceDetection.send(textureFrame));

// Initializes a new GlSurfaceView with a ResultGlRenderer<FaceDetectionResult> instance
// that provides the interfaces to run user-defined OpenGL rendering code.
// See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultGlRenderer.java
// as an example.
SolutionGlSurfaceView<FaceDetectionResult> glSurfaceView =
    new SolutionGlSurfaceView<>(
        this, faceDetection.getGlContext(), faceDetection.getGlMajorVersion());
glSurfaceView.setSolutionResultRenderer(new FaceDetectionResultGlRenderer());
glSurfaceView.setRenderInputImage(true);
faceDetection.setResultListener(
    faceDetectionResult -> {
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip normalized coordinates (value range: [0, 1]): x=%f, y=%f",
              noseTip.getX(), noseTip.getY()));
      // Request GL rendering.
      glSurfaceView.setRenderData(faceDetectionResult);
      glSurfaceView.requestRender();
    });

// The runnable to start camera after the GLSurfaceView is attached.
glSurfaceView.post(
    () ->
        cameraInput.start(
            this,
            faceDetection.getGlContext(),
            CameraInput.CameraFacing.FRONT,
            glSurfaceView.getWidth(),
            glSurfaceView.getHeight()));
```
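
Not shown above: releasing the camera and the solution when the activity goes to the background. A hedged sketch, assuming `cameraInput`, `glSurfaceView`, and `faceDetection` are activity fields and that `CameraInput` and `FaceDetection` expose `close()` as in the solution example apps:

```java
@Override
protected void onPause() {
  super.onPause();
  // Stop rendering and release the camera while in the background.
  glSurfaceView.setVisibility(View.GONE);
  cameraInput.close();
}

@Override
protected void onDestroy() {
  super.onDestroy();
  // Free the underlying MediaPipe graph resources.
  faceDetection.close();
}
```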

#### Image Input

```java
// For reading images from gallery and drawing the output in an ImageView.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(true)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);

// Connects MediaPipe Face Detection Solution to the user-defined ImageView
// instance that allows users to have the custom drawing of the output landmarks
// on it. See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultImageView.java
// as an example.
FaceDetectionResultImageView imageView = new FaceDetectionResultImageView(this);
faceDetection.setResultListener(
    faceDetectionResult -> {
      int width = faceDetectionResult.inputBitmap().getWidth();
      int height = faceDetectionResult.inputBitmap().getHeight();
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip coordinates (pixel values): x=%f, y=%f",
              noseTip.getX() * width, noseTip.getY() * height));
      // Request canvas drawing.
      imageView.setFaceDetectionResult(faceDetectionResult);
      runOnUiThread(() -> imageView.update());
    });
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error:" + message));

// ActivityResultLauncher to get an image from the gallery as Bitmap.
ActivityResultLauncher<Intent> imageGetter =
    registerForActivityResult(
        new ActivityResultContracts.StartActivityForResult(),
        result -> {
          Intent resultIntent = result.getData();
          if (resultIntent != null && result.getResultCode() == RESULT_OK) {
            Bitmap bitmap = null;
            try {
              bitmap =
                  MediaStore.Images.Media.getBitmap(
                      this.getContentResolver(), resultIntent.getData());
              // Please also rotate the Bitmap based on its orientation.
            } catch (IOException e) {
              Log.e(TAG, "Bitmap reading error:" + e);
            }
            if (bitmap != null) {
              faceDetection.send(bitmap);
            }
          }
        });
Intent gallery = new Intent(
    Intent.ACTION_PICK, MediaStore.Images.Media.INTERNAL_CONTENT_URI);
imageGetter.launch(gallery);
```
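
The "rotate the Bitmap based on its orientation" step is left to the reader; notably, this commit adds `androidx.exifinterface:exifinterface:1.3.3` to the Maven dependencies, which suggests one way to do it. A possible sketch (the `rotateByExifOrientation` helper is ours, not part of MediaPipe; it would be called between `getBitmap` and `faceDetection.send(bitmap)`, with `resultIntent.getData()` as the `uri`):

```java
import android.content.ContentResolver;
import android.graphics.Bitmap;
import android.graphics.Matrix;
import android.net.Uri;
import androidx.exifinterface.media.ExifInterface;
import java.io.IOException;
import java.io.InputStream;

static Bitmap rotateByExifOrientation(ContentResolver resolver, Uri uri, Bitmap bitmap)
    throws IOException {
  try (InputStream in = resolver.openInputStream(uri)) {
    ExifInterface exif = new ExifInterface(in);
    int orientation =
        exif.getAttributeInt(ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_NORMAL);
    float degrees;
    switch (orientation) {
      case ExifInterface.ORIENTATION_ROTATE_90:
        degrees = 90f;
        break;
      case ExifInterface.ORIENTATION_ROTATE_180:
        degrees = 180f;
        break;
      case ExifInterface.ORIENTATION_ROTATE_270:
        degrees = 270f;
        break;
      default:
        return bitmap;  // Already upright; nothing to do.
    }
    Matrix matrix = new Matrix();
    matrix.postRotate(degrees);
    return Bitmap.createBitmap(
        bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
  }
}
```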

#### Video Input

```java
// For video input and result rendering with OpenGL.
FaceDetectionOptions faceDetectionOptions =
    FaceDetectionOptions.builder()
        .setStaticImageMode(false)
        .setModelSelection(0).build();
FaceDetection faceDetection = new FaceDetection(this, faceDetectionOptions);
faceDetection.setErrorListener(
    (message, e) -> Log.e(TAG, "MediaPipe Face Detection error:" + message));

// Initializes a new VideoInput instance and connects it to MediaPipe Face Detection Solution.
VideoInput videoInput = new VideoInput(this);
videoInput.setNewFrameListener(
    textureFrame -> faceDetection.send(textureFrame));

// Initializes a new GlSurfaceView with a ResultGlRenderer<FaceDetectionResult> instance
// that provides the interfaces to run user-defined OpenGL rendering code.
// See mediapipe/examples/android/solutions/facedetection/src/main/java/com/google/mediapipe/examples/facedetection/FaceDetectionResultGlRenderer.java
// as an example.
SolutionGlSurfaceView<FaceDetectionResult> glSurfaceView =
    new SolutionGlSurfaceView<>(
        this, faceDetection.getGlContext(), faceDetection.getGlMajorVersion());
glSurfaceView.setSolutionResultRenderer(new FaceDetectionResultGlRenderer());
glSurfaceView.setRenderInputImage(true);

faceDetection.setResultListener(
    faceDetectionResult -> {
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(
          TAG,
          String.format(
              "MediaPipe Face Detection nose tip normalized coordinates (value range: [0, 1]): x=%f, y=%f",
              noseTip.getX(), noseTip.getY()));
      // Request GL rendering.
      glSurfaceView.setRenderData(faceDetectionResult);
      glSurfaceView.requestRender();
    });

ActivityResultLauncher<Intent> videoGetter =
    registerForActivityResult(
        new ActivityResultContracts.StartActivityForResult(),
        result -> {
          Intent resultIntent = result.getData();
          if (resultIntent != null) {
            if (result.getResultCode() == RESULT_OK) {
              glSurfaceView.post(
                  () ->
                      videoInput.start(
                          this,
                          resultIntent.getData(),
                          faceDetection.getGlContext(),
                          glSurfaceView.getWidth(),
                          glSurfaceView.getHeight()));
            }
          }
        });
Intent gallery =
    new Intent(Intent.ACTION_PICK, MediaStore.Video.Media.INTERNAL_CONTENT_URI);
videoGetter.launch(gallery);
```
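
One caveat that applies to all three listeners above: `FaceDetection.getFaceKeypoint(faceDetectionResult, 0, ...)` presumes at least one detected face. A defensive sketch, assuming `FaceDetectionResult` exposes the detection list via `multiFaceDetections()`:

```java
faceDetection.setResultListener(
    faceDetectionResult -> {
      // Skip frames with no detected faces before indexing face 0.
      if (faceDetectionResult.multiFaceDetections().isEmpty()) {
        return;
      }
      RelativeKeypoint noseTip =
          FaceDetection.getFaceKeypoint(faceDetectionResult, 0, FaceKeypoint.NOSE_TIP);
      Log.i(TAG, String.format("Nose tip: x=%f, y=%f", noseTip.getX(), noseTip.getY()));
    });
```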

## Example Apps

Please first see general instructions for