Skip to content

Commit

Permalink
Setup python (#10)
Browse files Browse the repository at this point in the history
* Setup python
  • Loading branch information
rasapala authored Nov 6, 2023
1 parent 7fc6de7 commit 9c1a20d
Show file tree
Hide file tree
Showing 14 changed files with 392 additions and 29 deletions.
15 changes: 15 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,9 +1,24 @@
bazel-*
build
mediapipe.egg-info
mediapipe/__pycache__/
mediapipe/MediaPipe.xcodeproj
mediapipe/MediaPipe.tulsiproj/*.tulsiconf-user
mediapipe/models/ovms/face_detection_short_range/
mediapipe/models/ovms/face_landmark/
mediapipe/models/ovms/hand_landmark_full/
mediapipe/models/ovms/hand_recrop/
mediapipe/models/ovms/iris_landmark/
mediapipe/models/ovms/palm_detection_full/
mediapipe/models/ovms/pose_detection/
mediapipe/models/ovms/pose_landmark_full/
mediapipe/models/ovms/ssdlite_object_detection/
mediapipe/models/ssdlite_object_detection_labelmap.txt
mediapipe/provisioning_profile.mobileprovision
mediapipe/python/__pycache__/
node_modules/
.configure.bazelrc
.user.bazelrc
.vscode/
.vs/
*.mp4
6 changes: 0 additions & 6 deletions Dockerfile.openvino
Original file line number Diff line number Diff line change
Expand Up @@ -85,12 +85,6 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
libopencv-imgproc-dev \
libopencv-video-dev \
build-essential \
libboost-filesystem1.71.0 \
libboost-thread1.71.0 \
libboost-program-options1.71.0 \
libboost-chrono1.71.0 \
libboost-date-time1.71.0 \
libboost-atomic1.71.0 \
libjson-c4 \
unzip

Expand Down
4 changes: 3 additions & 1 deletion WORKSPACE
Original file line number Diff line number Diff line change
Expand Up @@ -638,10 +638,12 @@ http_archive(
build_file = "@//third_party:halide.BUILD",
)

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

git_repository(
name = "ovms",
remote = "https://github.com/openvinotoolkit/model_server",
commit = "77c30dc3f153b3ee78336a3a75c09af4e23c14a4", # MP update to 10.3 in OVMS
commit = "ad1381fde838f2ac2d23117df78c186a96134fcc", # Fix azure patch (#2107)
)

# DEV ovms - adjust local repository path for build
Expand Down
26 changes: 13 additions & 13 deletions mediapipe/__init__.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
# Copyright 2019 - 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Public API surface of the `mediapipe` package: re-export the pybind
# bindings and expose the high-level namespaces as `mediapipe.solutions`
# and `mediapipe.tasks`.
from mediapipe.python import *
import mediapipe.python.solutions as solutions
import mediapipe.tasks.python as tasks


# Strip implementation submodules out of the package namespace so they are
# not presented as public API.
# NOTE(review): assumes `framework`, `gpu`, `modules`, `python`, `mediapipe`
# and `util` were all bound by the star-import above — confirm against
# mediapipe/python/__init__.py; a missing name would raise NameError at
# import time.
del framework
del gpu
del modules
del python
del mediapipe
del util
# Package version string; presumably the OVMS fork's own versioning scheme
# rather than upstream MediaPipe's — TODO confirm.
__version__ = '1.0'
33 changes: 33 additions & 0 deletions mediapipe/examples/python/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# OVMS python examples
- Building docker container with dependencies
```bash
git clone https://github.com/openvinotoolkit/mediapipe.git
cd mediapipe
make docker_build
```

- Start the container
```bash
docker run -it mediapipe_ovms:latest bash
```

- Prepare models for ovms
```bash
python setup_ovms.py --get_models
```
- Build and install mediapipe python package
Make sure you are in the /mediapipe directory.
The command below takes around 1 hour, depending on your internet speed and CPU.
```bash
pip install .
```
- Run example ovms python script
```bash
python build/lib.linux-x86_64-cpython-38/mediapipe/examples/python/ovms_object_detection.py
```
- This script will run object detection on input video, as described in this c++ example
[OVMS Object Detection](../desktop/object_detection/README.md)
[Original demo documentation](https://google.github.io/mediapipe/solutions/object_detection)
Empty file.
21 changes: 21 additions & 0 deletions mediapipe/examples/python/ovms_object_detection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Example: run the OVMS-backed object-detection solution over a sample video.
# Reads the test clip bundled with the desktop object-detection example and
# writes the annotated output video to /mediapipe/tested_video.mp4.
import mediapipe as mp

# Side packets consumed by the underlying graph: the input clip to decode
# and the path the annotated result is encoded to.
_SIDE_INPUTS = {
    'input_video_path':
        '/mediapipe/mediapipe/examples/desktop/object_detection/test_video.mp4',
    'output_video_path': '/mediapipe/tested_video.mp4',
}

_solution = mp.solutions.ovms_object_detection
with _solution.OvmsObjectDetection(side_inputs=_SIDE_INPUTS) as detector:
    results = detector.process()
39 changes: 39 additions & 0 deletions mediapipe/modules/ovms_modules/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build rules for the OVMS-backed MediaPipe modules.
# NOTE(review): only `mediapipe_simple_subgraph` is used in the visible part
# of this file; the `mediapipe_files`, `mediapipe_proto_library` and
# `mediapipe_cc_test` loads appear unused here — confirm before removing.
load(
    "//mediapipe/framework/tool:mediapipe_graph.bzl",
    "mediapipe_simple_subgraph",
)
load(
    "//mediapipe/framework/tool:mediapipe_files.bzl",
    "mediapipe_files",
)
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
load("//mediapipe/framework:mediapipe_cc_test.bzl", "mediapipe_cc_test")

licenses(["notice"])

package(default_visibility = ["//visibility:public"])

# Registers the object-detection graph as a reusable subgraph named
# "ObjectDetectionOvms". Depends on the OVMS inference library so the
# OpenVINO* calculators referenced by the graph are linked in.
mediapipe_simple_subgraph(
    name = "object_detection_ovms",
    graph = "object_detection_ovms.pbtxt",
    register_as = "ObjectDetectionOvms",
    deps = [
        "//mediapipe/graphs/object_detection:desktop_ovms_calculators",
        "@ovms//src:ovms_lib",
    ],
)
206 changes: 206 additions & 0 deletions mediapipe/modules/ovms_modules/object_detection_ovms.pbtxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,206 @@
# MediaPipe graph that performs object detection on desktop with OpenVINO Model Server
# on CPU.
# Used in the example in
# mediapipe/examples/desktop/object_detection:object_detection_openvino.

# max_queue_size limits the number of packets enqueued on any input stream
# by throttling inputs to the graph. This makes the graph only process one
# frame per time.
max_queue_size: 1

# Decodes an input video file into images and a video header.
node {
calculator: "OpenCvVideoDecoderCalculator"
input_side_packet: "INPUT_FILE_PATH:input_video_path"
output_stream: "VIDEO:input_video"
output_stream: "VIDEO_PRESTREAM:input_video_header"
}

# Transforms the input image on CPU to a 320x320 image. To scale the image, by
# default it uses the STRETCH scale mode that maps the entire input image to the
# entire transformed image. As a result, image aspect ratio may be changed and
# objects in the image may be deformed (stretched or squeezed), but the object
# detection model used in this graph is agnostic to that deformation.
node: {
calculator: "ImageTransformationCalculator"
input_stream: "IMAGE:input_video"
output_stream: "IMAGE:transformed_input_video"
node_options: {
[type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
output_width: 320
output_height: 320
}
}
}

# Converts the transformed input image on CPU into an image tensor as an
# OpenVINOTensor. The zero_center option is set to true to normalize the
# pixel values to [-1.f, 1.f] as opposed to [0.f, 1.f].
node {
calculator: "OpenVINOConverterCalculator"
input_stream: "IMAGE:transformed_input_video"
output_stream: "TENSORS:image_tensor"
node_options: {
[type.googleapis.com/mediapipe.OpenVINOConverterCalculatorOptions] {
enable_normalization: true
zero_center: true
}
}
}

# Runs inference through OpenVINO Model Server: a session calculator opens the
# servable, and an inference calculator takes an image tensor and outputs a
# vector of tensors representing, for instance, detection boxes/keypoints and
# scores.
node {
calculator: "OpenVINOModelServerSessionCalculator"
output_side_packet: "SESSION:session"
node_options: {
[type.googleapis.com / mediapipe.OpenVINOModelServerSessionCalculatorOptions]: {
servable_name: "ssdlite_object_detection" # servable name inside OVMS
servable_version: "1"
server_config: "mediapipe/calculators/ovms/config.json"
}
}
}
node {
calculator: "OpenVINOInferenceCalculator"
input_side_packet: "SESSION:session"
input_stream: "OVTENSORS:image_tensor"
output_stream: "OVTENSORS2:detection_tensors"
node_options: {
[type.googleapis.com / mediapipe.OpenVINOInferenceCalculatorOptions]: {
input_order_list :["normalized_input_image_tensor"]
output_order_list :["raw_outputs/box_encodings","raw_outputs/class_predictions"]
tag_to_input_tensor_names {
key: "OVTENSORS"
value: "normalized_input_image_tensor"
}
tag_to_output_tensor_names {
key: "OVTENSORS1"
value: "raw_outputs/box_encodings"
}
tag_to_output_tensor_names {
key: "OVTENSORS2"
value: "raw_outputs/class_predictions"
}
}
}
}

# Generates a single side packet containing a vector of SSD anchors based on
# the specification in the options.
node {
calculator: "SsdAnchorsCalculator"
output_side_packet: "anchors"
node_options: {
[type.googleapis.com/mediapipe.SsdAnchorsCalculatorOptions] {
num_layers: 6
min_scale: 0.2
max_scale: 0.95
input_size_height: 320
input_size_width: 320
anchor_offset_x: 0.5
anchor_offset_y: 0.5
strides: 16
strides: 32
strides: 64
strides: 128
strides: 256
strides: 512
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
aspect_ratios: 3.0
aspect_ratios: 0.3333
reduce_boxes_in_lowest_layer: true
}
}
}

# Decodes the detection tensors produced by the OpenVINO inference step, based
# on the SSD anchors and the specification in the options, into a vector of
# detections. Each detection describes a detected object.
node {
calculator: "OpenVINOTensorsToDetectionsCalculator"
input_stream: "TENSORS:detection_tensors"
input_side_packet: "ANCHORS:anchors"
output_stream: "DETECTIONS:detections"
node_options: {
[type.googleapis.com/mediapipe.OpenVINOTensorsToDetectionsCalculatorOptions] {
num_classes: 91
num_boxes: 2034
num_coords: 4
ignore_classes: 0
apply_exponential_on_box_size: true
x_scale: 10.0
y_scale: 10.0
h_scale: 5.0
w_scale: 5.0
}
}
}

# Performs non-max suppression to remove excessive detections.
node {
calculator: "NonMaxSuppressionCalculator"
input_stream: "detections"
output_stream: "filtered_detections"
node_options: {
[type.googleapis.com/mediapipe.NonMaxSuppressionCalculatorOptions] {
min_suppression_threshold: 0.4
min_score_threshold: 0.6
max_num_detections: 5
overlap_type: INTERSECTION_OVER_UNION
}
}
}

# Maps detection label IDs to the corresponding label text. The label map is
# provided in the label_map_path option.
node {
calculator: "DetectionLabelIdToTextCalculator"
input_stream: "filtered_detections"
output_stream: "output_detections"
node_options: {
[type.googleapis.com/mediapipe.DetectionLabelIdToTextCalculatorOptions] {
label_map_path: "/mediapipe/mediapipe/models/ssdlite_object_detection_labelmap.txt"
}
}
}

# Converts the detections to drawing primitives for annotation overlay.
node {
calculator: "DetectionsToRenderDataCalculator"
input_stream: "DETECTIONS:output_detections"
output_stream: "RENDER_DATA:render_data"
node_options: {
[type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
thickness: 4.0
color { r: 255 g: 0 b: 0 }
}
}
}

# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "IMAGE:input_video"
input_stream: "render_data"
output_stream: "IMAGE:output_video"
}

# Encodes the annotated images into a video file, adopting properties specified
# in the input video header, e.g., video framerate.
node {
calculator: "OpenCvVideoEncoderCalculator"
input_stream: "VIDEO:output_video"
input_stream: "VIDEO_PRESTREAM:input_video_header"
input_side_packet: "OUTPUT_FILE_PATH:output_video_path"
node_options: {
[type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: {
codec: "avc1"
video_format: "mp4"
}
}
}
2 changes: 2 additions & 0 deletions mediapipe/python/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ pybind_extension(
"//mediapipe/framework/formats:rect_registration",
"//mediapipe/modules/objectron/calculators:annotation_registration",
"//mediapipe/tasks/cc/vision/face_geometry/proto:face_geometry_registration",
# OVMS lib
"@ovms//src:ovms_lib",
],
)

Expand Down
1 change: 1 addition & 0 deletions mediapipe/python/solutions/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,6 @@
import mediapipe.python.solutions.hands_connections
import mediapipe.python.solutions.holistic
import mediapipe.python.solutions.objectron
import mediapipe.python.solutions.ovms_object_detection
import mediapipe.python.solutions.pose
import mediapipe.python.solutions.selfie_segmentation
Loading

0 comments on commit 9c1a20d

Please sign in to comment.