diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..5f661f4
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,6 @@
+# Avoid issues with container-specific assignment of user permissions
+samples/webrtc/grafana/grafana-storage/**/*
+samples/webrtc/grafana/grafana-storage
+samples/webrtc/webserver/www/js-client/**/*
+samples/webrtc/webserver/www/js-client
+
diff --git a/README.md b/README.md
index a931e8b..a166b1c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Intel(R) Deep Learning Streamer Pipeline Server
+# Intel® Deep Learning Streamer Pipeline Server
| [Getting Started](#getting-started)
| [Request Customizations](#request-customizations)
@@ -6,15 +6,12 @@
| [Further Reading](#further-reading)
| [Known Issues](#known-issues) |
-Intel(R) Deep Learning Streamer Pipeline Server (Intel(R) DL Streamer Pipeline Server) is a python package and microservice for
+Intel® Deep Learning Streamer (Intel® DL Streamer) Pipeline Server is a Python package and microservice for
deploying optimized media analytics pipelines. It supports pipelines
defined in
[GStreamer](https://gstreamer.freedesktop.org/documentation/?gi-language=c)*
or [FFmpeg](https://ffmpeg.org/)* and provides APIs to discover, start,
-stop, customize and monitor pipeline execution. Video Analytics
-Serving is based on [OpenVINO™ Toolkit DL
-Streamer](https://github.com/opencv/gst-video-analytics) and [FFmpeg
-Video Analytics](https://github.com/VCDP/FFmpeg-patch).
+stop, customize and monitor pipeline execution. Intel® DL Streamer Pipeline Server is based on [Intel® Deep Learning Streamer Pipeline Framework](https://github.com/dlstreamer/dlstreamer) and [FFmpeg Video Analytics](https://github.com/VCDP/FFmpeg-patch).
## Features Include
@@ -23,12 +20,12 @@ Video Analytics](https://github.com/VCDP/FFmpeg-patch).
| **Customizable Media Analytics Containers** | Scripts and dockerfiles to build and run container images with the required dependencies for hardware optimized media analytics pipelines. |
| **No-Code Pipeline Definitions and Templates** | JSON based definition files, a flexible way for developers to define and parameterize pipelines while abstracting the low level details from their users. |
| **Deep Learning Model Integration** | A simple way to package and reference [OpenVINO™](https://software.intel.com/en-us/openvino-toolkit) based models in pipeline definitions. The precision of a model can be auto-selected at runtime based on the chosen inference device. |
-| **Intel(R) DL Streamer Pipeline Server Python API** | A python module to discover, start, stop, customize and monitor pipelines based on their no-code definitions. |
-| **Intel(R) DL Streamer Pipeline Server Microservice** | A RESTful microservice providing endpoints and APIs matching the functionality of the python module. |
+| **Intel® DL Streamer Pipeline Server Python API** | A python module to discover, start, stop, customize and monitor pipelines based on their no-code definitions. |
+| **Intel® DL Streamer Pipeline Server Microservice** | A RESTful microservice providing endpoints and APIs matching the functionality of the python module. |
-> **IMPORTANT:** Intel(R) DL Streamer Pipeline Server is provided as a _sample_. It
+> **IMPORTANT:** Intel® DL Streamer Pipeline Server is provided as a _sample_. It
> is not intended to be deployed into production environments without
-> modification. Developers deploying Intel(R) DL Streamer Pipeline Server should
+> modification. Developers deploying Intel® DL Streamer Pipeline Server should
> review it against their production requirements.
The sample microservice includes five categories of media analytics pipelines. Click on the links below to find out more about each of them.
@@ -47,12 +44,12 @@ The sample microservice includes five categories of media analytics pipelines. C
| | |
|---------------------------------------------|------------------|
-| **Docker** | Intel(R) DL Streamer Pipeline Server requires Docker for its build, development, and runtime environments. Please install the latest for your platform. [Docker](https://docs.docker.com/install). |
-| **bash** | Intel(R) DL Streamer Pipeline Server's build and run scripts require bash and have been tested on systems using versions greater than or equal to: `GNU bash, version 4.3.48(1)-release (x86_64-pc-linux-gnu)`. Most users shouldn't need to update their version but if you run into issues please install the latest for your platform. Instructions for macOS®* users [here](docs/installing_bash_macos.md). |
+| **Docker** | Intel® DL Streamer Pipeline Server requires Docker for its build, development, and runtime environments. Please install the latest for your platform. [Docker](https://docs.docker.com/install). |
+| **bash** | Intel® DL Streamer Pipeline Server's build and run scripts require bash and have been tested on systems using versions greater than or equal to: `GNU bash, version 4.3.48(1)-release (x86_64-pc-linux-gnu)`. Most users shouldn't need to update their version but if you run into issues please install the latest for your platform. Instructions for macOS®* users [here](docs/installing_bash_macos.md). |
## Supported Hardware
-Refer to [OpenVINO™ System Requirements](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/system-requirements.html) for supported development and target runtime platforms and the [OpenVINO™ Container Release Notes](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) for details on providing access to accelerator devices.
+Refer to [Intel® DL Streamer Hardware Requirements](https://dlstreamer.github.io/get_started/hardware_requirements.html) for supported development and target runtime platforms and the [Intel® DL Streamer Install Guide](https://dlstreamer.github.io/get_started/install/install_guide_ubuntu.html) for details on providing access to accelerator devices.
## Building the Microservice
@@ -111,16 +108,16 @@ Expected output:
## Running a Pipeline
-The Pipeline Server includes a sample client [vaclient](./vaclient/README.md) that can connect to the service and make requests. We will use vaclient to explain how to use the key microservice features.
-> **Note:** Any RESTful tool or library can be used to send requests to the Pipeline Server service. We are using vaclient as it simplifies interaction with the service.
+The Pipeline Server includes a sample client [pipeline_client](./client/README.md) that can connect to the service and make requests. We will use pipeline_client to explain how to use the key microservice features.
+> **Note:** Any RESTful tool or library can be used to send requests to the Pipeline Server service. We are using pipeline_client as it simplifies interaction with the service.
> **Note:** The microservice has to be up and running before the sample client is invoked.
-Before running a pipeline, we need to know what pipelines are available. We do this using vaclient's `list-pipeline` command.
+Before running a pipeline, we need to know what pipelines are available. We do this using pipeline_client's `list-pipelines` command.
In a new shell, run the following command:
```bash
-./vaclient/vaclient.sh list-pipelines
+./client/pipeline_client.sh list-pipelines
```
```text
@@ -139,12 +136,12 @@ In new shell run the following command:
Pipelines are displayed as a name/version tuple. The name reflects the action and the version supplies more details of that action. Let's go with `object_detection/person_vehicle_bike`. Now we need to choose a media source. We recommend the [IoT Devkit sample videos](https://github.com/intel-iot-devkit/sample-videos) to get started. As the pipeline version indicates support for detecting people, person-bicycle-car-detection.mp4 would be a good choice.
> **Note:** Make sure to include the `raw=true` parameter in the GitHub URL as shown in our examples. Failure to do so will result in a pipeline execution error.
-vaclient offers a `run` command that takes two additional arguments the `pipeline` and the `uri` for the media source. The `run` command displays inference results until either the media is exhausted or `CTRL+C` is pressed.
+pipeline_client offers a `run` command that takes two additional arguments: the `pipeline` and the `uri` of the media source. The `run` command displays inference results until either the media is exhausted or `CTRL+C` is pressed.
Inference result bounding boxes are displayed in the format `label (confidence) [top left width height] {meta-data}`, provided applicable data is present. At the end of the pipeline run, the average fps is shown.
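For reference, here is a minimal sketch of how one line of that `json-lines` metadata maps to the display format (the field names follow the sample pipelines' output and are illustrative, not normative):

```python
import json

# One illustrative line from /tmp/results.jsonl (fields assumed from sample pipeline output)
line = ('{"timestamp": 1500000000, "objects": [{"detection": {'
        '"label": "person", "confidence": 0.54, "bounding_box": '
        '{"x_min": 0.67, "y_min": 0.88, "x_max": 0.74, "y_max": 1.0}}}]}')

frame = json.loads(line)
print("Timestamp {}".format(frame["timestamp"]))
for obj in frame.get("objects", []):
    det = obj["detection"]
    box = det["bounding_box"]
    # label (confidence) [top left width height]
    print("- {} ({:.2f}) [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(
        det["label"], det["confidence"], box["x_min"], box["y_min"],
        box["x_max"] - box["x_min"], box["y_max"] - box["y_min"]))
```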
```bash
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
```text
@@ -174,12 +171,12 @@ The file path is specified in the `destination` section of the REST request and
### Queued, Running and Completed
-The vaclient `run` command starts the pipeline. The underlying REST request returns a `pipeline instance` which is used to query the state of the pipeline.
-All being well it will go into `QUEUED` then `RUNNING` state. We can interrogate the pipeline status by using the vaclient `start` command that kicks off the pipeline like `run` and then exits displaying the `pipeline instance` (a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier)) which is used by the `status` command to view pipeline state.
+The pipeline_client `run` command starts the pipeline. The underlying REST request returns a `pipeline instance` which is used to query the state of the pipeline.
+All being well, it will go into the `QUEUED` and then `RUNNING` state. We can interrogate the pipeline status using the pipeline_client `start` command, which kicks off the pipeline like `run` and then exits, displaying the `pipeline instance` (a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier)) that is used by the `status` command to view pipeline state.
> **NOTE:** The pipeline instance value depends on the number of pipelines started while the server is running, so it may differ from the value shown in the following examples.
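Under the hood these commands are plain REST calls. A minimal sketch of the same start/status flow using Python's requests library (assuming the default `localhost:8080` address and the `/status/{instance}` route shown later in this document; response shapes may vary by release):

```python
import time
import requests

SERVER = "http://localhost:8080"
PIPELINE = "pipelines/object_detection/person_vehicle_bike"
VIDEO = ("https://github.com/intel-iot-devkit/sample-videos/blob/master/"
         "person-bicycle-car-detection.mp4?raw=true")

# Start the pipeline; the response body carries the pipeline instance (a UUID)
response = requests.post("{}/{}".format(SERVER, PIPELINE),
                         json={"source": {"uri": VIDEO, "type": "uri"}},
                         timeout=30)
instance = response.text.strip().strip('"')

# Poll until the pipeline reaches a terminal state
while True:
    status = requests.get("{}/{}/status/{}".format(SERVER, PIPELINE, instance),
                          timeout=30).json()
    print(status["state"], status.get("avg_fps"))
    if status["state"] in ("COMPLETED", "ABORTED", "ERROR"):
        break
    time.sleep(1)
```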
```bash
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
```text
@@ -190,7 +187,7 @@ Starting pipeline object_detection/person_vehicle_bike, instance = d83502e3ef314
You will need both the pipeline tuple and `instance` id for the status command. This command will display pipeline state:
```bash
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike d83502e3ef314e8fbec8dc926eadd0c2
+./client/pipeline_client.sh status object_detection/person_vehicle_bike d83502e3ef314e8fbec8dc926eadd0c2
```
```text
@@ -201,7 +198,7 @@ RUNNING (49fps)
Then wait a minute or so and try again. The pipeline will have completed.
```bash
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike d83502e3ef314e8fbec8dc926eadd0c2
+./client/pipeline_client.sh status object_detection/person_vehicle_bike d83502e3ef314e8fbec8dc926eadd0c2
```
```text
@@ -215,7 +212,7 @@ If a pipeline is stopped, rather than allowed to complete, it goes into the ABOR
Start the pipeline again; this time we'll stop it.
```bash
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
```text
@@ -224,7 +221,7 @@ Starting pipeline object_detection/person_vehicle_bike, instance = 8ad2c85af4bd4
```
```bash
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike 8ad2c85a-f4bd473e8a693aff562be316
+./client/pipeline_client.sh status object_detection/person_vehicle_bike 8ad2c85af4bd473e8a693aff562be316
```
```text
@@ -233,7 +230,7 @@ RUNNING (50fps)
```
```bash
-./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 8ad2c85af4bd473e8a693aff562be316
+./client/pipeline_client.sh stop object_detection/person_vehicle_bike 8ad2c85af4bd473e8a693aff562be316
```
```text
@@ -244,7 +241,7 @@ avg_fps: 24.33
```
```bash
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike 8ad2c85af4bd473e8a693aff562be316
+./client/pipeline_client.sh status object_detection/person_vehicle_bike 8ad2c85af4bd473e8a693aff562be316
```
```text
@@ -257,7 +254,7 @@ ABORTED (47fps)
The error state covers a number of outcomes, such as a request that could not be satisfied, a missing pipeline dependency, or an initialization problem. We can create an error condition by supplying a valid but unreachable uri.
```bash
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike http://bad-uri
+./client/pipeline_client.sh start object_detection/person_vehicle_bike http://bad-uri
```
```text
@@ -269,7 +266,7 @@ Note that the Pipeline Server does not report an error at this stage as it goes
Checking on state a few seconds later will show the error.
```bash
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike 2bb2d219310a4ee881faf258fbcc4355
+./client/pipeline_client.sh status object_detection/person_vehicle_bike 2bb2d219310a4ee881faf258fbcc4355
```
```text
@@ -277,43 +274,14 @@ Checking on state a few seconds later will show the error.
ERROR (0fps)
```
-## Real Time Streaming Protocol (RTSP)
-
-RTSP allows you to connect to a server and display a video stream. The Pipeline Server includes an RTSP server that creates a stream that shows the incoming video with superimposed bounding boxes and meta-data. You will need a client that connects to the server and displays the video. We recommend [vlc](https://www.videolan.org/). For this example we'll assume the Pipeline Server and vlc are running on the same host.
-
-First start the Pipeline Server with RTSP enabled. By default, the RTSP stream will use port 8554.
-```
-docker/run.sh --enable-rtsp -v /tmp:/tmp
-```
-
-Then start a pipeline specifying the RTSP server endpoint path `pipeline-server`. In this case the RTSP endpoint would be `rtsp://localhost:8554/pipeline-server`
-
-```bash
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --rtsp-path pipeline-server
-```
-
-If you see the error
-
-```text
-Starting pipeline object_detection/person_vehicle_bike, instance =
-Error in pipeline, please check pipeline-server log messages
-```
-
-You probably forgot to enable RTSP in the server.
-
-Now start `vlc` and from the `Media` menu select `Open Network Stream`. For URL enter `rtsp://localhost:8554/pipeline-server` and hit `Play`.
-> **Note:** The pipeline must be running before you hit play otherwise VLC will not be able to connect to the RTSP server.
-
-> **Note:** For shorter video files you should have VLC ready to go before starting pipeline otherwise by the time you hit play the pipeline will have completed and the RTSP server will have shut down.
-
# Request Customizations
## Change Pipeline and Source Media
-With vaclient it is easy to customize service requests. Here will use a vehicle classification pipeline `object_classification/vehicle_attributes` with the Iot Devkit video `car-detection.mp4`. Note how vaclient now displays classification metadata including type and color of vehicle.
+With pipeline_client it is easy to customize service requests. Here we will use a vehicle classification pipeline `object_classification/vehicle_attributes` with the IoT Devkit video `car-detection.mp4`. Note how pipeline_client now displays classification metadata, including the type and color of the vehicle.
```bash
-./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
+./client/pipeline_client.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
```
```text
@@ -342,7 +310,7 @@ Timestamp 18640000000
- vehicle (0.98) [0.40, 0.00, 0.55, 0.15] {'color': 'red', 'type': 'car'}
```
-If you look at video you can see that there are some errors in classification - there are no trucks or busses in the video. However you can see that associated confidence is much lower than the correct classification of the white and red cars.
+If you look at the video, you can see that there are some errors in classification - there are no trucks or buses in the video. However, you can see that the associated confidence is much lower than for the correct classifications of the white and red cars.
## Change Inference Accelerator Device
@@ -350,7 +318,7 @@ Inference accelerator devices can be easily selected using the device parameter.
but this time use the integrated GPU for detection inference by setting the `detection-device` parameter.
```bash
-./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --parameter detection-model-instance-id person_vehicle_bike_detection_gpu
+./client/pipeline_client.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --parameter detection-model-instance-id person_vehicle_bike_detection_gpu
```
```text
@@ -361,16 +329,40 @@ Starting pipeline object_classification/vehicle_attributes, instance =
> **Note:** The `detection-model-instance-id` parameter caches the GPU model with a unique id. For more information read about [model instance ids](docs/defining_pipelines.md#model-persistance-in-openvino-gstreamer-elements).
-vaclient's fps measurement is useful when assessing pipeline performance with different accelerators.
+pipeline_client's fps measurement is useful when assessing pipeline performance with different accelerators.
+
+## Visualize Inference
+Pipeline Server allows you to optionally visualize inference results using either [Real Time Streaming Protocol (RTSP)](https://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol) or [Web Real Time Communication (WebRTC)](https://webrtc.org/) by configuring the frame destination section of the request.
+
+RTSP is simpler to set up but you must have an RTSP player (e.g. [VLC](https://www.videolan.org/vlc/)) to render output. WebRTC setup is more complex (e.g., requires additional server-side microservices) but has the upside of using a web browser for client visualization.
+
+Before requesting visualization, the corresponding feature must be enabled in the server; see [Visualizing Inference Output](docs/running_pipeline_server.md#visualizing-inference-output).
+
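+As a sketch, the frame destination section sent in the request for each option looks like this (these shapes mirror the request templates used by pipeline_client; the values shown are illustrative):
+
+```python
+# Frame destination for RTSP: the stream is served at rtsp://<host>:8554/<path>
+rtsp_destination = {"frame": {"type": "rtsp", "path": "pipeline-server"}}
+
+# Frame destination for WebRTC: peer-id must match the peer id the web client uses
+webrtc_destination = {"frame": {"type": "webrtc", "peer-id": "pipeline_1"}}
+```
+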
+### RTSP
+
+RTSP allows you to connect to a server and display a video stream. The Pipeline Server includes an RTSP server that creates a stream showing the incoming video with superimposed bounding boxes and meta-data. You will need a client that connects to the server and displays the video. We recommend [vlc](https://www.videolan.org/).
+
+First start the Pipeline Server with RTSP enabled. By default, the RTSP stream will use port 8554.
+```bash
+docker/run.sh --enable-rtsp -v /tmp:/tmp
+```
+
+Then start a pipeline and visualize the output as described in the [RTSP section of Customizing Pipeline Requests](docs/customizing_pipeline_requests.md#rtsp).
+
+> **Note:** The pipeline must be running before you hit play, otherwise VLC will not be able to connect to the RTSP server. For shorter video files, have VLC ready to go before starting the pipeline, otherwise by the time you hit play the pipeline will have completed and the RTSP server will have shut down.
+
+### WebRTC
+
+WebRTC setup involves additional microservices. Follow the setup instructions in the [sample](samples/webrtc). More details on fine-tuning the request can be found in the [WebRTC section of Customizing Pipeline Requests](docs/customizing_pipeline_requests.md#webrtc).
## View REST Request
-As the previous example has shown, the vaclient application works by converting command line arguments into Pipeline Server REST requests.
+As the previous example has shown, the pipeline_client application works by converting command line arguments into Pipeline Server REST requests.
The `--show-request` option displays the REST verb, URI and body of the request.
Let's repeat the previous GPU inference example, adding RTSP output and show the underlying request.
```bash
-./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --rtsp-path pipeline-server --show-request
+./client/pipeline_client.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --rtsp-path pipeline-server --show-request
```
```text
@@ -418,7 +410,7 @@ They are easier to understand when the json is pretty-printed
1. Media source: type is `uri` and the uri is the car-detection.mp4 video
2. Destinations:
- * metadata: this is the inference results, they are sent to file `/tmp/results.jsonl` in `json-lines` format. vaclient parses this file to display the inference results and metadata.
+   * metadata: the inference results; they are sent to the file `/tmp/results.jsonl` in `json-lines` format. pipeline_client parses this file to display the inference results and metadata.
   * frames: the watermarked frames. Here they are sent to the RTSP server and made available at the given endpoint `pipeline-server`.
3. Parameters set pipeline properties. See the [Defining Pipelines](docs/defining_pipelines.md) document for more details on parameters.
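A sketch of issuing an equivalent request directly with Python's requests library, mirroring the three parts above (the endpoint and body follow the `--show-request` output; treat the exact shapes as illustrative rather than a fixed contract):

```python
import requests

request_body = {
    # 1. Media source: the car-detection.mp4 video by uri
    "source": {
        "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true",
        "type": "uri"
    },
    # 2. Destinations: metadata as json-lines to a file, frames over RTSP
    "destination": {
        "metadata": {"type": "file", "path": "/tmp/results.jsonl", "format": "json-lines"},
        "frame": {"type": "rtsp", "path": "pipeline-server"}
    },
    # 3. Parameters: pipeline properties such as the inference device
    "parameters": {"detection-device": "GPU"}
}

response = requests.post(
    "http://localhost:8080/pipelines/object_classification/vehicle_attributes",
    json=request_body, timeout=30)
print(response.text)  # pipeline instance
```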
@@ -467,13 +459,13 @@ The Pipeline Server makes pipeline customization and model selection a simple ta
| **Documentation** | **Reference Guides** | **Tutorials** |
| ------------ | ------------------ | ----------- |
-| **-** [Defining Media Analytics Pipelines](docs/defining_pipelines.md) **-** [Building Intel(R) DL Streamer Pipeline Server](docs/building_video_analytics_serving.md) **-** [Running Intel(R) DL Streamer Pipeline Server](docs/running_video_analytics_serving.md) **-** [Customizing Pipeline Requests](docs/customizing_pipeline_requests.md) **-** [Creating Extensions](docs/creating_extensions.md)| **-** [Intel(R) DL Streamer Pipeline Server Architecture Diagram](docs/images/video_analytics_service_architecture.png) **-** [Microservice Endpoints](docs/restful_microservice_interfaces.md) **-** [Build Script Reference](docs/build_script_reference.md) **-** [Run Script Reference](docs/run_script_reference.md) **-** [VA Client Reference](vaclient/README.md)| **-** [Changing Object Detection Models](docs/changing_object_detection_models.md) **-** [Kubernetes Deployment with Load Balancing](samples/kubernetes/README.md)
+| **-** [Defining Media Analytics Pipelines](docs/defining_pipelines.md) **-** [Building Intel® DL Streamer Pipeline Server](docs/building_pipeline_server.md) **-** [Running Intel® DL Streamer Pipeline Server](docs/running_pipeline_server.md) **-** [Customizing Pipeline Requests](docs/customizing_pipeline_requests.md) **-** [Creating Extensions](docs/creating_extensions.md)| **-** [Intel® DL Streamer Pipeline Server Architecture Diagram](docs/images/pipeline_server_architecture.png) **-** [Microservice Endpoints](docs/restful_microservice_interfaces.md) **-** [Build Script Reference](docs/build_script_reference.md) **-** [Run Script Reference](docs/run_script_reference.md) **-** [Pipeline Client Reference](client/README.md)| **-** [Changing Object Detection Models](docs/changing_object_detection_models.md) **-** [Kubernetes Deployment with Load Balancing](samples/kubernetes/README.md)
## Related Links
| **Media Frameworks** | **Media Analytics** | **Samples and Reference Designs**
| ------------ | ------------------ | -----------------|
-| **-** [GStreamer](https://gstreamer.freedesktop.org/documentation/?gi-language=c)* **-** [GStreamer* Overview](docs/gstreamer_overview.md) **-** [FFmpeg](https://ffmpeg.org/)* | **-** [OpenVINO™ Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html) **-** [Intel(R) Deep Learning Streamer](https://github.com/dlstreamer/dlstreamer) **-** [FFmpeg* Video Analytics](https://github.com/VCDP/FFmpeg-patch) | **-** [Open Visual Cloud Smart City Sample](https://github.com/OpenVisualCloud/Smart-City-Sample) **-** [Open Visual Cloud Ad Insertion Sample](https://github.com/OpenVisualCloud/Ad-Insertion-Sample) **-** [Edge Insights for Retail](https://software.intel.com/content/www/us/en/develop/articles/real-time-sensor-fusion-for-loss-detection.html)
+| **-** [GStreamer](https://gstreamer.freedesktop.org/documentation/?gi-language=c)* **-** [GStreamer* Overview](docs/gstreamer_overview.md) **-** [FFmpeg](https://ffmpeg.org/)* | **-** [Intel® Deep Learning Streamer Pipeline Framework](https://github.com/dlstreamer/dlstreamer) **-** [OpenVINO™ Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html) **-** [FFmpeg* Video Analytics](https://github.com/VCDP/FFmpeg-patch) | **-** [Open Visual Cloud Smart City Sample](https://github.com/OpenVisualCloud/Smart-City-Sample) **-** [Open Visual Cloud Ad Insertion Sample](https://github.com/OpenVisualCloud/Ad-Insertion-Sample) **-** [Edge Insights for Retail](https://software.intel.com/content/www/us/en/develop/articles/real-time-sensor-fusion-for-loss-detection.html)
# Known Issues
diff --git a/vaclient/README.md b/client/README.md
similarity index 60%
rename from vaclient/README.md
rename to client/README.md
index fa40bf3..c2b2586 100644
--- a/vaclient/README.md
+++ b/client/README.md
@@ -1,8 +1,8 @@
-# VA Client Command Reference
-vaclient is a python app intended to be a reference for using Intel(R) Deep Learning Streamer (Intel(R) DL Streamer) Pipeline Server REST API. vaclient is included in Pipeline Server's REST container and can be easily launched using the accompanying run script, `vaclient.sh`.
+# Pipeline Client Command Reference
+pipeline_client is a Python app intended to be a reference for using the Intel® Deep Learning Streamer (Intel® DL Streamer) Pipeline Server REST API. pipeline_client is included in Pipeline Server's REST container and can be easily launched using the accompanying run script, `pipeline_client.sh`.
>**Note:**
-This document assumes you are familiar with Intel(R) DL Streamer Pipeline Server and have built the image locally and Pipeline Server REST instance is running for VA Client to connect to. See the main [README](../README.md) for details on building and running the service.
+This document assumes you are familiar with Intel® DL Streamer Pipeline Server, have built the image locally, and have a Pipeline Server REST instance running for Pipeline Client to connect to. See the main [README](../README.md) for details on building and running the service.
## Basic Usage
### Listing Supported Models and Pipelines
@@ -11,7 +11,7 @@ To see which models and pipelines are loaded by the service run the following co
Listing models:
```
- ./vaclient/vaclient.sh list-models
+ ./client/pipeline_client.sh list-models
```
```
@@ -26,7 +26,7 @@ Listing models:
Listing pipelines:
```
-./vaclient/vaclient.sh list-pipelines
+./client/pipeline_client.sh list-pipelines
```
```
@@ -40,12 +40,12 @@ Listing pipelines:
```
### Running Pipelines
-vaclient can be used to send pipeline start requests using the `run` command. With the `run` command you will need to enter two additional arguments the `pipeline` (in the form of pipeline_name/pipeline_version) you wish to use and the `uri` pointing to the media of your choice.
+pipeline_client can be used to send pipeline start requests using the `run` command. With the `run` command you will need to enter two additional arguments: the `pipeline` (in the form of pipeline_name/pipeline_version) you wish to use and the `uri` pointing to the media of your choice.
```
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
-If the pipeline request is successful, an instance id is created and vaclient will print the instance. More on `instance_id` below.
-Once pre-roll is completed and pipeline begins running, the output file is processed by vaclient and inference information is printed to the screen in the following format: `label (confidence) [top left width height] {meta-data}` At the end of the pipeline run, the average fps is printed as well. If you wish to stop the pipeline mid-run, `Ctrl+C` will signal the client to send a `stop` command to the service. Once the pipeline is stopped, vaclient will output the average fps. More on `stop` below
+If the pipeline request is successful, an instance id is created and pipeline_client will print the instance. More on `instance_id` below.
+Once pre-roll is completed and the pipeline begins running, the output file is processed by pipeline_client and inference information is printed to the screen in the following format: `label (confidence) [top left width height] {meta-data}`. At the end of the pipeline run, the average fps is printed as well. If you wish to stop the pipeline mid-run, `Ctrl+C` will signal the client to send a `stop` command to the service. Once the pipeline is stopped, pipeline_client will output the average fps. More on `stop` below.
```
Pipeline instance =
@@ -69,7 +69,7 @@ Timestamp 49250000000
- vehicle (0.64) [0.00, 0.14, 0.05, 0.34] {}
avg_fps: 39.66
```
-However, if there are errors during pipeline execution i.e GPU is specified as detection device but is not present, vaclient will terminate with an error message
+However, if there are errors during pipeline execution, e.g. GPU is specified as the detection device but is not present, pipeline_client will terminate with an error message:
```
Pipeline instance =
Error in pipeline, please check pipeline-server log messages
@@ -78,30 +78,30 @@ Error in pipeline, please check pipeline-server log messages
### Starting Pipelines
The `run` command is helpful for quickly showing inference results but `run` blocks until completion. If you want to do your own processing and only want to kick off a pipeline, this can be done with the `start` command. `start` arguments are the same as `run`'s: you'll need to provide the `pipeline` and `uri`. Run the following command:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
-Similar to `run`, if the pipeline request is successful, an instance id is created and vaclient will print the instance. More on `instance_id` below.
+Similar to `run`, if the pipeline request is successful, an instance id is created and pipeline_client will print the instance. More on `instance_id` below.
```
Pipeline instance =
```
-Errors during pipeline execution are not flagged as vaclient exits after receiving instance id for a successful request. However, both `start` and `run` will flag invalid requests, for example:
+Errors during pipeline execution are not flagged as pipeline_client exits after receiving the instance id for a successful request. However, both `start` and `run` will flag invalid requests, for example:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bke https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_detection/person_vehicle_bke https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
The pipeline name has a typo, `object_detection/person_vehicle_bke`, making it invalid; this results in the error message:
```
-Status 400 - "Invalid Pipeline or Version"
+"Invalid Pipeline or Version"
```
#### Instance ID
-On a successful start of a pipeline, VA Serving assigns a pipeline `instance_id` which is a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) which can be used to reference the pipeline in subsequent requests. In this example, the `instance_id` is `0fe8f408ea2441bca8161e1190eefc51`
+On a successful start of a pipeline, Pipeline Server assigns a pipeline `instance_id`, a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) that can be used to reference the pipeline in subsequent requests. In this example, the `instance_id` is `0fe8f408ea2441bca8161e1190eefc51`:
```
Starting pipeline object_detection/person_vehicle_bike, instance = 0fe8f408ea2441bca8161e1190eefc51
```
### Stopping Pipelines
Stopping a pipeline can be accomplished using the `stop` command along with the `pipeline` and `instance id`:
```
-./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
+./client/pipeline_client.sh stop object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
```
```
Stopping Pipeline...
@@ -111,9 +111,9 @@ avg_fps: 42.07
### Getting Pipeline Status
Querying the current state of the pipeline is done using the `status` command along with the `pipeline` and `instance id`:
```
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
+./client/pipeline_client.sh status object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
```
-vaclient will print the status of `QUEUED`, `RUNNING`, `ABORTED`, `COMPLETED` or `ERROR` and also fps.
+pipeline_client will print a status of `QUEUED`, `RUNNING`, `ABORTED`, `COMPLETED` or `ERROR`, along with the fps.
```
RUNNING (30fps)
@@ -122,7 +122,7 @@ RUNNING (30fps)
### Waiting for a pipeline to finish
If you wish to wait for a pipeline to finish running you can use the `wait` command along with the `pipeline` and `instance id`:
```
-./vaclient/vaclient.sh wait object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
+./client/pipeline_client.sh wait object_detection/person_vehicle_bike 0fe8f408ea2441bca8161e1190eefc51
```
The client will print the initial status of the pipeline, then wait for completion and print the average fps.
@@ -131,19 +131,21 @@ Querying the current state of the pipeline is done using the `list-instances` co
This example starts two pipelines and then gets their status and request details.
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
+Output:
```
Starting pipeline object_detection/person_vehicle_bike, instance = 94cf72b718184615bfc181c6589b240c
```
```
-./vaclient/vaclient.sh start object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
+./client/pipeline_client.sh start object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
```
+Output:
```
Starting pipeline object_classification/vehicle_attributes, instance = 978e09c561f14fa1b793e8b644f30031
```
```
-./vaclient/vaclient.sh list-instances
+./client/pipeline_client.sh list-instances
```
```
01: object_detection/person_vehicle_bike
@@ -186,26 +188,26 @@ parameters: {
See [customizing pipeline requests](../docs/customizing_pipeline_requests.md) to further understand how pipeline request options can be customized.
### --quiet
-This optional argument is meant to handle logging verbosity common across all commands to vaclient.
+This optional argument, common to all pipeline_client commands, reduces logging verbosity.
> **Note**: If specified, `--quiet` needs to be placed ahead of the specific command, e.g. `start`, `run`, etc.
#### Start
-vaclient output will just be the pipeline instance.
+pipeline_client output will just be the pipeline instance.
```
-./vaclient/vaclient.sh --quiet start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh --quiet start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
```
-2
+280ff2c4a54611ec8b900242ac110002
```
#### Run
-vaclient output will be the pipeline instance followed by inference results.
+pipeline_client output will be the pipeline instance followed by inference results.
```
-./vaclient/vaclient.sh --quiet run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./client/pipeline_client.sh --quiet run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
```
```
-1
+280ff2c4a54611ec8b900242ac110002
Timestamp 1500000000
- person (0.54) [0.67, 0.88, 0.74, 1.00]
Timestamp 1666666666
@@ -213,7 +215,7 @@ Timestamp 1666666666
```
### Run/Start Arguments
-This section summarizes all the arguments for vaclient `run` and `start` commands.
+This section summarizes all the arguments for pipeline_client `run` and `start` commands.
#### pipeline (required)
Positional argument (first) that specifies the pipeline to be launched in the form of `pipeline name/pipeline version`.
@@ -223,7 +225,7 @@ Positional argument (second) that specifies the location of the content to play/
> Note: The uri argument can be skipped only if it is passed in via `--request-file`.
#### --destination
-By default, vaclient uses a generic template for destination:
+By default, pipeline_client uses a generic template for destination:
```json
{
"destination": {
@@ -236,15 +238,15 @@ By default, vaclient uses a generic template for destination:
```
Destination configuration can be updated with `--destination`. This argument affects only the metadata part of the destination.
In the following example, passing in `--destination path /tmp/newfile.jsonl` will update the file path for saving inference results.
-> **Note**: You may need to volume mount this new location when running VA Serving.
+> **Note**: You may need to volume mount this new location when running Pipeline Server.
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl
```
-If other destination types are specified (e.g. `mqtt` or `kafka` ), the pipeline will try to publish to specified broker and vaclient will subscribe to it and display published metadata. Here is an mqtt example using a broker on localhost.
+If other destination types are specified (e.g. `mqtt` or `kafka`), the pipeline will try to publish to the specified broker; pipeline_client will subscribe to it and display the published metadata. Here is an mqtt example using a broker on localhost.
```
docker run --rm --network=host -d eclipse-mosquitto:1.6
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination type mqtt --destination host localhost:1883 --destination topic pipeline-server
+./client/pipeline_client.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination type mqtt --destination host localhost:1883 --destination topic pipeline-server
```
```
Starting pipeline object_detection/person_vehicle_bike, instance =
@@ -263,17 +265,17 @@ Timestamp 4000000000
For more details on destination types, see [customizing pipeline requests](../docs/customizing_pipeline_requests.md#metadata).
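The published metadata can also be inspected with any MQTT client. A minimal sketch using the paho-mqtt package (an extra dependency assumed here; pipeline_client has its own subscriber built in):

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, message):
    # Each payload is one frame's inference metadata as JSON
    print(message.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("pipeline-server")
client.loop_forever()
```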
#### --rtsp-path
If you are utilizing RTSP restreaming, `--rtsp-path` can be used to update the `server_url` path. This updates the frame part of the destination under the hood.
-For example, adding `--rtsp-path new_path` will able you to view the stream at `rtsp://:/new_path`. More details on RTSP restreaming in [running_video_analytics_serving](../docs/running_video_analytics_serving.md) documentation.
+For example, adding `--rtsp-path new_path` will allow you to view the stream at `rtsp://<host>:<port>/new_path`. More details on RTSP restreaming are in the [running_pipeline_server](../docs/running_pipeline_server.md) documentation.
#### --parameter
-By default, vaclient relies on pipeline parameter defaults. This can be updated with `--parameter` option. See [Defining Pipelines](../docs/defining_pipelines.md) to know how parameters are defined. The following example adds `--parameter detection-device GPU`
+By default, pipeline_client relies on pipeline parameter defaults. This can be updated with the `--parameter` option. See [Defining Pipelines](../docs/defining_pipelines.md) for how parameters are defined. The following example adds `--parameter detection-device GPU`:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter detection-device GPU
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter detection-device GPU
```
#### --parameter-file
Specifies a JSON file that contains parameters in key, value pairs. Parameters from this file take precedence over those set by `--parameter`.
-> **Note**: As vaclient volume mounts /tmp, the parameter file may be placed there.
+> **Note**: As pipeline_client volume mounts /tmp, the parameter file may be placed there.
A sample parameter file can look like:
```json
@@ -285,26 +287,26 @@ A sample parameter file can look like
```
The above file, say `/tmp/sample_parameters.json`, may be used as follows:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter-file /tmp/sample_parameters.json
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter-file /tmp/sample_parameters.json
```
#### --tag
Specifies a key, value pair to update the request with. This information is added to each frame's metadata.
This example adds tags for the direction and location of video capture:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --tag camera_location parking_lot
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --tag camera_location parking_lot
```
#### --server-address
This can be used with any command to specify a remote HTTP server address. Here we start a pipeline on the remote server `http://remote-server.my-domain.com:8080`.
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --server=address http://remote-server.my-domain.com:8080
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --server-address http://remote-server.my-domain.com:8080
```
#### --status-only
Use with the `run` command to disable output of metadata and periodically display pipeline state and fps:
```
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --status-only
+./client/pipeline_client.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --status-only
```
```
Starting pipeline 0
@@ -324,7 +326,7 @@ Pipeline status @ 21s
Takes an integer value that specifies the number of streams to start (default value is 1) using the specified request.
If the number of streams is greater than one, "status only" display mode is used.
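Under the hood this issues the same POST request once per stream and then polls each instance; a sketch (request body and endpoint assumed as in the earlier examples), with the pipeline_client equivalent shown below:

```python
import requests

SERVER = "http://localhost:8080"
PIPELINE = "pipelines/object_detection/person_vehicle_bike"
body = {"source": {"uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", "type": "uri"}}

# Start four identical streams, collecting one instance id per stream
instances = [requests.post("{}/{}".format(SERVER, PIPELINE), json=body, timeout=30).text.strip().strip('"')
             for _ in range(4)]
print(instances)
```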
```
-python3 vaclient run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --status-only --number-of-streams 4 --server-address http://hbruce-desk2.jf.intel.com:8080
+./client/pipeline_client.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --status-only --number-of-streams 4 --server-address http://hbruce-desk2.jf.intel.com:8080
```
```
Starting pipeline 0
@@ -360,7 +362,7 @@ Pipeline status @ 21s
#### --request-file
Specifies a JSON file that contains the complete request, i.e. source, destination, tags and parameters.
See [Customizing Pipeline Requests](../docs/customizing_pipeline_requests.md) for examples of requests in JSON format.
-> **Note**: As vaclient volume mounts /tmp, the request file may be placed there.
+> **Note**: As pipeline_client volume mounts /tmp, the request file may be placed there.
A sample request file can look like:
```json
@@ -383,14 +385,14 @@ A sample request file can look like
```
The above file, named for instance `/tmp/sample_request.json`, may be used as follows:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike --request-file /tmp/sample_request.json
+./client/pipeline_client.sh start object_detection/person_vehicle_bike --request-file /tmp/sample_request.json
```
#### --show-request
-All vaclient commands can be used with the `--show-request` option which will print out the HTTP request and exit i.e it will not be sent to VA Serving.
+All pipeline_client commands can be used with the `--show-request` option, which will print out the HTTP request and exit, i.e. it will not be sent to the Pipeline Server.
This example shows the result of `--show-request` when the pipeline is started with options passed in:
```
-./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl --parameter detection-device GPU --tag direction east --tag camera_location parking_lot --show-request
+./client/pipeline_client.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl --parameter detection-device GPU --tag direction east --tag camera_location parking_lot --show-request
```
```
@@ -400,7 +402,7 @@ Body:{'source': {'uri': 'https://github.com/intel-iot-devkit/sample-videos/blob/
See [View REST request](../README.md#view-rest-request) to see how the output from `--show-request` can be mapped to a curl command.
### Status/Wait/Stop Arguments
-This section summarizes all the arguments for vaclient `status`, `wait` and `stop` commands.
+This section summarizes all the arguments for pipeline_client `status`, `wait` and `stop` commands.
#### pipeline (required)
Positional argument (first) that specifies the pipeline to wait on/query status of/stop as indicated in the form of `pipeline name/pipeline version`
@@ -413,23 +415,23 @@ As mentioned before, `--show-request` option which will print out the HTTP reque
##### Status
```
-./vaclient/vaclient.sh status object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
+./client/pipeline_client.sh status object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
```
```
-GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/94cf72b718184615bfc181c6589b240c/status
+GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/status/94cf72b718184615bfc181c6589b240c
```
##### Wait
```
-./vaclient/vaclient.sh wait object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
+./client/pipeline_client.sh wait object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
```
```
-GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/94cf72b718184615bfc181c6589b240c/status
+GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/status/94cf72b718184615bfc181c6589b240c
```
##### Stop
```
-./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
+./client/pipeline_client.sh stop object_detection/person_vehicle_bike 94cf72b718184615bfc181c6589b240c --show-request
```
```
diff --git a/vaclient/__main__.py b/client/__main__.py
similarity index 100%
rename from vaclient/__main__.py
rename to client/__main__.py
diff --git a/vaclient/arguments.py b/client/arguments.py
similarity index 88%
rename from vaclient/arguments.py
rename to client/arguments.py
index 0185613..5f0bd72 100644
--- a/vaclient/arguments.py
+++ b/client/arguments.py
@@ -30,7 +30,7 @@
import sys
import json
import argparse
-import vaclient
+import pipeline_client
def get_typed_value(value):
@@ -53,6 +53,7 @@ def add_request_arguments(parser):
parser.add_argument('--destination', action='append', nargs=2, metavar=('key', 'value'), type=str, \
help='Update destination information with key and value pair')
parser.add_argument('--rtsp-path', type=str, help='RTSP endpoint path')
+ parser.add_argument('--webrtc-peer-id', type=str, help='WebRTC server side peer id')
parser.add_argument('--parameter', action='append', nargs=2, metavar=('key', 'value'), type=get_typed_value, \
dest='parameters', help='Update request parameter with key and value pair')
parser.add_argument('--parameter-file', type=str, dest='parameter_file', help='Update request parameter \
@@ -64,7 +65,7 @@ def add_request_arguments(parser):
parser.add_argument('--number-of-streams', type=int, default=1, dest="streams", help='Set number of streams')
parser.add_argument("--status-only", action='store_true', help='Only show status')
-def parse_args(program_name="Intel(R) DL Streamer Pipeline Server Client"):
+def parse_args(program_name="Pipeline Client"):
"""Process command line options"""
#pylint: disable=too-many-statements
parser = argparse.ArgumentParser(
@@ -75,40 +76,40 @@ def parse_args(program_name="Intel(R) DL Streamer Pipeline Server Client"):
parser_run = subparsers.add_parser('run', help='Start specified pipeline with specified source. \
Meta-data will be displayed as pipeline runs. Once pipeline ends the average fps is displayed')
- parser_run.set_defaults(command=vaclient.run)
+ parser_run.set_defaults(command=pipeline_client.run)
add_request_arguments(parser_run)
add_common_arguments(parser_run)
parser_start = subparsers.add_parser('start', help='start specified pipeline')
- parser_start.set_defaults(command=vaclient.start)
+ parser_start.set_defaults(command=pipeline_client.start)
add_request_arguments(parser_start)
add_common_arguments(parser_start)
parser_status = subparsers.add_parser('status', help='Print status of specified pipeline')
- parser_status.set_defaults(command=vaclient.status)
+ parser_status.set_defaults(command=pipeline_client.status)
add_instance_arguments(parser_status)
add_common_arguments(parser_status)
parser_wait = subparsers.add_parser('wait', help='Connect to a running pipeline and wait until completion')
- parser_wait.set_defaults(command=vaclient.wait)
+ parser_wait.set_defaults(command=pipeline_client.wait)
add_instance_arguments(parser_wait)
add_common_arguments(parser_wait)
parser_stop = subparsers.add_parser('stop', help='Stop a specified pipeline')
- parser_stop.set_defaults(command=vaclient.stop)
+ parser_stop.set_defaults(command=pipeline_client.stop)
add_instance_arguments(parser_stop)
add_common_arguments(parser_stop)
parser_list_pipelines = subparsers.add_parser('list-pipelines', help='List loaded pipelines')
- parser_list_pipelines.set_defaults(command=vaclient.list_pipelines)
+ parser_list_pipelines.set_defaults(command=pipeline_client.list_pipelines)
add_common_arguments(parser_list_pipelines)
parser_list_models = subparsers.add_parser('list-models', help='List loaded models')
- parser_list_models.set_defaults(command=vaclient.list_models)
+ parser_list_models.set_defaults(command=pipeline_client.list_models)
add_common_arguments(parser_list_models)
parser_list_instances = subparsers.add_parser('list-instances', help='List active pipeline instances')
- parser_list_instances.set_defaults(command=vaclient.list_instances)
+ parser_list_instances.set_defaults(command=pipeline_client.list_instances)
add_common_arguments(parser_list_instances)
parser.add_argument("--quiet", action="store_false",
diff --git a/vaclient/parameter_files/object-line-crossing.json b/client/parameter_files/object-line-crossing.json
similarity index 100%
rename from vaclient/parameter_files/object-line-crossing.json
rename to client/parameter_files/object-line-crossing.json
diff --git a/vaclient/parameter_files/object-zone-count.json b/client/parameter_files/object-zone-count.json
similarity index 100%
rename from vaclient/parameter_files/object-zone-count.json
rename to client/parameter_files/object-zone-count.json
diff --git a/vaclient/vaclient.py b/client/pipeline_client.py
similarity index 91%
rename from vaclient/vaclient.py
rename to client/pipeline_client.py
index 57fdfa9..248a729 100755
--- a/vaclient/vaclient.py
+++ b/client/pipeline_client.py
@@ -13,7 +13,7 @@
from html.parser import HTMLParser
import requests
import results_watcher
-from vaserving.pipeline import Pipeline
+from server.pipeline import Pipeline
RESPONSE_SUCCESS = 200
TIMEOUT = 30
@@ -40,6 +40,13 @@
"path": ""
}
}
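+# Frame destination template for WebRTC; "peer-id" is filled from --webrtc-peer-id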
+WEBRTC_TEMPLATE = {
+ "frame": {
+ "type": "webrtc",
+ "peer-id": ""
+ }
+}
+
SERVER_CONNECTION_FAILURE_MESSAGE = "Unable to connect to server, check if the pipeline-server microservice is running"
def html_handle_data(self, data):
@@ -131,7 +138,7 @@ def start(args):
def stop(args):
if stop_pipeline(args.server_address, args.instance, args.show_request):
- print_fps(get_pipeline_status(args.server_address, args.instance))
+ print_fps([get_pipeline_status(args.server_address, args.instance)])
def wait(args):
try:
@@ -140,11 +147,11 @@ def wait(args):
print(pipeline_status["state"])
else:
print("Unable to fetch status")
- print_fps(wait_for_pipeline_completion(args.server_address, args.instance))
+ print_fps([wait_for_pipeline_completion(args.server_address, args.instance)])
except KeyboardInterrupt:
print()
stop_pipeline(args.server_address, args.instance)
- print_fps(wait_for_pipeline_completion(args.server_address, args.instance))
+ print_fps([wait_for_pipeline_completion(args.server_address, args.instance)])
def status(args):
pipeline_status = get_pipeline_status(args.server_address, args.instance, args.show_request)
@@ -202,6 +209,10 @@ def update_request_options(request,
rtsp_template = RTSP_TEMPLATE
rtsp_template['frame']['path'] = args.rtsp_path
request['destination'].update(rtsp_template)
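+    # Mirror the RTSP handling above for the WebRTC frame destination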
+ if hasattr(args, 'webrtc_peer_id') and args.webrtc_peer_id:
+ webrtc_template = WEBRTC_TEMPLATE
+ webrtc_template['frame']['peer-id'] = args.webrtc_peer_id
+ request['destination'].update(webrtc_template)
if hasattr(args, 'request_file') and args.request_file:
with open(args.request_file, 'r') as request_file:
request.update(json.load(request_file))
@@ -256,13 +267,15 @@ def wait_for_pipeline_running(server_address,
timeout_count = 0
while status and not Pipeline.State[status["state"]] == Pipeline.State.RUNNING:
status = get_pipeline_status(server_address, instance_id)
- if not status or status["state"] == "ERROR":
- raise ValueError("Error in pipeline, please check pipeline-server log messages")
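+ # Stop polling on any terminal state; the ERROR check after the loop decides whether to raise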
+ if not status or Pipeline.State[status["state"]].stopped():
+ break
time.sleep(SLEEP_FOR_STATUS)
timeout_count += 1
if timeout_count * SLEEP_FOR_STATUS >= timeout_sec:
print("Timed out waiting for RUNNING status")
break
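+ # Raising after the loop means a pipeline that stopped with ERROR is still reported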
+ if not status or status["state"] == "ERROR":
+ raise ValueError("Error in pipeline, please check pipeline-server log messages")
return Pipeline.State[status["state"]] == Pipeline.State.RUNNING
def wait_for_pipeline_completion(server_address, instance_id):
@@ -277,6 +290,7 @@ def wait_for_pipeline_completion(server_address, instance_id):
def wait_for_all_pipeline_completions(server_address, instance_ids, status_only=False):
status = {"state" : "RUNNING"}
+ status_list = []
stopped = False
num_streams = len(instance_ids)
if num_streams == 0:
@@ -295,15 +309,17 @@ def wait_for_all_pipeline_completions(server_address, instance_ids, status_only=False):
instance_id, status["state"], round(status["avg_fps"])))
if not Pipeline.State[status["state"]].stopped():
all_streams_stopped = False
+ status_list.append(status)
first_pipeline = False
stopped = all_streams_stopped
else:
time.sleep(SLEEP_FOR_STATUS)
status = get_pipeline_status(server_address, instance_ids[0])
stopped = Pipeline.State[status["state"]].stopped()
+ status_list.append(status)
if status and status["state"] == "ERROR":
raise ValueError("Error in pipeline, please check pipeline-server log messages")
- return status
+ return status_list
def get_pipeline_status(server_address, instance_id, show_request=False):
status_url = urljoin(server_address,
@@ -361,9 +377,15 @@ def delete(url, show_request=False):
raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) from error
return None
-def print_fps(status):
- if status and 'avg_fps' in status:
- print('avg_fps: {:.2f}'.format(status['avg_fps']))
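+# Report the mean of avg_fps across all pipelines that reported a positive frame rate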
+def print_fps(status_list):
+ sum_of_all_fps = 0
+ num_of_pipelines = 0
+ for status in status_list:
+ if status and 'avg_fps' in status and status['avg_fps'] > 0:
+ sum_of_all_fps += status['avg_fps']
+ num_of_pipelines += 1
+ if num_of_pipelines > 0:
+ print('avg_fps: {:.2f}'.format(sum_of_all_fps/num_of_pipelines))
def print_list(item_list):
for item in item_list:
diff --git a/vaclient/vaclient.sh b/client/pipeline_client.sh
similarity index 66%
rename from vaclient/vaclient.sh
rename to client/pipeline_client.sh
index 8b88e87..0d222ca 100755
--- a/vaclient/vaclient.sh
+++ b/client/pipeline_client.sh
@@ -7,10 +7,10 @@
VOLUME_MOUNT="-v /tmp:/tmp "
IMAGE="dlstreamer-pipeline-server-gstreamer"
-VASERVING_ROOT=/home/pipeline-server
+PIPELINE_SERVER_ROOT=/home/pipeline-server
ENTRYPOINT="python3"
-ENTRYPOINT_ARGS="$VASERVING_ROOT/vaclient $@"
-LOCAL_VACLIENT_DIR=$(dirname $(readlink -f "$0"))
-ROOT_DIR=$(dirname $LOCAL_VACLIENT_DIR)
+ENTRYPOINT_ARGS="$PIPELINE_SERVER_ROOT/client $@"
+LOCAL_CLIENT_DIR=$(dirname $(readlink -f "$0"))
+ROOT_DIR=$(dirname $LOCAL_CLIENT_DIR)
"$ROOT_DIR/docker/run.sh" $INTERACTIVE --name \"\" --network host --image $IMAGE $VOLUME_MOUNT --entrypoint $ENTRYPOINT --entrypoint-args "$ENTRYPOINT_ARGS"
diff --git a/vaclient/results_watcher.py b/client/results_watcher.py
similarity index 100%
rename from vaclient/results_watcher.py
rename to client/results_watcher.py
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 360e89b..3ea18cf 100755
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -24,7 +24,7 @@ USER root
# Dependencies for OpenVINO
ARG BASE=dlstreamer-pipeline-server-gstreamer-base
-ENV VA_SERVING_BASE=${BASE}
+ENV PIPELINE_SERVER_BASE=${BASE}
SHELL ["/bin/bash", "-c"]
# Creating user pipeline-server and adding it to groups "video" and "users" to use GPU and VPU
@@ -32,21 +32,8 @@ ARG USER=pipeline-server
RUN useradd -ms /bin/bash -G video,audio,users ${USER} -d /home/pipeline-server && \
chown ${USER} -R /home/pipeline-server /root
-RUN if [ -f /opt/intel/openvino/install_dependencies/install_NEO_OCL_driver.sh ]; then \
- /opt/intel/openvino/install_dependencies/install_NEO_OCL_driver.sh -y ; exit 0; \
- fi
-
-RUN if [[ ${VA_SERVING_BASE} == *"openvino/ubuntu20_data_runtime:2021.2" ]]; then \
- DEBIAN_FRONTEND=noninteractive apt-get update && \
- apt-get install -y -q --no-install-recommends \
- intel-media-va-driver-non-free \
- gstreamer1.0-tools && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/* ;\
- fi
-
# Install boost library required for HDDL plugin
-RUN if [[ ${VA_SERVING_BASE} == *"openvino/ubuntu20_data_runtime"* ]]; then \
+RUN if [[ ${PIPELINE_SERVER_BASE} == *"openvino/ubuntu20_data_runtime"* || ${PIPELINE_SERVER_BASE} == *"intel/dlstreamer"* ]]; then \
DEBIAN_FRONTEND=noninteractive apt-get update && \
apt-get install -y -q --no-install-recommends \
libboost-program-options1.71.0 && \
@@ -55,29 +42,46 @@ RUN if [[ ${VA_SERVING_BASE} == *"openvino/ubuntu20_data_runtime"* ]]; then \
fi
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
- apt-get upgrade -y -q && \
- apt-get dist-upgrade -y -q && \
+ apt-get install -y -q --no-install-recommends \
+ gstreamer1.0-nice \
+ python3-pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* ;
-RUN if [[ ${VA_SERVING_BASE} == *"openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg"* ]]; then \
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y libjemalloc-dev
+
+# Install GStreamer packages not present in the dlstreamer base image
+RUN if [[ ${PIPELINE_SERVER_BASE} == *"dlstreamer"* ]]; then \
DEBIAN_FRONTEND=noninteractive apt-get update && \
apt-get install -y -q --no-install-recommends \
- python3 \
- python3-setuptools \
- python3-pip && \
+ gstreamer1.0-plugins-good \
+ gstreamer1.0-alsa \
+ gstreamer1.0-libav \
+ gstreamer1.0-plugins-bad \
+ gstreamer1.0-plugins-ugly \
+ gstreamer1.0-tools \
+ gstreamer1.0-vaapi \
+ gstreamer1.0-x \
+ libgstreamer-plugins-bad1.0-0 \
+ libgstreamer-plugins-base1.0-dev \
+ libgstreamer1.0-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* ;\
fi
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
+ apt-get upgrade -y -q && \
+ apt-get dist-upgrade -y -q && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/* ;
+
COPY ./requirements.txt /
RUN pip3 install --upgrade pip --no-cache-dir -r /requirements.txt
RUN rm -f /requirements.txt
-# Intel(R) DL Streamer Pipeline Server Python Modules
-COPY ./vaserving /home/pipeline-server/vaserving
-COPY ./vaclient /home/pipeline-server/vaclient
-COPY --chown=pipeline-server ./tools /home/pipeline-server/tools
+# Pipeline Server Python Modules
+COPY ./server /home/pipeline-server/server
+COPY ./client /home/pipeline-server/client
# Copy GVA Python extensions
COPY ./extensions /home/pipeline-server/extensions
@@ -97,9 +101,9 @@ FROM dlstreamer-pipeline-server as do_not_copy_models
# Creates a stage that copies models from the build context
FROM dlstreamer-pipeline-server as copy_models
-ONBUILD ARG MODELS_PATH
-ONBUILD ENV MODELS_PATH=${MODELS_PATH}
-ONBUILD COPY ${MODELS_PATH} /home/pipeline-server/models
+ONBUILD ARG PS_MODELS_PATH
+ONBUILD ENV PS_MODELS_PATH=${PS_MODELS_PATH}
+ONBUILD COPY ${PS_MODELS_PATH} /home/pipeline-server/models
# Stage that is used is controlled via MODELS_COMMAND build argument
FROM ${MODELS_COMMAND} as dlstreamer-pipeline-server-with-models
@@ -115,9 +119,9 @@ FROM dlstreamer-pipeline-server-with-models as do_not_copy_pipelines
# Creates a stage that copies pipelines from the build context
FROM dlstreamer-pipeline-server-with-models as copy_pipelines
-ONBUILD ARG PIPELINES_PATH
-ONBUILD ENV PIPELINES_PATH=${PIPELINES_PATH}
-ONBUILD COPY ${PIPELINES_PATH} /home/pipeline-server/pipelines
+ONBUILD ARG PS_PIPELINES_PATH
+ONBUILD ENV PS_PIPELINES_PATH=${PS_PIPELINES_PATH}
+ONBUILD COPY ${PS_PIPELINES_PATH} /home/pipeline-server/pipelines
# Stage that is used is controlled via PIPELINES_COMMAND build argument
FROM ${PIPELINES_COMMAND} as dlstreamer-pipeline-server-with-models-and-pipelines
@@ -128,8 +132,8 @@ FROM ${PIPELINES_COMMAND} as dlstreamer-pipeline-server-with-models-and-pipeline
# Final stage is controlled by the FINAL_STAGE build argument.
FROM dlstreamer-pipeline-server-with-models-and-pipelines as dlstreamer-pipeline-server-library
-ONBUILD RUN rm -rf /home/pipeline-server/vaserving/__main__.py
-ONBUILD RUN rm -rf /home/pipeline-server/vaserving/rest_api
+ONBUILD RUN rm -rf /home/pipeline-server/server/__main__.py
+ONBUILD RUN rm -rf /home/pipeline-server/server/rest_api
FROM dlstreamer-pipeline-server-with-models-and-pipelines as dlstreamer-pipeline-server-service
@@ -137,13 +141,27 @@ FROM dlstreamer-pipeline-server-with-models-and-pipelines as dlstreamer-pipeline
ONBUILD COPY ./requirements.service.txt /
ONBUILD RUN pip3 install --no-cache-dir -r /requirements.service.txt
ONBUILD RUN rm -f /requirements.service.txt
-ONBUILD ENTRYPOINT ["python3", "-m", "vaserving"]
+
+# WebRTC-specific dependencies installed via pip
+ONBUILD COPY ./requirements.webrtc.txt /
+ONBUILD RUN if [[ ${FRAMEWORK} == "gstreamer" ]]; then \
+ pip3 install --no-cache-dir -r /requirements.webrtc.txt; \
+ fi
+ONBUILD RUN rm -f /requirements.webrtc.txt
+
+ONBUILD ENTRYPOINT ["python3", "-m", "server"]
FROM ${FINAL_STAGE} as deploy
ARG USER=pipeline-server
-ENV PYTHONPATH=$PYTHONPATH:/home/pipeline-server
+ENV HOME=/home/pipeline-server
+ENV PYTHONPATH=/home/pipeline-server:$PYTHONPATH
+ENV GST_PLUGIN_PATH=$GST_PLUGIN_PATH:/usr/lib/x86_64-linux-gnu/gstreamer-1.0/
+ENV LD_PRELOAD=libjemalloc.so
+
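+# Point the Intel compute runtime at a writable OpenCL kernel cache so GPU pipelines warm up faster across runs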
+ENV cl_cache_dir=/home/.cl_cache
+RUN mkdir -p -m g+s $cl_cache_dir && chown ${USER}:users $cl_cache_dir
# Prepare XDG_RUNTIME_DIR
ENV XDG_RUNTIME_DIR=/home/.xdg_runtime_dir
diff --git a/docker/build.sh b/docker/build.sh
index aaea21e..09d3ecd 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -10,7 +10,7 @@ DOCKERFILE_DIR=$(dirname "$(readlink -f "$0")")
SOURCE_DIR=$(dirname "$DOCKERFILE_DIR")
BASE_IMAGE_FFMPEG="openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg:20.10"
-BASE_IMAGE_GSTREAMER="openvino/ubuntu20_data_runtime:2021.4.2"
+BASE_IMAGE_GSTREAMER="intel/dlstreamer:2022.1.0-ubuntu20"
BASE_IMAGE=${BASE_IMAGE:-""}
BASE_BUILD_CONTEXT=
@@ -18,7 +18,7 @@ BASE_BUILD_DOCKERFILE=
BASE_BUILD_TAG=
USER_BASE_BUILD_ARGS=
MODELS=$SOURCE_DIR/models_list/models.list.yml
-MODELS_PATH=models
+PS_MODELS_PATH=models
PIPELINES=
FRAMEWORK="gstreamer"
TAG=
@@ -35,8 +35,10 @@ BUILD_OPTIONS="--network=host "
BASE_BUILD_OPTIONS="--network=host "
SUPPORTED_IMAGES=($BASE_IMAGE_GSTREAMER $BASE_IMAGE_FFMPEG)
-OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"openvino/ubuntu20_data_dev"}
-OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.2"}
+DEFAULT_OMZ_IMAGE_GSTREAMER="intel/dlstreamer"
+DEFAULT_OMZ_VERSION_GSTREAMER="2022.1.0-ubuntu20-devel"
+DEFAULT_OMZ_IMAGE_FFMPEG="openvino/ubuntu18_data_dev"
+DEFAULT_OMZ_VERSION_FFMPEG="2021.2"
FORCE_MODEL_DOWNLOAD=
DEFAULT_GSTREAMER_BASE_BUILD_TAG="dlstreamer-pipeline-server-gstreamer-base"
@@ -246,6 +248,13 @@ get_options() {
BASE_IMAGE=${CACHE_PREFIX}$BASE_IMAGE_GSTREAMER
fi
fi
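+ # Pick Open Model Zoo tooling defaults that match the selected framework unless the caller overrides them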
+ if [ $FRAMEWORK = 'ffmpeg' ]; then
+ OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-$DEFAULT_OMZ_IMAGE_FFMPEG}
+ OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-$DEFAULT_OMZ_VERSION_FFMPEG}
+ else
+ OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-$DEFAULT_OMZ_IMAGE_GSTREAMER}
+ OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-$DEFAULT_OMZ_VERSION_GSTREAMER}
+ fi
if [ -f "$MODELS" ]; then
if [[ ! " ${SUPPORTED_IMAGES[@]} " =~ " ${BASE_IMAGE} " ]]; then
@@ -328,7 +337,7 @@ show_base_options() {
show_image_options() {
echo ""
- echo "Building Intel(R) DL Streamer Pipeline Server Image: '${TAG}'"
+ echo "Building Pipeline Server Image: '${TAG}'"
echo ""
echo " Base: '${BASE_IMAGE}'"
echo " Build Context: '${SOURCE_DIR}'"
@@ -350,8 +359,8 @@ show_help() {
echo " [--base base image]"
echo " [--framework ffmpeg || gstreamer]"
echo " [--models path to models directory or model list file or NONE]"
- echo " [--open-model-zoo-image specify the OpenVINO(TM) image to be used for downloading models from Open Model Zoo]"
- echo " [--open-model-zoo-version specify the version of OpenVINO(TM) image to be used for downloading models from Open Model Zoo]"
+ echo " [--open-model-zoo-image specify the base image to be used for downloading models from Open Model Zoo]"
+ echo " [--open-model-zoo-version specify the version of base image to be used for downloading models from Open Model Zoo]"
echo " [--force-model-download force the download of models from Open Model Zoo]"
echo " [--pipelines path to pipelines directory relative to $SOURCE_DIR or NONE]"
echo " [--base-build-context docker context for building base image]"
@@ -396,14 +405,14 @@ fi
BUILD_ARGS+=" --build-arg BASE=$BASE_IMAGE "
BUILD_ARGS+=" --build-arg FRAMEWORK=$FRAMEWORK "
if [ -n "$MODELS" ]; then
- BUILD_ARGS+="--build-arg MODELS_PATH=$MODELS_PATH "
+ BUILD_ARGS+="--build-arg PS_MODELS_PATH=$PS_MODELS_PATH "
BUILD_ARGS+="--build-arg MODELS_COMMAND=copy_models "
else
BUILD_ARGS+="--build-arg MODELS_COMMAND=do_not_copy_models "
fi
if [ -n "$PIPELINES" ]; then
- BUILD_ARGS+="--build-arg PIPELINES_PATH=$PIPELINES "
+ BUILD_ARGS+="--build-arg PS_PIPELINES_PATH=$PIPELINES "
BUILD_ARGS+="--build-arg PIPELINES_COMMAND=copy_pipelines "
else
BUILD_ARGS+="--build-arg PIPELINES_COMMAND=do_not_copy_pipelines "
@@ -415,7 +424,6 @@ else
BUILD_ARGS+="--build-arg FINAL_STAGE=dlstreamer-pipeline-server-library "
fi
-cp -f $DOCKERFILE_DIR/Dockerfile $DOCKERFILE_DIR/Dockerfile.env
ENVIRONMENT_FILE_LIST=
if [[ "$BASE_IMAGE" == *"openvino/"* ]]; then
@@ -429,11 +437,14 @@ for ENVIRONMENT_FILE in ${ENVIRONMENT_FILES[@]}; do
fi
done
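+# Build from the stock Dockerfile by default; generate Dockerfile.env only when base-image environment files must be appended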
+DOCKER_FILE=$DOCKERFILE_DIR/Dockerfile
if [ ! -z "$ENVIRONMENT_FILE_LIST" ]; then
+ DOCKER_FILE=$DOCKERFILE_DIR/Dockerfile.env
+ cp -f $DOCKERFILE_DIR/Dockerfile $DOCKER_FILE
cat $ENVIRONMENT_FILE_LIST | grep -E '=' | sed -e 's/,\s\+/,/g' | tr '\n' ' ' | tr '\r' ' ' > $DOCKERFILE_DIR/final.env
echo " HOME=/home/pipeline-server " >> $DOCKERFILE_DIR/final.env
- echo "ENV " | cat - $DOCKERFILE_DIR/final.env | tr -d '\n' >> $DOCKERFILE_DIR/Dockerfile.env
- printf "\nENV PYTHONPATH=\$PYTHONPATH:/home/pipeline-server\nENV GST_PLUGIN_PATH=\$GST_PLUGIN_PATH:/usr/lib/x86_64-linux-gnu/gstreamer-1.0/" >> $DOCKERFILE_DIR/Dockerfile.env
+ echo "ENV " | cat - $DOCKERFILE_DIR/final.env | tr -d '\n' >> $DOCKER_FILE
+ printf "\nENV PYTHONPATH=/home/pipeline-server:\$PYTHONPATH\nENV LD_PRELOAD=libjemalloc.so\nENV GST_PLUGIN_PATH=\$GST_PLUGIN_PATH:/usr/lib/x86_64-linux-gnu/gstreamer-1.0/" >> $DOCKER_FILE
fi
show_image_options
@@ -441,4 +452,4 @@ show_image_options
echo "-----------------------------"
echo "Building Image..."
echo "-----------------------------"
-launch "$RUN_PREFIX docker build -f "$DOCKERFILE_DIR/Dockerfile.env" $BUILD_OPTIONS $BUILD_ARGS -t $TAG --target $TARGET $SOURCE_DIR"
+launch "$RUN_PREFIX docker build -f "$DOCKER_FILE" $BUILD_OPTIONS $BUILD_ARGS -t $TAG --target $TARGET $SOURCE_DIR"
diff --git a/docker/run.sh b/docker/run.sh
index 485db50..b10f084 100755
--- a/docker/run.sh
+++ b/docker/run.sh
@@ -24,7 +24,8 @@ USER=
INTERACTIVE=-it
DEVICE_CGROUP_RULE=
USER_GROUPS=
-ENABLE_RTSP=
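+# Default both features off while honoring values already set in the caller's environment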
+ENABLE_RTSP=${ENABLE_RTSP:-"false"}
+ENABLE_WEBRTC=${ENABLE_WEBRTC:-"false"}
RTSP_PORT=8554
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
@@ -33,7 +34,7 @@ ENVIRONMENT=$(env | cut -f1 -d= | grep -E '_(proxy)$' | sed 's/^/-e / ' | tr '\n
show_options() {
echo ""
- echo "Running Intel(R) DL Streamer Pipeline Server Image: '${IMAGE}'"
+ echo "Running Pipeline Server Image: '${IMAGE}'"
echo " Models: '${MODELS}'"
echo " Pipelines: '${PIPELINES}'"
echo " Framework: '${FRAMEWORK}'"
@@ -69,6 +70,7 @@ show_help() {
echo " [--device device to pass to docker run]"
echo " [--enable-rtsp To enable rtsp re-streaming]"
echo " [--rtsp-port Specify the port to use for rtsp re-streaming]"
+ echo " [--enable-webrtc To enable WebRTC frame destination]"
echo " [--dev run in developer mode]"
exit 0
}
@@ -262,6 +264,9 @@ while [[ "$#" -gt 0 ]]; do
--enable-rtsp)
ENABLE_RTSP=true
;;
+ --enable-webrtc)
+ ENABLE_WEBRTC=true
+ ;;
--non-interactive)
unset INTERACTIVE
;;
@@ -326,11 +331,15 @@ fi
enable_hardware_access
-if [ ! -z "$ENABLE_RTSP" ]; then
- ENVIRONMENT+="-e ENABLE_RTSP=true -e RTSP_PORT=$RTSP_PORT "
+if [ "$ENABLE_RTSP" != "false" ]; then
+ ENVIRONMENT+="-e ENABLE_RTSP=$ENABLE_RTSP -e RTSP_PORT=$RTSP_PORT "
PORTS+="-p $RTSP_PORT:$RTSP_PORT "
fi
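+# Propagate WebRTC enablement into the container environment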
+if [ "$ENABLE_WEBRTC" != "false" ]; then
+ ENVIRONMENT+="-e ENABLE_WEBRTC=$ENABLE_WEBRTC "
+fi
+
if [ ! -z "$MODELS" ]; then
VOLUME_MOUNT+="-v $MODELS:/home/pipeline-server/models "
fi
diff --git a/docs/build_script_reference.md b/docs/build_script_reference.md
index 467d92c..1910098 100644
--- a/docs/build_script_reference.md
+++ b/docs/build_script_reference.md
@@ -11,10 +11,10 @@ usage: build.sh
[--base base image]
[--framework ffmpeg || gstreamer]
[--models path to models directory or model list file or NONE]
- [--open-model-zoo-image specify the OpenVINO™ image to be used for downloading models from Open Model Zoo]
- [--open-model-zoo-version specify the version of OpenVINO™ image to be used for downloading models from Open Model Zoo]
+ [--open-model-zoo-image specify the base image to be used for downloading models from Open Model Zoo]
+ [--open-model-zoo-version specify the version of the base image to be used for downloading models from Open Model Zoo]
[--force-model-download force the download of models from Open Model Zoo]
- [--pipelines path to pipelines directory relative to /home/thanaji/git/vaServing or NONE]
+ [--pipelines path to pipelines directory relative to