From 216ce3a08a61566cb3abcffdc0ddae30a57eb6b0 Mon Sep 17 00:00:00 2001 From: nv-camilleh Date: Mon, 6 May 2024 15:40:28 -0700 Subject: [PATCH] Update to 1.1.11 Release This release is compatible with DeepStream SDK 7.0 Ubuntu 22.04 Python 3.10 DeepStream SDK 7.0 Features: - New module platform_info() has been added for checking for WSL, integrated GPU, and aarch64. All apps and integration tests have been updated to use this module when checking for platform. See deepstream_test1 for simple usage. - SBSA is now supported. Check bindings/README.md for quick build instructions. - Apps have been updated to use request_pad_simple() instead of the deprecated method get_request_pad(). See gst-python API documentation for details. - New API nvds_measure_buffer_latency() has been added for pipeline and component latency measurement. See deepstream-test3 app and bindfunctions.cpp for reference. --- FAQ.md | 2 +- HOWTO.md | 2 +- README.md | 22 ++-- apps/README | 15 ++- apps/common/is_aarch_64.py | 25 ----- apps/common/platform_info.py | 94 ++++++++++++++++ apps/deepstream-custom-binding-test/README | 2 +- .../deepstream_custom_binding_test.py | 2 +- .../README | 2 +- .../deepstream_demux_multi_in_multi_out.py | 17 ++- .../ds_demux_pgie_config.txt | 2 +- .../README | 2 +- .../deepstream_imagedata-multistream_cupy.py | 18 ++- .../dstest_imagedata_cupy_config.txt | 2 +- .../README | 2 +- .../config_infer_primary_peoplenet.txt | 31 ++++-- ...pstream_imagedata-multistream_redaction.py | 14 ++- apps/deepstream-imagedata-multistream/README | 2 +- .../deepstream_imagedata-multistream.py | 44 +++++--- .../dstest_imagedata_config.txt | 2 +- apps/deepstream-nvdsanalytics/README | 2 +- .../deepstream_nvdsanalytics.py | 15 ++- .../dsnvanalytics_pgie_config.txt | 2 +- apps/deepstream-opticalflow/README | 2 +- .../deepstream-opticalflow.py | 3 +- apps/deepstream-preprocess-test/README | 2 +- .../deepstream_preprocess_test.py | 7 +- .../dstest1_pgie_config.txt | 2 +- apps/deepstream-rtsp-in-rtsp-out/README | 2 +- .../deepstream_test1_rtsp_in_rtsp_out.py | 7 +- .../dstest1_pgie_config.txt | 2 +- apps/deepstream-segmask/README | 2 +- apps/deepstream-segmask/deepstream_segmask.py | 15 ++- .../dstest_segmask_config.txt | 6 +- apps/deepstream-segmentation/README | 2 +- .../deepstream_segmentation.py | 17 ++- apps/deepstream-ssd-parser/README | 2 +- .../deepstream_ssd_parser.py | 3 +- apps/deepstream-test1-rtsp-out/README | 2 +- .../deepstream_test1_rtsp_out.py | 7 +- .../dstest1_pgie_config.txt | 2 +- apps/deepstream-test1-usbcam/README | 2 +- .../deepstream_test_1_usb.py | 15 ++- .../dstest1_pgie_config.txt | 2 +- apps/deepstream-test1/README | 2 +- apps/deepstream-test1/deepstream_test_1.py | 15 ++- apps/deepstream-test1/dstest1_pgie_config.txt | 2 +- apps/deepstream-test2/README | 2 +- apps/deepstream-test2/deepstream_test_2.py | 15 ++- apps/deepstream-test2/dstest2_pgie_config.txt | 2 +- apps/deepstream-test3/README | 5 +- .../config_infer_primary_peoplenet.txt | 19 ++-- apps/deepstream-test3/deepstream_test_3.py | 39 +++++-- apps/deepstream-test3/dstest3_pgie_config.txt | 2 +- apps/deepstream-test4/README | 2 +- apps/deepstream-test4/deepstream_test_4.py | 19 ++-- apps/deepstream-test4/dstest4_pgie_config.txt | 4 +- apps/runtime_source_add_delete/README | 2 +- .../deepstream_rt_src_add_del.py | 26 +++-- .../dstest_pgie_config.txt | 2 +- bindings/CMakeLists.txt | 7 +- bindings/README.md | 103 ++++++++++++------ bindings/docstrings/functionsdoc.h | 18 +++ bindings/docstrings/nvosddoc.h | 3 +- 
bindings/packaging/setup.py | 2 +- .../ubuntu-cross-aarch64.Dockerfile | 2 +- bindings/src/bindfunctions.cpp | 53 +++++++-- bindings/src/bindnvosd.cpp | 13 ++- bindings/src/pyds.cpp | 2 +- docs/PYTHON_API/Methods/methodsdoc.rst | 4 + docs/conf.py | 2 +- notebooks/configs/dslaunchpad_pgie_config.txt | 2 +- notebooks/deepstream_launchpad.ipynb | 6 +- notebooks/deepstream_test_1.ipynb | 19 ++-- notebooks/deepstream_test_4.ipynb | 23 ++-- tests/__pycache__/__init__.cpython-310.pyc | Bin 166 -> 0 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 173 -> 0 bytes .../frame_iterator.cpython-310.pyc | Bin 2237 -> 0 bytes .../generic_pipeline.cpython-310.pyc | Bin 3851 -> 0 bytes .../pipeline_fakesink.cpython-310.pyc | Bin 2246 -> 0 bytes .../pipeline_fakesink_tracker.cpython-310.pyc | Bin 2495 -> 0 bytes .../__pycache__/tracker_utils.cpython-310.pyc | Bin 965 -> 0 bytes .../common/__pycache__/utils.cpython-310.pyc | Bin 2247 -> 0 bytes tests/integration/README.md | 2 +- .../__pycache__/__init__.cpython-310.pyc | Bin 178 -> 0 bytes .../test.cpython-310-pytest-7.4.2.pyc | Bin 5071 -> 0 bytes tests/integration/deepstream_demo.py | 4 +- tests/integration/ds_base_config.txt | 2 +- tests/integration/ds_pgie_config.txt | 2 +- tests/integration/test.py | 14 +-- tests/{common => testcommon}/__init__.py | 0 .../{common => testcommon}/frame_iterator.py | 0 .../generic_pipeline.py | 8 +- .../pipeline_fakesink.py | 8 +- .../pipeline_fakesink_tracker.py | 8 +- .../pipeline_filesink.py | 10 +- .../pipeline_nveglglessink.py | 10 +- tests/{common => testcommon}/tracker_utils.py | 0 tests/{common => testcommon}/utils.py | 9 +- 99 files changed, 620 insertions(+), 299 deletions(-) delete mode 100644 apps/common/is_aarch_64.py create mode 100644 apps/common/platform_info.py delete mode 100644 tests/__pycache__/__init__.cpython-310.pyc delete mode 100644 tests/common/__pycache__/__init__.cpython-310.pyc delete mode 100644 tests/common/__pycache__/frame_iterator.cpython-310.pyc delete mode 100644 tests/common/__pycache__/generic_pipeline.cpython-310.pyc delete mode 100644 tests/common/__pycache__/pipeline_fakesink.cpython-310.pyc delete mode 100644 tests/common/__pycache__/pipeline_fakesink_tracker.cpython-310.pyc delete mode 100644 tests/common/__pycache__/tracker_utils.cpython-310.pyc delete mode 100644 tests/common/__pycache__/utils.cpython-310.pyc delete mode 100644 tests/integration/__pycache__/__init__.cpython-310.pyc delete mode 100644 tests/integration/__pycache__/test.cpython-310-pytest-7.4.2.pyc rename tests/{common => testcommon}/__init__.py (100%) rename tests/{common => testcommon}/frame_iterator.py (100%) rename tests/{common => testcommon}/generic_pipeline.py (95%) rename tests/{common => testcommon}/pipeline_fakesink.py (91%) rename tests/{common => testcommon}/pipeline_fakesink_tracker.py (92%) rename tests/{common => testcommon}/pipeline_filesink.py (92%) rename tests/{common => testcommon}/pipeline_nveglglessink.py (91%) rename tests/{common => testcommon}/tracker_utils.py (100%) rename tests/{common => testcommon}/utils.py (91%) diff --git a/FAQ.md b/FAQ.md index 6c6dc0e..70e295b 100644 --- a/FAQ.md +++ b/FAQ.md @@ -165,5 +165,5 @@ The pyds wheel installs the pyds.so library where all the pip packages are store Command to install the pyds wheel is: ```bash - $ pip3 install ./pyds-1.1.10-py3-none*.whl + $ pip3 install ./pyds-1.1.11-py3-none*.whl ``` \ No newline at end of file diff --git a/HOWTO.md b/HOWTO.md index 0362c86..fac9902 100644 --- a/HOWTO.md +++ b/HOWTO.md @@ -16,7 +16,7 @@ This guide 
provides resources for DeepStream application development in Python. ## Prerequisites * Ubuntu 22.04 -* [DeepStream SDK 6.4](https://developer.nvidia.com/deepstream-download) or later +* [DeepStream SDK 7.0](https://developer.nvidia.com/deepstream-download) or later * Python 3.10 * [Gst Python](https://gstreamer.freedesktop.org/modules/gst-python.html) v1.20.3 diff --git a/README.md b/README.md index f9d5a10..01bf331 100644 --- a/README.md +++ b/README.md @@ -2,20 +2,26 @@ This repository contains Python bindings and sample applications for the [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk). -SDK version supported: 6.4 +SDK version supported: 7.0 -This release only supports Ubuntu 22.04 for DeepStreamSDK 6.4 with Python 3.10 and [gst-python](3rdparty/gst-python/) 1.20.3! Ubuntu 20.04 for DeepStreamSDK 6.3 with Python 3.8 support is NOW DEPRECATED +This release only supports Ubuntu 22.04 for DeepStreamSDK 7.0 with Python 3.10 and [gst-python](3rdparty/gst-python/) 1.20.3! Ubuntu 20.04 for DeepStreamSDK 6.3 with Python 3.8 support is NOW DEPRECATED -The bindings sources along with build instructions are available under [bindings](bindings)! We include one [guide](bindings/BINDINGSGUIDE.md) for contributing to bindings and another [guide](bindings/CUSTOMUSERMETAGUIDE.md) for advanced use-cases such as writing bindings for custom data structures. - -Download the latest release package complete with bindings and sample applications from the [release section](../../releases). +The bindings sources along with build instructions are available under [bindings](bindings)! We include one [guide](bindings/BINDINGSGUIDE.md) for contributing to bindings and another [guide](bindings/CUSTOMUSERMETAGUIDE.md) for advanced use-cases such as writing bindings for custom data structures. Please report any issues or bugs on the [DeepStream SDK Forums](https://devtalk.nvidia.com/default/board/209). This enables the DeepStream community to find help at a central location. - [DeepStream Python Apps](#deepstream-python-apps) + - [Setup](#setup) - [Python Bindings](#python-bindings) + - [Python Bindings Breaking API Change](#python-bindings-breaking-api-change) - [Sample Applications](#sample-applications) +## Setup +Once you have DeepStreamSDK pre-requisites and DeepStreamSDK installed on the system, navigate to /sources/ dir which is /opt/nvidia/deepstream/deepstream/sources/ and git clone deepstream_python_apps repo here. + +The latest bindings can be installed from [release section](../../releases). +You can also build the bindings from source using the instructions in the [bindings readme](bindings/README.md) if needed. 
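A quick way to confirm the installed bindings are importable before running the apps (illustrative only, not part of this patch; assumes the pyds wheel and gst-python are already installed):

```python
# Minimal sanity check for the pyds bindings and GStreamer Python support.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

import pyds  # DeepStream Python bindings (installed from the wheel or built from source)

Gst.init(None)
print("GStreamer:", Gst.version_string())
print("pyds loaded from:", pyds.__file__)
```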
+ ## Python Bindings @@ -45,13 +51,13 @@ To run the sample applications or write your own, please consult the [HOW-TO Gui We currently provide the following sample applications: * [deepstream-test1](apps/deepstream-test1) -- 4-class object detection pipeline, also demonstrates support for new nvstreammux -* **UPDATED** [deepstream-test2](apps/deepstream-test2) -- 4-class object detection, tracking and attribute classification pipeline - now uses new names for tracker meta data types in DS 6.4 +* [deepstream-test2](apps/deepstream-test2) -- 4-class object detection, tracking and attribute classification pipeline * [deepstream-test3](apps/deepstream-test3) -- multi-stream pipeline performing 4-class object detection, also supports triton inference server, no-display mode, file-loop and silent mode -* **UPDATED** [deepstream-test4](apps/deepstream-test4) -- msgbroker for sending analytics results to the cloud - now supports MQTT protocol adaptor +* [deepstream-test4](apps/deepstream-test4) -- msgbroker for sending analytics results to the cloud * [deepstream-imagedata-multistream](apps/deepstream-imagedata-multistream) -- multi-stream pipeline with access to image buffers * [deepstream-ssd-parser](apps/deepstream-ssd-parser) -- SSD model inference via Triton server with output parsing in Python * [deepstream-test1-usbcam](apps/deepstream-test1-usbcam) -- deepstream-test1 pipeline with USB camera input -* **UPDATED** [deepstream-test1-rtsp-out](apps/deepstream-test1-rtsp-out) -- deepstream-test1 pipeline with RTSP output - now demonstrates adding software encoder option to support Jetson Orin Nano +* [deepstream-test1-rtsp-out](apps/deepstream-test1-rtsp-out) -- deepstream-test1 pipeline with RTSP output, demonstrates adding software encoder option to support Jetson Orin Nano * [deepstream-opticalflow](apps/deepstream-opticalflow) -- optical flow and visualization pipeline with flow vectors returned in NumPy array * [deepstream-segmentation](apps/deepstream-segmentation) -- segmentation and visualization pipeline with segmentation mask returned in NumPy array * [deepstream-nvdsanalytics](apps/deepstream-nvdsanalytics) -- multistream pipeline with analytics plugin diff --git a/apps/README b/apps/README index 3dcc0f6..4d53e15 100644 --- a/apps/README +++ b/apps/README @@ -20,7 +20,7 @@ DeepStream SDK Python Bindings ================================================================================ Setup pre-requisites: - Ubuntu 22.04 -- NVIDIA DeepStream SDK 6.4 +- NVIDIA DeepStream SDK 7.0 - Python 3.10 - Gst-python @@ -36,7 +36,7 @@ Package Contents Installing Pre-requisites: -------------------------------------------------------------------------------- -DeepStream SDK 6.4 +DeepStream SDK 7.0 -------------------- Download and install from https://developer.nvidia.com/deepstream-download @@ -51,16 +51,23 @@ If missing, install with the following steps: $ sudo apt update $ sudo apt install python3-gi python3-dev python3-gst-1.0 -y +cuda-python +----------- +$ pip3 install cuda-python + -------------------------------------------------------------------------------- Running the samples -------------------------------------------------------------------------------- -The apps are configured to work from inside the DeepStream SDK 6.4 installation. +The apps are configured to work from inside the DeepStream SDK 7.0 installation. 
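The cuda-python package above is used by the new common/platform_info.py module (added later in this patch) to distinguish iGPU from dGPU. Below is a minimal sketch of the pattern the updated apps follow for sink selection and streammux pad requests; it is illustrative only and assumes it is run from one of the app directories so that common.platform_info is importable:

    # Sketch of the platform-detection pattern used by the updated sample apps.
    import sys
    sys.path.append('../')  # make apps/common importable, as the sample apps do

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    from common.platform_info import PlatformInfo

    Gst.init(None)
    platform_info = PlatformInfo()

    # nv3dsink on integrated-GPU (Jetson) and other aarch64 platforms,
    # nveglglessink on x86 dGPU systems -- mirroring the apps' sink selection.
    if platform_info.is_integrated_gpu() or platform_info.is_platform_aarch64():
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
    else:
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")

    # get_request_pad() is deprecated; the apps now use request_pad_simple().
    streammux = Gst.ElementFactory.make("nvstreammux", "stream-muxer")
    sinkpad = streammux.request_pad_simple("sink_0")

(The apps check is_integrated_gpu() and is_platform_aarch64() in separate branches; both branches create nv3dsink, so the combined check above is equivalent.)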
Clone the deepstream_python_apps repo under /sources: $ git clone https://github.com/NVIDIA-AI-IOT/deepstream_python_apps This will create the following directory: /sources/deepstream_python_apps - +Cuda python APIs are used to distinguish between iGPU and dGPU. +Install cuda-python using: +pip3 install cuda-python +NOTE: is_aarch64.py is deprecated in favor of platform_info.py Follow README in each app's directory to run the app. Example: running test1 app: diff --git a/apps/common/is_aarch_64.py b/apps/common/is_aarch_64.py deleted file mode 100644 index 26276c3..0000000 --- a/apps/common/is_aarch_64.py +++ /dev/null @@ -1,25 +0,0 @@ -################################################################################ -# SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -################################################################################ - -import platform -import sys - - -def is_aarch64(): - return platform.uname()[4] == 'aarch64' - -sys.path.append('/opt/nvidia/deepstream/deepstream/lib') diff --git a/apps/common/platform_info.py b/apps/common/platform_info.py new file mode 100644 index 0000000..9f8d441 --- /dev/null +++ b/apps/common/platform_info.py @@ -0,0 +1,94 @@ +################################################################################ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +import sys +import platform +from threading import Lock +from cuda import cudart +from cuda import cuda + +guard_platform_info = Lock() + +class PlatformInfo: + def __init__(self): + self.is_wsl_system = False + self.wsl_verified = False + self.is_integrated_gpu_system = False + self.is_integrated_gpu_verified = False + self.is_aarch64_platform = False + self.is_aarch64_verified = False + + def is_wsl(self): + with guard_platform_info: + # Check if its already verified as WSL system or not. 
+ if not self.wsl_verified: + try: + # Open /proc/version file + with open("/proc/version", "r") as version_file: + # Read the content + version_info = version_file.readline() + version_info = version_info.lower() + self.wsl_verified = True + + # Check if "microsoft" is present in the version information + if "microsoft" in version_info: + self.is_wsl_system = True + except Exception as e: + print(f"ERROR: Opening /proc/version failed: {e}") + + return self.is_wsl_system + + def is_integrated_gpu(self): + #Using cuda apis to identify whether integrated/discreet + #This is required to distinguish Tegra and ARM_SBSA devices + with guard_platform_info: + #Cuda initialize + if not self.is_integrated_gpu_verified: + cuda_init_result, = cuda.cuInit(0) + if cuda_init_result == cuda.CUresult.CUDA_SUCCESS: + #Get cuda devices count + device_count_result, num_devices = cuda.cuDeviceGetCount() + if device_count_result == cuda.CUresult.CUDA_SUCCESS: + #If atleast one device is found, we can use the property from + #the first device + if num_devices >= 1: + #Get properties from first device + property_result, properties = cudart.cudaGetDeviceProperties(0) + if property_result == cuda.CUresult.CUDA_SUCCESS: + print("Is it Integrated GPU? :", properties.integrated) + self.is_integrated_gpu_system = properties.integrated + self.is_integrated_gpu_verified = True + else: + print("ERROR: Getting cuda device property failed: {}".format(property_result)) + else: + print("ERROR: No cuda devices found to check whether iGPU/dGPU") + else: + print("ERROR: Getting cuda device count failed: {}".format(device_count_result)) + else: + print("ERROR: Cuda init failed: {}".format(cuda_init_result)) + + return self.is_integrated_gpu_system + + def is_platform_aarch64(self): + #Check if platform is aarch64 using uname + if not self.is_aarch64_verified: + if platform.uname()[4] == 'aarch64': + self.is_aarch64_platform = True + self.is_aarch64_verified = True + return self.is_aarch64_platform + +sys.path.append('/opt/nvidia/deepstream/deepstream/lib') diff --git a/apps/deepstream-custom-binding-test/README b/apps/deepstream-custom-binding-test/README index 7afdec7..127903d 100644 --- a/apps/deepstream-custom-binding-test/README +++ b/apps/deepstream-custom-binding-test/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-custom-binding-test/deepstream_custom_binding_test.py b/apps/deepstream-custom-binding-test/deepstream_custom_binding_test.py index f36f512..70bea41 100644 --- a/apps/deepstream-custom-binding-test/deepstream_custom_binding_test.py +++ b/apps/deepstream-custom-binding-test/deepstream_custom_binding_test.py @@ -191,7 +191,7 @@ def main(args): source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: Gst.error(" Unable to get the sink pad of streammux") diff --git a/apps/deepstream-demux-multi-in-multi-out/README b/apps/deepstream-demux-multi-in-multi-out/README index d7834c5..0574fca 100644 --- a/apps/deepstream-demux-multi-in-multi-out/README +++ b/apps/deepstream-demux-multi-in-multi-out/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git 
a/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py b/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py index 85ed38c..95ed6c9 100644 --- a/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py +++ b/apps/deepstream-demux-multi-in-multi-out/deepstream_demux_multi_in_multi_out.py @@ -32,7 +32,7 @@ import os import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA @@ -265,6 +265,7 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): global perf_data perf_data = PERF_DATA(number_sources) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -294,7 +295,7 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -347,14 +348,18 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): for i in range(number_sources): # pipeline nvstreamdemux -> queue -> nvvidconv -> nvosd -> (if Jetson) nvegltransform -> nveglgl # Creating EGLsink - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = make_element("nv3dsink", i) if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = make_element("nveglglessink", i) + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = make_element("nv3dsink", i) + else: + print("Creating EGLSink \n") + sink = make_element("nveglglessink", i) if not sink: sys.stderr.write(" Unable to create egl sink \n") pipeline.add(sink) @@ -375,7 +380,7 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): # connect nvstreamdemux -> queue padname = "src_%u" % i - demuxsrcpad = nvstreamdemux.get_request_pad(padname) + demuxsrcpad = nvstreamdemux.request_pad_simple(padname) if not demuxsrcpad: sys.stderr.write("Unable to create demux src pad \n") diff --git a/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt b/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt index a0e4e0f..9f5352e 100644 --- a/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt +++ b/apps/deepstream-demux-multi-in-multi-out/ds_demux_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-imagedata-multistream-cupy/README b/apps/deepstream-imagedata-multistream-cupy/README index 8b9d06d..6902a96 100644 --- a/apps/deepstream-imagedata-multistream-cupy/README +++ b/apps/deepstream-imagedata-multistream-cupy/README @@ -16,7 +16,7 @@ 
################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-imagedata-multistream-cupy/deepstream_imagedata-multistream_cupy.py b/apps/deepstream-imagedata-multistream-cupy/deepstream_imagedata-multistream_cupy.py index 4ce0d5d..a196c94 100644 --- a/apps/deepstream-imagedata-multistream-cupy/deepstream_imagedata-multistream_cupy.py +++ b/apps/deepstream-imagedata-multistream-cupy/deepstream_imagedata-multistream_cupy.py @@ -27,7 +27,7 @@ from ctypes import * import sys import math -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA import pyds @@ -237,7 +237,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -273,8 +273,13 @@ def main(args): if not nvosd: sys.stderr.write(" Unable to create nvosd \n") - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -370,8 +375,9 @@ def parse_args(): if __name__ == '__main__': - if is_aarch64(): - sys.stderr.write ("\nThis app is not currently supported on aarch64. Exiting...\n\n\n\n") + platform_info = PlatformInfo() + if platform_info.is_integrated_gpu(): + sys.stderr.write ("\nThis app is not currently supported on integrated GPU. 
Exiting...\n\n\n\n") sys.exit(1) stream_paths = parse_args() sys.exit(main(stream_paths)) diff --git a/apps/deepstream-imagedata-multistream-cupy/dstest_imagedata_cupy_config.txt b/apps/deepstream-imagedata-multistream-cupy/dstest_imagedata_cupy_config.txt index a0e4e0f..9f5352e 100644 --- a/apps/deepstream-imagedata-multistream-cupy/dstest_imagedata_cupy_config.txt +++ b/apps/deepstream-imagedata-multistream-cupy/dstest_imagedata_cupy_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-imagedata-multistream-redaction/README b/apps/deepstream-imagedata-multistream-redaction/README index d26cf1c..6d00dbf 100755 --- a/apps/deepstream-imagedata-multistream-redaction/README +++ b/apps/deepstream-imagedata-multistream-redaction/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-imagedata-multistream-redaction/config_infer_primary_peoplenet.txt b/apps/deepstream-imagedata-multistream-redaction/config_infer_primary_peoplenet.txt index 5839911..95eb681 100644 --- a/apps/deepstream-imagedata-multistream-redaction/config_infer_primary_peoplenet.txt +++ b/apps/deepstream-imagedata-multistream-redaction/config_infer_primary_peoplenet.txt @@ -1,5 +1,5 @@ ################################################################################ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,18 +29,33 @@ batch-size=1 process-mode=1 model-color-format=0 ## 0=FP32, 1=INT8, 2=FP16 mode -network-mode=2 +network-mode=1 num-detected-classes=3 cluster-mode=1 interval=0 gie-unique-id=1 output-blob-names=output_bbox/BiasAdd;output_cov/Sigmoid +#Use the config params below for dbscan clustering mode +#[class-attrs-all] +#detected-min-w=4 +#detected-min-h=4 +#minBoxes=3 +#eps=0.7 + +#Use the config params below for NMS clustering mode [class-attrs-all] +topk=20 +nms-iou-threshold=0.5 +pre-cluster-threshold=0.2 + +## Per class configurations +[class-attrs-0] +topk=20 +nms-iou-threshold=0.5 pre-cluster-threshold=0.4 -## Set eps=0.7 and minBoxes for cluster-mode=1(DBSCAN) -eps=0.7 -minBoxes=1 -[class-attrs-1] -# disable bag detection -pre-cluster-threshold=1.0 + +#[class-attrs-1] +#pre-cluster-threshold=0.05 +#eps=0.7 +#dbscan-min-score=0.5 \ No newline at end of file diff --git a/apps/deepstream-imagedata-multistream-redaction/deepstream_imagedata-multistream_redaction.py b/apps/deepstream-imagedata-multistream-redaction/deepstream_imagedata-multistream_redaction.py index 65dc2ba..04662ee 100644 --- a/apps/deepstream-imagedata-multistream-redaction/deepstream_imagedata-multistream_redaction.py +++ b/apps/deepstream-imagedata-multistream-redaction/deepstream_imagedata-multistream_redaction.py @@ -32,7 +32,7 @@ import sys import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA @@ -140,7 +140,7 @@ def tiler_sink_pad_buffer_probe(pad, info, u_data): frame_copy = np.array(n_frame, copy=True, order='C') # convert the array into cv2 default color format frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA) - if is_aarch64(): # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped + if platform_info.is_integrated_gpu(): # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id) # The unmap call should be made after operations with the original array are complete. # The original array cannot be accessed after this call. @@ -210,7 +210,7 @@ def decodebin_child_added(child_proxy, Object, name, user_data): print("Decodebin child added:", name, "\n") if name.find("decodebin") != -1: Object.connect("child-added", decodebin_child_added, user_data) - if not is_aarch64() and name.find("nvv4l2decoder") != -1: + if not platform_info.is_integrated_gpu() and name.find("nvv4l2decoder") != -1: # Use CUDA unified memory in the pipeline so frames # can be easily accessed on CPU in Python. 
Object.set_property("cudadec-memtype", 2) @@ -269,6 +269,8 @@ def main(uri_inputs,codec,bitrate ): os.mkdir(folder_name) print("Frames will be saved in ", folder_name) + global platform_info + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -301,7 +303,7 @@ def main(uri_inputs,codec,bitrate ): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -354,7 +356,7 @@ def main(uri_inputs,codec,bitrate ): if not encoder: sys.stderr.write(" Unable to create encoder") encoder.set_property('bitrate', bitrate) - if is_aarch64(): + if platform_info.is_integrated_gpu(): encoder.set_property('preset-level', 1) encoder.set_property('insert-sps-pps', 1) #encoder.set_property('bufapi-version', 1) @@ -399,7 +401,7 @@ def main(uri_inputs,codec,bitrate ): tiler.set_property("width", TILED_OUTPUT_WIDTH) tiler.set_property("height", TILED_OUTPUT_HEIGHT) - if not is_aarch64(): + if not platform_info.is_integrated_gpu(): # Use CUDA unified memory in the pipeline so frames # can be easily accessed on CPU in Python. mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED) diff --git a/apps/deepstream-imagedata-multistream/README b/apps/deepstream-imagedata-multistream/README index 0ce24d7..174e549 100755 --- a/apps/deepstream-imagedata-multistream/README +++ b/apps/deepstream-imagedata-multistream/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py b/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py index 489e2da..bb314b2 100755 --- a/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py +++ b/apps/deepstream-imagedata-multistream/deepstream_imagedata-multistream.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 ################################################################################ -# SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,7 +30,7 @@ import sys import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA import numpy as np @@ -64,7 +64,6 @@ MIN_CONFIDENCE = 0.3 MAX_CONFIDENCE = 0.4 - # tiler_sink_pad_buffer_probe will extract metadata received on tiler src pad # and update params for drawing rectangle, object information etc. 
def tiler_sink_pad_buffer_probe(pad, info, u_data): @@ -125,7 +124,8 @@ def tiler_sink_pad_buffer_probe(pad, info, u_data): frame_copy = np.array(n_frame, copy=True, order='C') # convert the array into cv2 default color format frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGRA) - if is_aarch64(): # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped + if platform_info.is_integrated_gpu(): + # If Jetson, since the buffer is mapped to CPU for retrieval, it must also be unmapped pyds.unmap_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id) # The unmap call should be made after operations with the original array are complete. # The original array cannot be accessed after this call. @@ -212,9 +212,11 @@ def decodebin_child_added(child_proxy, Object, name, user_data): if name.find("decodebin") != -1: Object.connect("child-added", decodebin_child_added, user_data) - if not is_aarch64() and name.find("nvv4l2decoder") != -1: - # Use CUDA unified memory in the pipeline so frames - # can be easily accessed on CPU in Python. + if not platform_info.is_integrated_gpu() and name.find("nvv4l2decoder") != -1: + # Use CUDA unified memory in the pipeline so frames can be easily accessed on CPU in Python. + # 0: NVBUF_MEM_CUDA_DEVICE, 1: NVBUF_MEM_CUDA_PINNED, 2: NVBUF_MEM_CUDA_UNIFIED + # Dont use direct macro here like NVBUF_MEM_CUDA_UNIFIED since nvv4l2decoder uses a + # different enum internally Object.set_property("cudadec-memtype", 2) if "source" in name: @@ -258,6 +260,7 @@ def create_source_bin(index, uri): return None return nbin + def main(args): # Check input arguments if len(args) < 2: @@ -276,6 +279,8 @@ def main(args): os.mkdir(folder_name) print("Frames will be saved in ", folder_name) + global platform_info + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -308,7 +313,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -343,14 +348,18 @@ def main(args): nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") if not nvosd: sys.stderr.write(" Unable to create nvosd \n") - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -378,13 +387,22 @@ def main(args): sink.set_property("sync", 0) sink.set_property("qos", 0) - if not is_aarch64(): + if not platform_info.is_integrated_gpu(): # Use CUDA unified memory in the pipeline so frames # can be easily accessed on CPU in Python. 
mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED) streammux.set_property("nvbuf-memory-type", mem_type) nvvidconv.set_property("nvbuf-memory-type", mem_type) - nvvidconv1.set_property("nvbuf-memory-type", mem_type) + if platform_info.is_wsl(): + #opencv functions like cv2.line and cv2.putText is not able to access NVBUF_MEM_CUDA_UNIFIED memory + #in WSL systems due to some reason and gives SEGFAULT. Use NVBUF_MEM_CUDA_PINNED memory for such + #usecases in WSL. Here, nvvidconv1's buffer is used in tiler sink pad probe and cv2 operations are + #done on that. + print("using nvbuf_mem_cuda_pinned memory for nvvidconv1\n") + vc_mem_type = int(pyds.NVBUF_MEM_CUDA_PINNED) + nvvidconv1.set_property("nvbuf-memory-type", vc_mem_type) + else: + nvvidconv1.set_property("nvbuf-memory-type", mem_type) tiler.set_property("nvbuf-memory-type", mem_type) print("Adding elements to Pipeline \n") diff --git a/apps/deepstream-imagedata-multistream/dstest_imagedata_config.txt b/apps/deepstream-imagedata-multistream/dstest_imagedata_config.txt index a0e4e0f..9f5352e 100755 --- a/apps/deepstream-imagedata-multistream/dstest_imagedata_config.txt +++ b/apps/deepstream-imagedata-multistream/dstest_imagedata_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-nvdsanalytics/README b/apps/deepstream-nvdsanalytics/README index 9757907..1a53857 100755 --- a/apps/deepstream-nvdsanalytics/README +++ b/apps/deepstream-nvdsanalytics/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-nvdsanalytics/deepstream_nvdsanalytics.py b/apps/deepstream-nvdsanalytics/deepstream_nvdsanalytics.py index 0b4cdef..dfca5c2 100755 --- a/apps/deepstream-nvdsanalytics/deepstream_nvdsanalytics.py +++ b/apps/deepstream-nvdsanalytics/deepstream_nvdsanalytics.py @@ -28,7 +28,7 @@ import sys import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA @@ -227,6 +227,7 @@ def main(args): perf_data = PERF_DATA(len(args) - 1) number_sources=len(args)-1 + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -256,7 +257,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname="sink_%u" %i - sinkpad= streammux.get_request_pad(padname) + sinkpad= streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad=source_bin.get_static_pad("src") @@ -311,14 +312,18 @@ def main(args): nvosd.set_property('process-mode',OSD_PROCESS_MODE) nvosd.set_property('display-text',OSD_DISPLAY_TEXT) - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - 
print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") diff --git a/apps/deepstream-nvdsanalytics/dsnvanalytics_pgie_config.txt b/apps/deepstream-nvdsanalytics/dsnvanalytics_pgie_config.txt index a0e4e0f..9f5352e 100644 --- a/apps/deepstream-nvdsanalytics/dsnvanalytics_pgie_config.txt +++ b/apps/deepstream-nvdsanalytics/dsnvanalytics_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-opticalflow/README b/apps/deepstream-opticalflow/README index 4b35bb4..3877998 100755 --- a/apps/deepstream-opticalflow/README +++ b/apps/deepstream-opticalflow/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-opticalflow/deepstream-opticalflow.py b/apps/deepstream-opticalflow/deepstream-opticalflow.py index 1cbae72..07f840c 100755 --- a/apps/deepstream-opticalflow/deepstream-opticalflow.py +++ b/apps/deepstream-opticalflow/deepstream-opticalflow.py @@ -27,7 +27,6 @@ from gi.repository import GLib, Gst import sys import math -from common.is_aarch_64 import is_aarch64 from common.bus_call import bus_call import os from os import path @@ -244,7 +243,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname="sink_%u" %i - sinkpad= streammux.get_request_pad(padname) + sinkpad= streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad=source_bin.get_static_pad("src") diff --git a/apps/deepstream-preprocess-test/README b/apps/deepstream-preprocess-test/README index 60de131..ace3d1b 100644 --- a/apps/deepstream-preprocess-test/README +++ b/apps/deepstream-preprocess-test/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - GstRtspServer diff --git a/apps/deepstream-preprocess-test/deepstream_preprocess_test.py b/apps/deepstream-preprocess-test/deepstream_preprocess_test.py index 3b267ce..6badd7d 100644 --- a/apps/deepstream-preprocess-test/deepstream_preprocess_test.py +++ b/apps/deepstream-preprocess-test/deepstream_preprocess_test.py @@ -21,7 +21,7 @@ sys.path.append("../") from common.bus_call import bus_call -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo import pyds import platform import math @@ -204,6 +204,7 @@ def main(args): global perf_data perf_data = PERF_DATA(len(args)) number_sources = len(args) + platform_info = PlatformInfo() # Standard 
GStreamer initialization Gst.init(None) @@ -233,7 +234,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = f"sink_{i}" - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -279,7 +280,7 @@ def main(args): if not encoder: sys.stderr.write(" Unable to create encoder") encoder.set_property("bitrate", bitrate) - if is_aarch64(): + if platform_info.is_integrated_gpu(): encoder.set_property("preset-level", 1) encoder.set_property("insert-sps-pps", 1) #encoder.set_property("bufapi-version", 1) diff --git a/apps/deepstream-preprocess-test/dstest1_pgie_config.txt b/apps/deepstream-preprocess-test/dstest1_pgie_config.txt index a1d9f03..a0d18bd 100644 --- a/apps/deepstream-preprocess-test/dstest1_pgie_config.txt +++ b/apps/deepstream-preprocess-test/dstest1_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-rtsp-in-rtsp-out/README b/apps/deepstream-rtsp-in-rtsp-out/README index d57167c..8dfed98 100755 --- a/apps/deepstream-rtsp-in-rtsp-out/README +++ b/apps/deepstream-rtsp-in-rtsp-out/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - GstRtspServer diff --git a/apps/deepstream-rtsp-in-rtsp-out/deepstream_test1_rtsp_in_rtsp_out.py b/apps/deepstream-rtsp-in-rtsp-out/deepstream_test1_rtsp_in_rtsp_out.py index 022784e..51379aa 100755 --- a/apps/deepstream-rtsp-in-rtsp-out/deepstream_test1_rtsp_in_rtsp_out.py +++ b/apps/deepstream-rtsp-in-rtsp-out/deepstream_test1_rtsp_in_rtsp_out.py @@ -19,7 +19,7 @@ import sys sys.path.append("../") from common.bus_call import bus_call -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo import pyds import platform import math @@ -175,6 +175,7 @@ def main(args): # Check input arguments number_sources = len(args) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -204,7 +205,7 @@ def main(args): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -252,7 +253,7 @@ def main(args): if not encoder: sys.stderr.write(" Unable to create encoder") encoder.set_property("bitrate", bitrate) - if is_aarch64(): + if platform_info.is_integrated_gpu(): encoder.set_property("preset-level", 1) encoder.set_property("insert-sps-pps", 1) #encoder.set_property("bufapi-version", 1) diff --git a/apps/deepstream-rtsp-in-rtsp-out/dstest1_pgie_config.txt b/apps/deepstream-rtsp-in-rtsp-out/dstest1_pgie_config.txt index a0e4e0f..9f5352e 100755 --- 
a/apps/deepstream-rtsp-in-rtsp-out/dstest1_pgie_config.txt +++ b/apps/deepstream-rtsp-in-rtsp-out/dstest1_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-segmask/README b/apps/deepstream-segmask/README index 4c13d08..eca364b 100644 --- a/apps/deepstream-segmask/README +++ b/apps/deepstream-segmask/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-segmask/deepstream_segmask.py b/apps/deepstream-segmask/deepstream_segmask.py index d719e63..c2fdd15 100644 --- a/apps/deepstream-segmask/deepstream_segmask.py +++ b/apps/deepstream-segmask/deepstream_segmask.py @@ -31,7 +31,7 @@ import sys import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA import numpy as np @@ -247,6 +247,7 @@ def main(stream_paths, output_folder): os.mkdir(folder_name) print("Frames will be saved in ", folder_name) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -277,7 +278,7 @@ def main(stream_paths, output_folder): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname = "sink_%u" % i - sinkpad = streammux.get_request_pad(padname) + sinkpad = streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad = source_bin.get_static_pad("src") @@ -301,14 +302,18 @@ def main(stream_paths, output_folder): nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay") if not nvosd: sys.stderr.write(" Unable to create nvosd \n") - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") diff --git a/apps/deepstream-segmask/dstest_segmask_config.txt b/apps/deepstream-segmask/dstest_segmask_config.txt index fd0fb5e..c049d53 100644 --- a/apps/deepstream-segmask/dstest_segmask_config.txt +++ b/apps/deepstream-segmask/dstest_segmask_config.txt @@ -26,10 +26,10 @@ net-scale-factor=0.017507 offsets=123.675;116.280;103.53 model-color-format=0 labelfile-path=../../../../samples/configs/tao_pretrained_models/peopleSegNet_labels.txt -tlt-encoded-model=../../../../samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50.etlt 
+tlt-encoded-model=../../../../samples/models/tao_pretrained_models/peopleSegNet/peoplesegnet_resnet50.etlt tlt-model-key=nvidia_tlt -model-engine-file=../../../../samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50.etlt_b1_gpu0_fp16.engine -int8-calib-file=../../../../samples/models/tao_pretrained_models/peopleSegNet/V2/peoplesegnet_resnet50_int8.txt +model-engine-file=../../../../samples/models/tao_pretrained_models/peopleSegNet/peoplesegnet_resnet50.etlt_b1_gpu0_fp16.engine +int8-calib-file=../../../../samples/models/tao_pretrained_models/peopleSegNet/peoplesegnet_resnet50_int8.txt infer-dims=3;576;960 uff-input-blob-name=Input batch-size=1 diff --git a/apps/deepstream-segmentation/README b/apps/deepstream-segmentation/README index 0123d9a..e2b68bc 100644 --- a/apps/deepstream-segmentation/README +++ b/apps/deepstream-segmentation/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - NumPy package diff --git a/apps/deepstream-segmentation/deepstream_segmentation.py b/apps/deepstream-segmentation/deepstream_segmentation.py index af46fb6..7d27c48 100755 --- a/apps/deepstream-segmentation/deepstream_segmentation.py +++ b/apps/deepstream-segmentation/deepstream_segmentation.py @@ -25,7 +25,7 @@ gi.require_version('Gst', '1.0') from gi.repository import GLib, Gst -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call import cv2 import pyds @@ -140,6 +140,7 @@ def main(args): config_file = args[1] num_sources = len(args) - 3 # Standard GStreamer initialization + platform_info = PlatformInfo() Gst.init(None) # Create gstreamer elements @@ -189,20 +190,24 @@ def main(args): if not nvsegvisual: sys.stderr.write("Unable to create nvsegvisual\n") - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") print("Playing file %s " % args[2]) source.set_property('location', args[2]) - if is_aarch64() and ("mjpeg" in args[2] or "mjpg" in args[2]): + if platform_info.is_integrated_gpu() and ("mjpeg" in args[2] or "mjpg" in args[2]): print ("setting decoder mjpeg property") decoder.set_property('mjpeg', 1) streammux.set_property('width', 1920) @@ -237,7 +242,7 @@ def main(args): source.link(jpegparser) jpegparser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") diff --git a/apps/deepstream-ssd-parser/README b/apps/deepstream-ssd-parser/README index 8bd0b97..c26e4c4 100644 --- a/apps/deepstream-ssd-parser/README +++ b/apps/deepstream-ssd-parser/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - NVIDIA Triton Inference Server - Python 
3.10 - Gst-python diff --git a/apps/deepstream-ssd-parser/deepstream_ssd_parser.py b/apps/deepstream-ssd-parser/deepstream_ssd_parser.py index b084b90..4af9593 100755 --- a/apps/deepstream-ssd-parser/deepstream_ssd_parser.py +++ b/apps/deepstream-ssd-parser/deepstream_ssd_parser.py @@ -25,7 +25,6 @@ import gi gi.require_version("Gst", "1.0") from gi.repository import GLib, Gst -from common.is_aarch_64 import is_aarch64 from common.bus_call import bus_call from ssd_parser import nvds_infer_parse_custom_tf_ssd, DetectionParam, NmsParam, BoxSizeParam import pyds @@ -399,7 +398,7 @@ def main(args): source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") diff --git a/apps/deepstream-test1-rtsp-out/README b/apps/deepstream-test1-rtsp-out/README index bc5d11a..be2ef29 100644 --- a/apps/deepstream-test1-rtsp-out/README +++ b/apps/deepstream-test1-rtsp-out/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python - GstRtspServer diff --git a/apps/deepstream-test1-rtsp-out/deepstream_test1_rtsp_out.py b/apps/deepstream-test1-rtsp-out/deepstream_test1_rtsp_out.py index 8265ed7..4634d91 100755 --- a/apps/deepstream-test1-rtsp-out/deepstream_test1_rtsp_out.py +++ b/apps/deepstream-test1-rtsp-out/deepstream_test1_rtsp_out.py @@ -25,7 +25,7 @@ gi.require_version('Gst', '1.0') gi.require_version('GstRtspServer', '1.0') from gi.repository import GLib, Gst, GstRtspServer -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call import pyds @@ -122,6 +122,7 @@ def osd_sink_pad_buffer_probe(pad,info,u_data): def main(args): + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -198,7 +199,7 @@ def main(args): if not encoder: sys.stderr.write(" Unable to create encoder") encoder.set_property('bitrate', bitrate) - if is_aarch64() and enc_type == 0: + if platform_info.is_integrated_gpu() and enc_type == 0: encoder.set_property('preset-level', 1) encoder.set_property('insert-sps-pps', 1) #encoder.set_property('bufapi-version', 1) @@ -255,7 +256,7 @@ def main(args): print("Linking elements in the Pipeline \n") source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") diff --git a/apps/deepstream-test1-rtsp-out/dstest1_pgie_config.txt b/apps/deepstream-test1-rtsp-out/dstest1_pgie_config.txt index a0e4e0f..9f5352e 100644 --- a/apps/deepstream-test1-rtsp-out/dstest1_pgie_config.txt +++ b/apps/deepstream-test1-rtsp-out/dstest1_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git 
a/apps/deepstream-test1-usbcam/README b/apps/deepstream-test1-usbcam/README index 3a83129..d708582 100644 --- a/apps/deepstream-test1-usbcam/README +++ b/apps/deepstream-test1-usbcam/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-test1-usbcam/deepstream_test_1_usb.py b/apps/deepstream-test1-usbcam/deepstream_test_1_usb.py index eaf4459..9237e6c 100755 --- a/apps/deepstream-test1-usbcam/deepstream_test_1_usb.py +++ b/apps/deepstream-test1-usbcam/deepstream_test_1_usb.py @@ -22,7 +22,7 @@ import gi gi.require_version('Gst', '1.0') from gi.repository import GLib, Gst -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call import pyds @@ -124,6 +124,7 @@ def main(args): sys.stderr.write("usage: %s \n" % args[0]) sys.exit(1) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -195,14 +196,18 @@ def main(args): sys.stderr.write(" Unable to create nvosd \n") # Finally render the osd output - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -239,7 +244,7 @@ def main(args): vidconvsrc.link(nvvidconvsrc) nvvidconvsrc.link(caps_vidconvsrc) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = caps_vidconvsrc.get_static_pad("src") diff --git a/apps/deepstream-test1-usbcam/dstest1_pgie_config.txt b/apps/deepstream-test1-usbcam/dstest1_pgie_config.txt index a0e4e0f..9f5352e 100755 --- a/apps/deepstream-test1-usbcam/dstest1_pgie_config.txt +++ b/apps/deepstream-test1-usbcam/dstest1_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-test1/README b/apps/deepstream-test1/README index c1ec6dc..10c5a35 100644 --- a/apps/deepstream-test1/README +++ b/apps/deepstream-test1/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-test1/deepstream_test_1.py b/apps/deepstream-test1/deepstream_test_1.py index 861cefc..1367fb4 100755 --- a/apps/deepstream-test1/deepstream_test_1.py +++ b/apps/deepstream-test1/deepstream_test_1.py @@ -23,7 
+23,7 @@ import gi gi.require_version('Gst', '1.0') from gi.repository import GLib, Gst -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call import pyds @@ -126,6 +126,7 @@ def main(args): sys.stderr.write("usage: %s \n" % args[0]) sys.exit(1) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -179,14 +180,18 @@ def main(args): sys.stderr.write(" Unable to create nvosd \n") # Finally render the osd output - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -217,7 +222,7 @@ def main(args): source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") diff --git a/apps/deepstream-test1/dstest1_pgie_config.txt b/apps/deepstream-test1/dstest1_pgie_config.txt index a0e4e0f..a1d9f03 100644 --- a/apps/deepstream-test1/dstest1_pgie_config.txt +++ b/apps/deepstream-test1/dstest1_pgie_config.txt @@ -61,7 +61,7 @@ model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficca labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 -batch-size=30 +batch-size=1 process-mode=1 model-color-format=0 ## 0=FP32, 1=INT8, 2=FP16 mode diff --git a/apps/deepstream-test2/README b/apps/deepstream-test2/README index e3c2acd..abea0ad 100644 --- a/apps/deepstream-test2/README +++ b/apps/deepstream-test2/README @@ -16,7 +16,7 @@ ################################################################################ Prequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-test2/deepstream_test_2.py b/apps/deepstream-test2/deepstream_test_2.py index e525dc6..f787a7f 100755 --- a/apps/deepstream-test2/deepstream_test_2.py +++ b/apps/deepstream-test2/deepstream_test_2.py @@ -25,7 +25,7 @@ import gi gi.require_version('Gst', '1.0') from gi.repository import GLib, Gst -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call import pyds @@ -166,6 +166,7 @@ def main(args): sys.stderr.write("usage: %s \n" % args[0]) sys.exit(1) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -232,14 +233,18 @@ def main(args): sys.stderr.write(" Unable to create nvosd \n") # Finally render the osd output - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = 
Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -297,7 +302,7 @@ def main(args): source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") diff --git a/apps/deepstream-test2/dstest2_pgie_config.txt b/apps/deepstream-test2/dstest2_pgie_config.txt index a0e4e0f..a1d9f03 100644 --- a/apps/deepstream-test2/dstest2_pgie_config.txt +++ b/apps/deepstream-test2/dstest2_pgie_config.txt @@ -61,7 +61,7 @@ model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficca labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 -batch-size=30 +batch-size=1 process-mode=1 model-color-format=0 ## 0=FP32, 1=INT8, 2=FP16 mode diff --git a/apps/deepstream-test3/README b/apps/deepstream-test3/README index 87a91e0..1dcc988 100755 --- a/apps/deepstream-test3/README +++ b/apps/deepstream-test3/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - NVIDIA Triton Inference Server (optional) - Python 3.10 - Gst-python @@ -131,6 +131,8 @@ Note: 5) -s/--silent option can be used to suppress verbose output. 6) --file-loop option can be used to loop input files after EOS. 7) --disable-probe option can be used to disable the probe function and to use nvdslogger for perf measurements. +8) To enable Pipeline Latency Measurement, set environment variable : NVDS_ENABLE_LATENCY_MEASUREMENT=1 +9) To enable Component Level Latency Measurement, set environment variable : NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 in addition to NVDS_ENABLE_LATENCY_MEASUREMENT=1 This document describes the sample deepstream-test3 application. @@ -142,6 +144,7 @@ This document describes the sample deepstream-test3 application. batch for better resource utilization. * Extract the stream metadata, which contains useful information about the frames in the batched buffer. + * Showcases how to enable latency measurement using probe function Refer to the deepstream-test1 sample documentation for an example of simple single-stream inference, bounding-box overlay, and rendering. diff --git a/apps/deepstream-test3/config_infer_primary_peoplenet.txt b/apps/deepstream-test3/config_infer_primary_peoplenet.txt index db8b833..f5fee3f 100644 --- a/apps/deepstream-test3/config_infer_primary_peoplenet.txt +++ b/apps/deepstream-test3/config_infer_primary_peoplenet.txt @@ -1,5 +1,5 @@ ################################################################################ -# SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,16 +47,15 @@ output-blob-names=output_bbox/BiasAdd:0;output_cov/Sigmoid:0 [class-attrs-all] topk=20 nms-iou-threshold=0.5 -pre-cluster-threshold=0.4 +pre-cluster-threshold=0.2 ## Per class configurations -#[class-attrs-0] -#topk=20 -#nms-iou-threshold=0.5 -#pre-cluster-threshold=0.4 +[class-attrs-0] +topk=20 +nms-iou-threshold=0.5 +pre-cluster-threshold=0.4 -[class-attrs-1] -#disable bag detection -pre-cluster-threshold=1.0 +#[class-attrs-1] +#pre-cluster-threshold=0.05 #eps=0.7 -#dbscan-min-score=0.5 +#dbscan-min-score=0.5 \ No newline at end of file diff --git a/apps/deepstream-test3/deepstream_test_3.py b/apps/deepstream-test3/deepstream_test_3.py index 75a64d5..6625881 100755 --- a/apps/deepstream-test3/deepstream_test_3.py +++ b/apps/deepstream-test3/deepstream_test_3.py @@ -20,6 +20,7 @@ import sys sys.path.append('../') from pathlib import Path +from os import environ import gi import configparser import argparse @@ -30,7 +31,7 @@ import sys import math import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.FPS import PERF_DATA @@ -40,6 +41,7 @@ silent = False file_loop = False perf_data = None +measure_latency = False MAX_DISPLAY_LEN=64 PGIE_CLASS_ID_VEHICLE = 0 @@ -69,6 +71,16 @@ def pgie_src_pad_buffer_probe(pad,info,u_data): # Retrieve batch metadata from the gst_buffer # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the # C address of gst_buffer as input, which is obtained with hash(gst_buffer) + + # Enable latency measurement via probe if environment variable NVDS_ENABLE_LATENCY_MEASUREMENT=1 is set. + # To enable component level latency measurement, please set environment variable + # NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 in addition to the above. 
+ global measure_latency + if measure_latency: + num_sources_in_batch = pyds.nvds_measure_buffer_latency(hash(gst_buffer)) + if num_sources_in_batch == 0: + print("Unable to get number of sources in GstBuffer for latency measurement") + batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer)) l_frame = batch_meta.frame_meta_list while l_frame is not None: @@ -205,6 +217,7 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): number_sources=len(args) + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -234,7 +247,7 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): sys.stderr.write("Unable to create source bin \n") pipeline.add(source_bin) padname="sink_%u" %i - sinkpad= streammux.get_request_pad(padname) + sinkpad= streammux.request_pad_simple(padname) if not sinkpad: sys.stderr.write("Unable to create sink pad bin \n") srcpad=source_bin.get_static_pad("src") @@ -286,8 +299,8 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): nvosd.set_property('display-text',OSD_DISPLAY_TEXT) if file_loop: - if is_aarch64(): - # Set nvbuf-memory-type=4 for aarch64 for file-loop (nvurisrcbin case) + if platform_info.is_integrated_gpu(): + # Set nvbuf-memory-type=4 for integrated gpu for file-loop (nvurisrcbin case) streammux.set_property('nvbuf-memory-type', 4) else: # Set nvbuf-memory-type=2 for x86 for file-loop (nvurisrcbin case) @@ -299,14 +312,18 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): sink.set_property('enable-last-sample', 0) sink.set_property('sync', 0) else: - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -380,6 +397,14 @@ def main(args, requested_pgie=None, config=None, disable_probe=False): # perf callback function to print fps every 5 sec GLib.timeout_add(5000, perf_data.perf_print_callback) + # Enable latency measurement via probe if environment variable NVDS_ENABLE_LATENCY_MEASUREMENT=1 is set. + # To enable component level latency measurement, please set environment variable + # NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 in addition to the above. 
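Pulled out of the diff, the new latency hook amounts to the following pattern. This is a minimal sketch, assuming the pyds.nvds_measure_buffer_latency() binding introduced later in this patch and a probe attached to the pgie src pad as in deepstream-test3; it is not a drop-in replacement for the sample code:

```python
# Minimal sketch: pipeline/component latency measurement from a pad probe.
# Assumes the pyds.nvds_measure_buffer_latency() binding from this release and
# that the probe is attached to the pgie src pad, as in deepstream-test3.
import os
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import pyds

measure_latency = os.environ.get('NVDS_ENABLE_LATENCY_MEASUREMENT') == '1'

def pgie_src_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    if measure_latency:
        # Prints per-source frame latency; component-level figures additionally
        # require NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 in the environment.
        num_sources_in_batch = pyds.nvds_measure_buffer_latency(hash(gst_buffer))
        if num_sources_in_batch == 0:
            print("Unable to get number of sources in GstBuffer for latency measurement")
    return Gst.PadProbeReturn.OK
```

Run the app with NVDS_ENABLE_LATENCY_MEASUREMENT=1 exported (and optionally NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1) to see the per-frame numbers printed from the probe.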
+ if environ.get('NVDS_ENABLE_LATENCY_MEASUREMENT') == '1': + print ("Pipeline Latency Measurement enabled!\nPlease set env var NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 for Component Latency Measurement") + global measure_latency + measure_latency = True + # List the sources print("Now playing...") for i, source in enumerate(args): diff --git a/apps/deepstream-test3/dstest3_pgie_config.txt b/apps/deepstream-test3/dstest3_pgie_config.txt index a0e4e0f..9f5352e 100755 --- a/apps/deepstream-test3/dstest3_pgie_config.txt +++ b/apps/deepstream-test3/dstest3_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/apps/deepstream-test4/README b/apps/deepstream-test4/README index 97283ad..0c7fcaf 100755 --- a/apps/deepstream-test4/README +++ b/apps/deepstream-test4/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/deepstream-test4/deepstream_test_4.py b/apps/deepstream-test4/deepstream_test_4.py index f471992..22675a5 100755 --- a/apps/deepstream-test4/deepstream_test_4.py +++ b/apps/deepstream-test4/deepstream_test_4.py @@ -26,7 +26,7 @@ from gi.repository import GLib, Gst import sys from optparse import OptionParser -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo from common.bus_call import bus_call from common.utils import long_to_uint64 import pyds @@ -230,6 +230,7 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): def main(args): + platform_info = PlatformInfo() Gst.init(None) # Deprecated: following meta_copy_func and meta_free_func @@ -305,14 +306,18 @@ def main(args): if not sink: sys.stderr.write(" Unable to create fakesink \n") else: - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" Unable to create egl sink \n") @@ -352,7 +357,7 @@ def main(args): source.link(h264parser) h264parser.link(decoder) - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") srcpad = decoder.get_static_pad("src") @@ -368,8 +373,8 @@ def main(args): msgconv.link(msgbroker) queue2.link(sink) sink_pad = queue1.get_static_pad("sink") - tee_msg_pad = tee.get_request_pad('src_%u') - tee_render_pad = tee.get_request_pad("src_%u") + tee_msg_pad = tee.request_pad_simple('src_%u') + tee_render_pad = tee.request_pad_simple("src_%u") if not 
tee_msg_pad or not tee_render_pad: sys.stderr.write("Unable to get request pads\n") tee_msg_pad.link(sink_pad) diff --git a/apps/deepstream-test4/dstest4_pgie_config.txt b/apps/deepstream-test4/dstest4_pgie_config.txt index a0e4e0f..a0d18bd 100755 --- a/apps/deepstream-test4/dstest4_pgie_config.txt +++ b/apps/deepstream-test4/dstest4_pgie_config.txt @@ -57,11 +57,11 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 -batch-size=30 +batch-size=1 process-mode=1 model-color-format=0 ## 0=FP32, 1=INT8, 2=FP16 mode diff --git a/apps/runtime_source_add_delete/README b/apps/runtime_source_add_delete/README index 5e3e2c3..8e8fadc 100644 --- a/apps/runtime_source_add_delete/README +++ b/apps/runtime_source_add_delete/README @@ -16,7 +16,7 @@ ################################################################################ Prerequisites: -- DeepStreamSDK 6.4 +- DeepStreamSDK 7.0 - Python 3.10 - Gst-python diff --git a/apps/runtime_source_add_delete/deepstream_rt_src_add_del.py b/apps/runtime_source_add_delete/deepstream_rt_src_add_del.py index c3dc1f6..5ef0441 100644 --- a/apps/runtime_source_add_delete/deepstream_rt_src_add_del.py +++ b/apps/runtime_source_add_delete/deepstream_rt_src_add_del.py @@ -30,7 +30,7 @@ import math import random import platform -from common.is_aarch_64 import is_aarch64 +from common.platform_info import PlatformInfo import pyds @@ -87,7 +87,7 @@ def decodebin_child_added(child_proxy,Object,name,user_data): if(name.find("decodebin") != -1): Object.connect("child-added",decodebin_child_added,user_data) if(name.find("nvv4l2decoder") != -1): - if (is_aarch64()): + if (platform_info.is_integrated_gpu()): Object.set_property("enable-max-performance", True) Object.set_property("drop-frame-interval", 0) Object.set_property("num-extra-surfaces", 0) @@ -110,7 +110,9 @@ def cb_newpad(decodebin,pad,data): pad_name = "sink_%u" % source_id print(pad_name) #Get a sink pad from the streammux, link to decodebin - sinkpad = streammux.get_request_pad(pad_name) + sinkpad = streammux.request_pad_simple(pad_name) + if not sinkpad: + sys.stderr.write("Unable to create sink pad bin \n") if pad.link(sinkpad) == Gst.PadLinkReturn.OK: print("Decodebin linked to pipeline") else: @@ -325,6 +327,8 @@ def main(args): num_sources=len(args)-1 + global platform_info + platform_info = PlatformInfo() # Standard GStreamer initialization Gst.init(None) @@ -399,14 +403,18 @@ def main(args): sys.stderr.write(" Unable to make sgie2 \n") - if is_aarch64(): + if platform_info.is_integrated_gpu(): print("Creating nv3dsink \n") sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") if not sink: sys.stderr.write(" Unable to create nv3dsink \n") else: - print("Creating EGLSink \n") - sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") + if platform_info.is_platform_aarch64(): + print("Creating nv3dsink \n") + sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink") + else: + print("Creating EGLSink \n") + sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer") if not sink: sys.stderr.write(" 
Unable to create egl sink \n") if is_live: @@ -470,8 +478,8 @@ def main(args): nvvideoconvert.set_property("gpu_id", GPU_ID) nvosd.set_property("gpu_id", GPU_ID) - #Set gpu ID of sink if not aarch64 - if(not is_aarch64()): + #Set gpu ID of sink if not integrated gpu + if(not platform_info.is_integrated_gpu() and not platform_info.is_platform_aarch64()): sink.set_property("gpu_id", GPU_ID) print("Adding elements to Pipeline \n") @@ -486,7 +494,7 @@ def main(args): # We link elements in the following order: # sourcebin -> streammux -> nvinfer -> nvtracker -> nvdsanalytics -> - # nvtiler -> nvvideoconvert -> nvdsosd -> (if aarch64, transform ->) sink + # nvtiler -> nvvideoconvert -> nvdsosd -> sink print("Linking elements in the Pipeline \n") streammux.link(pgie) pgie.link(tracker) diff --git a/apps/runtime_source_add_delete/dstest_pgie_config.txt b/apps/runtime_source_add_delete/dstest_pgie_config.txt index 0cf6e19..a3c84ea 100644 --- a/apps/runtime_source_add_delete/dstest_pgie_config.txt +++ b/apps/runtime_source_add_delete/dstest_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/bindings/CMakeLists.txt b/bindings/CMakeLists.txt index de3ccc7..7b01fb2 100644 --- a/bindings/CMakeLists.txt +++ b/bindings/CMakeLists.txt @@ -26,7 +26,10 @@ check_variable_set(PYTHON_MAJOR_VERSION 3) check_variable_set(PYTHON_MINOR_VERSION 10) check_variable_set(PIP_PLATFORM linux_x86_64) check_variable_set(DS_PATH "/opt/nvidia/deepstream/deepstream") - +if (DEFINED IS_SBSA) + message("IS_SBSA is set. Enabling definitions for ARM_SBSA") + add_compile_definitions(IS_SBSA) +endif() # Checking values are allowed macro(check_variable_allowed var_name var_list) @@ -49,7 +52,7 @@ set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--no-undefined") # Setting python build versions set(PYTHON_VERSION ${PYTHON_MAJOR_VERSION}.${PYTHON_MINOR_VERSION}) -set(PIP_WHEEL pyds-1.1.10-py3-none-${PIP_PLATFORM}.whl) +set(PIP_WHEEL pyds-1.1.11-py3-none-${PIP_PLATFORM}.whl) # Describing pyds build project(pyds DESCRIPTION "Python bindings for Deepstream") diff --git a/bindings/README.md b/bindings/README.md index 9cbeefd..9f60e00 100644 --- a/bindings/README.md +++ b/bindings/README.md @@ -1,9 +1,9 @@ # DeepStream python bindings -SDK version supported: 6.4 +SDK version supported: 7.0 The latest prebuilt release package complete with python bindings and sample applications can be downloaded from the [release section](../../../releases) -for both x86 and Jetson platforms. +for x86, Jetson and SBSA platforms. This readme describes how to compile and install DeepStream python bindings (henceforth referred as bindings). This process is mainly useful for making customizations in the bindings and compiling it yourself instead of using the prebuilt versions provided in the release section. 
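The renderer-selection change repeated across the samples above reduces to one pattern. The sketch below assumes the new common/platform_info module exposes PlatformInfo with is_integrated_gpu() and is_platform_aarch64() exactly as the diffs use them, and that the application has already called Gst.init():

```python
# Minimal sketch of the sink selection now used by the updated sample apps.
# Assumes apps/common/platform_info.py provides PlatformInfo as referenced above
# and that the application has already called Gst.init(None).
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from common.platform_info import PlatformInfo

def create_render_sink():
    platform_info = PlatformInfo()
    if platform_info.is_integrated_gpu() or platform_info.is_platform_aarch64():
        # Jetson (integrated GPU) and other aarch64 targets render with nv3dsink
        sink = Gst.ElementFactory.make("nv3dsink", "nv3d-sink")
    else:
        # x86 with a discrete GPU keeps the EGL renderer
        sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create sink \n")
    return sink
```

Collapsing the integrated-GPU and aarch64 branches into a single check is equivalent to the nested if/else in the samples, since both branches create the same nv3dsink element.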
@@ -15,17 +15,19 @@ The readme is divided into three main parts: - [1.3 Initialization of submodules](#13-initialization-of-submodules) - [1.4 Installing Gst-python](#14-installing-gst-python) - [2 Compiling the bindings](#2-compiling-the-bindings) - - [2.1 Quick build (x86-ubuntu-22.04 | python 3.10 | Deepstream 6.4)](#21-quick-build-x86-ubuntu-2204--python-310--deepstream-64) - - [2.2 Advanced build](#22-advanced-build) - - [2.2.1 Using Cmake options](#221-using-cmake-options) - - [2.2.2 Available cmake options](#222-available-cmake-options) - - [2.2.3 Example](#223-example) - - [2.3 Cross-Compilation for aarch64 on x86](#23-cross-compilation-for-aarch64-on-x86) - - [2.3.1 Build Pre-requisites](#231-build-pre-requisites) - - [2.3.2 Download the JetPack SDK 6.0 DP](#232-download-the-jetpack-sdk-60-dp) - - [2.3.3 Generate the cross-compile build container](#233-generate-the-cross-compile-build-container) - - [2.3.4 Launch the cross-compile build container](#234-launch-the-cross-compile-build-container) - - [2.3.5 Build DeepStreamSDK python bindings](#235-build-deepstreamsdk-python-bindings) + - [2.1 Quick build 1 (x86-ubuntu-22.04 | python 3.10 | Deepstream 7.0)](#21-quick-build-1-x86-ubuntu-2204--python-310--deepstream-70) + - [2.2 Quick build 2 (Jetson-ubuntu-22.04 | python 3.10 | Deepstream 7.0)](#22-quick-build-2-jetson-ubuntu-2204--python-310--deepstream-70) + - [2.3 Quick build 3 (SBSA-ubuntu-22.04 | python 3.10 | Deepstream 7.0)](#23-quick-build-3-sbsa-ubuntu-2204--python-310--deepstream-70) + - [2.4 Advanced build](#24-advanced-build) + - [2.4.1 Using Cmake options](#241-using-cmake-options) + - [2.4.2 Available cmake options](#242-available-cmake-options) + - [2.4.3 Example](#243-example) + - [2.5 Cross-Compilation for aarch64 on x86](#25-cross-compilation-for-aarch64-on-x86) + - [2.5.1 Build Pre-requisites](#251-build-pre-requisites) + - [2.5.2 Download the JetPack SDK 6.0 GA](#252-download-the-jetpack-sdk-60-ga) + - [2.5.3 Generate the cross-compile build container](#253-generate-the-cross-compile-build-container) + - [2.5.4 Launch the cross-compile build container](#254-launch-the-cross-compile-build-container) + - [2.5.5 Build DeepStreamSDK python bindings](#255-build-deepstreamsdk-python-bindings) - [3 Installing the bindings](#3-installing-the-bindings) - [3.1 Installing the pip wheel](#31-installing-the-pip-wheel) - [3.1.1 pip wheel troubleshooting](#311-pip-wheel-troubleshooting) @@ -77,8 +79,7 @@ sudo update-ca-certificates Build and install gst-python: ```bash cd 3rdparty/gstreamer/subprojects/gst-python/ -meson build -meson configure +meson setup build cd build ninja ninja install @@ -90,7 +91,7 @@ Python bindings are compiled using CMake. Following commands provide quick cmake configurations for common compilation options: -### 2.1 Quick build (x86-ubuntu-22.04 | python 3.10 | Deepstream 6.4) +### 2.1 Quick build 1 (x86-ubuntu-22.04 | python 3.10 | Deepstream 7.0) ```bash cd deepstream_python_apps/bindings mkdir build @@ -99,28 +100,53 @@ cmake .. make -j$(nproc) ``` + +### 2.2 Quick build 2 (Jetson-ubuntu-22.04 | python 3.10 | Deepstream 7.0) +The following commands will work for a native build on Jetson platform. +```bash +cd deepstream_python_apps/bindings +mkdir build +cd build +cmake .. 
-DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=10 \ + -DPIP_PLATFORM=linux_aarch64 -DDS_PATH=/opt/nvidia/deepstream/deepstream/ +make -j$(nproc) +``` + + +### 2.3 Quick build 3 (SBSA-ubuntu-22.04 | python 3.10 | Deepstream 7.0) +The following commands will work for a native build on SBSA platform. +```bash +cd deepstream_python_apps/bindings +mkdir build +cd build +cmake .. -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=10 -DIS_SBSA=1 \ + -DPIP_PLATFORM=linux_aarch64 -DDS_PATH=/opt/nvidia/deepstream/deepstream/ +make -j$(nproc) +``` + -### 2.2 Advanced build +### 2.4 Advanced build -#### 2.2.1 Using Cmake options +#### 2.4.1 Using Cmake options Multiple options can be used with cmake as follows: ```bash cmake .. [-D= [-D= [-D= ... ]]] ``` -#### 2.2.2 Available cmake options +#### 2.4.2 Available cmake options | Var | Default value | Purpose | Available values |-----|:-------------:|---------|:----------------: -| DS_VERSION | 6.4 | Used to determine default deepstream library path | should match to the deepstream version installed on your computer +| DS_VERSION | 7.0 | Used to determine default deepstream library path | should match to the deepstream version installed on your computer | PYTHON_MAJOR_VERSION | 3 | Used to set the python version used for the bindings | 3 | PYTHON_MINOR_VERSION | 10 | Used to set the python version used for the bindings | 10 | PIP_PLATFORM | linux_x86_64 | Used to select the target architecture to compile the bindings | linux_x86_64, linux_aarch64 | DS_PATH | /opt/nvidia/deepstream/deepstream-${DS_VERSION} | Path where deepstream libraries are available | Should match the existing deepstream library folder +| IS_SBSA | (Optional) | Indicate whether the build is for SBSA platform | 1 -#### 2.2.3 Example +#### 2.4.3 Example -Following commands can be used to compile the bindings natively on Jetson devices +Following commands can be used to compile the bindings natively on Jetson devices. ```bash cd deepstream_python_apps/bindings @@ -131,14 +157,25 @@ cmake .. -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=10 \ make ``` +Following commands can be used to compile the bindings natively on SBSA platform. + +```bash +cd deepstream_python_apps/bindings +mkdir build +cd build +cmake .. -DPYTHON_MAJOR_VERSION=3 -DPYTHON_MINOR_VERSION=10 -DIS_SBSA=1\ + -DPIP_PLATFORM=linux_aarch64 -DDS_PATH=/opt/nvidia/deepstream/deepstream/ +make +``` + -### 2.3 Cross-Compilation for aarch64 on x86 +### 2.5 Cross-Compilation for aarch64 on x86 This section outlines how to enable cross-compiling of the DeepStreamSDK python bindings for aarch64 using Docker on x86 host. NOTE: This will only emulate the CPU, so any calls requiring embedded hardware, such as using CUDA or inference are not supported. -#### 2.3.1 Build Pre-requisites +#### 2.5.1 Build Pre-requisites We use [qemu](https://www.qemu.org/) processor emulator to achieve cross-compilation. 
Qemu can be installed on the x86 Ubuntu host machine as shown below: @@ -150,17 +187,17 @@ sudo apt-get install qemu binfmt-support qemu-user-static docker run --rm --privileged dockerhub.nvidia.com/multiarch/qemu-user-static --reset -p yes # Verify qemu installation -docker run --platform linux/aarch64 --rm -t nvcr.io/nvidia/deepstream:6.4-samples-multiarch uname -m +docker run --platform linux/aarch64 --rm -t nvcr.io/nvidia/deepstream:7.0-samples-multiarch uname -m #aarch64 ``` -#### 2.3.2 Download the JetPack SDK 6.0 DP +#### 2.5.2 Download the JetPack SDK 6.0 GA Cross-compilation for Jetson on x86 host requires some low level libraries which can be downloaded using SDK Manager. Follow these steps to obtain these libraries, which are utilized by the docker build later. 1. Download and install the [NVIDIA SDK manager](https://developer.nvidia.com/nvidia-sdk-manager) 2. Launch the SDK Manager and login with your NVIDIA developer account. -3. Select the platform and target OS (example: Jetson AGX Xavier, `Linux Jetpack 6.0 DP`) and click Continue. +3. Select the platform and target OS (example: Jetson AGX Xavier, `Linux Jetpack 6.0 GA`) and click Continue. 4. Under `Download & Install Options` change the download folder and select `Download now, Install later`. Agree to the license terms and click Continue. 5. Go to the download folder, and run: @@ -172,7 +209,7 @@ mkdir -p deepstream_python_apps/bindings/docker/jetpack_files mv ~/Downloads/nvidia/sdkm_downloads/* /deepstream_python_apps/bindings/docker/jetpack_files ``` -#### 2.3.3 Generate the cross-compile build container +#### 2.5.3 Generate the cross-compile build container Below command generates the build container @@ -181,20 +218,20 @@ Below command generates the build container cd deepstream_python_apps/bindings # Make sure you are in deepstream_python_apps/bindings directory # This command builds the cross-compile docker and adds the mentioned tag -docker build --platform linux/aarch64 --tag=deepstream-6.4-ubuntu22.04-python-l4t -f qemu_docker/ubuntu-cross-aarch64.Dockerfile . +docker build --platform linux/aarch64 --tag=deepstream-7.0-ubuntu22.04-python-l4t -f qemu_docker/ubuntu-cross-aarch64.Dockerfile . ``` -#### 2.3.4 Launch the cross-compile build container +#### 2.5.4 Launch the cross-compile build container ```bash # Create a directory to mount to the container and store your pyds wheel package in mkdir export_pyds # Run the container. Make sure the tag matches the one from Generate step above -docker run --platform linux/aarch64 -it --entrypoint bash -v $PWD/export_pyds:/export_pyds deepstream-6.4-ubuntu22.04-python-l4t +docker run --platform linux/aarch64 -it --entrypoint bash -v $PWD/export_pyds:/export_pyds deepstream-7.0-ubuntu22.04-python-l4t ``` -#### 2.3.5 Build DeepStreamSDK python bindings +#### 2.5.5 Build DeepStreamSDK python bindings After the container launches successfully, while inside the cross-compile docker, run following commands: @@ -232,7 +269,7 @@ Following commands can be used to install the generated pip wheel. ### 3.1 Installing the pip wheel ```bash -pip3 install ./pyds-1.1.10-py3-none*.whl +pip3 install ./pyds-1.1.11-py3-none*.whl ``` #### 3.1.1 pip wheel troubleshooting diff --git a/bindings/docstrings/functionsdoc.h b/bindings/docstrings/functionsdoc.h index 531d092..2796605 100644 --- a/bindings/docstrings/functionsdoc.h +++ b/bindings/docstrings/functionsdoc.h @@ -609,5 +609,23 @@ namespace pydsdoc This function only works for RTSP sources i.e. 
GStreamer elements "rtspsrc" or "uridecodebin" with an RTSP uri. :arg src_elem: GStreamer source element to be configured.)pyds"; + + constexpr const char* nvds_measure_buffer_latency=R"pyds( + Measures the latency of all frames present in the current batch. + + :arg buffer: GstBuffer from which to retrieve the :class:`NvDsBatchMeta` + + :returns: number of sources in the batch. + + Example usage: + :: + + #enable pipeline latency measurement + export NVDS_ENABLE_LATENCY_MEASUREMENT=1 + #enable component latency measurement + export NVDS_ENABLE_COMPONENT_LATENCY_MEASUREMENT=1 + + #add this code in the plugin probe function. + num_sources_in_batch = pyds.nvds_measure_buffer_latency(hash(gst_buffer));)pyds"; } } \ No newline at end of file diff --git a/bindings/docstrings/nvosddoc.h b/bindings/docstrings/nvosddoc.h index fcd6d0e..43a4d27 100644 --- a/bindings/docstrings/nvosddoc.h +++ b/bindings/docstrings/nvosddoc.h @@ -287,7 +287,8 @@ namespace pydsdoc :ivar width: *int*, Mask width.)pyds"; constexpr const char* get_mask_array=R"pyds(Retrieve mask data as numpy array)pyds"; + constexpr const char* alloc_mask_array=R"pyds(Retrieve and allocate mask data as numpy array)pyds"; constexpr const char* cast=R"pyds(cast given object/data to :class:`NvOSD_MaskParams`, call pyds.NvOSD_MaskParams.cast(data))pyds"; } } -} \ No newline at end of file +} diff --git a/bindings/packaging/setup.py b/bindings/packaging/setup.py index 2b4f533..d71f1e8 100644 --- a/bindings/packaging/setup.py +++ b/bindings/packaging/setup.py @@ -17,7 +17,7 @@ setuptools.setup( name="pyds", - version="1.1.10", + version="1.1.11", author="NVIDIA", description="Install precompiled DeepStream Python bindings extension", url="nvidia.com", diff --git a/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile b/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile index 78c0841..a954b9d 100644 --- a/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile +++ b/bindings/qemu_docker/ubuntu-cross-aarch64.Dockerfile @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM nvcr.io/nvidia/deepstream:6.4-samples-multiarch +FROM nvcr.io/nvidia/deepstream:7.0-triton-multiarch LABEL maintainer="NVIDIA CORPORATION" # Set timezone. diff --git a/bindings/src/bindfunctions.cpp b/bindings/src/bindfunctions.cpp index 5ca87b6..cc2c9cc 100644 --- a/bindings/src/bindfunctions.cpp +++ b/bindings/src/bindfunctions.cpp @@ -315,17 +315,17 @@ namespace pydeepstream { auto *inputnvsurface = reinterpret_cast(inmap.data); gst_buffer_unmap(buffer, &inmap); - if (inputnvsurface->surfaceList->colorFormat != - NVBUF_COLOR_FORMAT_RGBA) { + if (inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGBA && + inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGB ) { throw std::runtime_error( - "get_nvds_buf_Surface: Currently we only support RGBA color Format"); + "get_nvds_buf_Surface: Currently we only support RGBA/RGB color Format"); } - int channels = 4; + int channels = inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGB ?
4:3; /* use const reference here so input_surface is not altered during mapping and syncing for CPU */ const NvBufSurfaceParams &input_surface = inputnvsurface->surfaceList[batchID]; -#ifdef __aarch64__ +#if defined __aarch64__ && !defined IS_SBSA /* Map the buffer if it has not been mapped already, before syncing the mapped buffer to CPU.*/ if (nullptr == input_surface.mappedAddr.addr[0]) { @@ -384,19 +384,19 @@ namespace pydeepstream { auto *inputnvsurface = reinterpret_cast(inmap.data); gst_buffer_unmap(buffer, &inmap); - if (inputnvsurface->surfaceList->colorFormat != - NVBUF_COLOR_FORMAT_RGBA) { + if (inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGBA && + inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGB) { throw std::runtime_error( - "get_nvds_buf_surface_gpu: Currently we only support RGBA color Format"); + "get_nvds_buf_surface_gpu: Currently we only support RGB/RGBA color Format"); } -#ifdef __aarch64__ +#if defined __aarch64__ && !defined IS_SBSA /* Map the buffer if it has not been mapped already, otherwise sync the mapped buffer to CPU.*/ throw std::runtime_error( "get_nvds_buf_surface_gpu: Currently we only support x86"); #else - int channels = 4; + int channels = inputnvsurface->surfaceList->colorFormat != NVBUF_COLOR_FORMAT_RGB ? 4:3; int height = inputnvsurface->surfaceList[batchID].height; int width = inputnvsurface->surfaceList[batchID].width; int pitch = inputnvsurface->surfaceList[batchID].pitch; @@ -806,5 +806,38 @@ namespace pydeepstream { "src_elem"_a, pydsdoc::methodsDoc::configure_source_for_ntp_sync); + m.def("nvds_measure_buffer_latency", + [](size_t gst_buffer) { + int num_sources_in_batch = 0; + if(nvds_enable_latency_measurement) + { + auto *buffer = reinterpret_cast(gst_buffer); + NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buffer); + if (!batch_meta) { + cout <<"Batch meta not found for buffer "<< buffer << endl; + return num_sources_in_batch; + } + + nvds_acquire_meta_lock (batch_meta); + NvDsFrameLatencyInfo* latency_info = (NvDsFrameLatencyInfo*)g_malloc0( + sizeof(NvDsFrameLatencyInfo) * batch_meta->max_frames_in_batch); + nvds_release_meta_lock (batch_meta); + if(latency_info){ + num_sources_in_batch = nvds_measure_buffer_latency(buffer, latency_info); + cout << "************BATCH-NUM = "<< latency_info[0].frame_num << "**************" << endl; + for(int i = 0; i < num_sources_in_batch; i++) + { + cout << "Source id = " << latency_info[i].source_id << + " Frame_num = " << latency_info[i].frame_num << + " Frame latency = " << latency_info[i].latency << " (ms) " << endl; + } + g_free(latency_info); + latency_info = NULL; + } + } + return num_sources_in_batch; + }, + "gst_buffer"_a, py::return_value_policy::reference, + pydsdoc::methodsDoc::nvds_measure_buffer_latency); } } diff --git a/bindings/src/bindnvosd.cpp b/bindings/src/bindnvosd.cpp index 17ef02c..9da0aac 100644 --- a/bindings/src/bindnvosd.cpp +++ b/bindings/src/bindnvosd.cpp @@ -428,7 +428,18 @@ namespace pydeepstream { }, py::return_value_policy::reference, pydsdoc::NvOSD::NvOSD_MaskParams::get_mask_array) - + .def("alloc_mask_array", + [](NvOSD_MaskParams &self) -> py::array { + if (self.data) + g_free(self.data); + self.size = self.width*self.height*sizeof(float); + self.data = (float*)g_malloc0(self.size); + auto dtype = py::dtype(py::format_descriptor::format()); + return py::array(dtype, {self.size / sizeof(float)}, {sizeof(float)}, self.data, py::cast(self.data)); + }, + py::return_value_policy::reference, + 
pydsdoc::NvOSD::NvOSD_MaskParams::alloc_mask_array) + .def("cast", [](void *data) { return (NvOSD_MaskParams *) data; diff --git a/bindings/src/pyds.cpp b/bindings/src/pyds.cpp index 00406a4..30d02ff 100644 --- a/bindings/src/pyds.cpp +++ b/bindings/src/pyds.cpp @@ -35,7 +35,7 @@ #include */ -#define PYDS_VERSION "1.1.10" +#define PYDS_VERSION "1.1.11" using namespace std; namespace py = pybind11; diff --git a/docs/PYTHON_API/Methods/methodsdoc.rst b/docs/PYTHON_API/Methods/methodsdoc.rst index 41fb599..10d9804 100644 --- a/docs/PYTHON_API/Methods/methodsdoc.rst +++ b/docs/PYTHON_API/Methods/methodsdoc.rst @@ -410,4 +410,8 @@ NvBufSurfaceMapEglImage .. autofunction:: pyds.NvBufSurfaceMapEglImage +============================= +nvds_measure_buffer_latency +============================= +.. autofunction:: pyds.nvds_measure_buffer_latency \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index d69249a..3258826 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,7 +38,7 @@ project = 'Deepstream' copyright = '2019-2023, NVIDIA.' author = 'NVIDIA' -version = 'Deepstream Version: 6.4' +version = 'Deepstream Version: 7.0' release = version diff --git a/notebooks/configs/dslaunchpad_pgie_config.txt b/notebooks/configs/dslaunchpad_pgie_config.txt index 26885aa..8898e45 100644 --- a/notebooks/configs/dslaunchpad_pgie_config.txt +++ b/notebooks/configs/dslaunchpad_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/notebooks/deepstream_launchpad.ipynb b/notebooks/deepstream_launchpad.ipynb index b71dd6b..7f46109 100644 --- a/notebooks/deepstream_launchpad.ipynb +++ b/notebooks/deepstream_launchpad.ipynb @@ -524,7 +524,7 @@ " sys.stderr.write(\"Unable to create source bin \\n\")\n", " pipeline.add(source_bin) # Add source bin to pipeline\n", " padname=\"sink_%u\" %i\n", - " sinkpad= streammux.get_request_pad(padname) # Retrieve a sink pad from the streammux element\n", + " sinkpad= streammux.request_pad_simple(padname) # Retrieve a sink pad from the streammux element\n", " if not sinkpad:\n", " sys.stderr.write(\"Unable to create sink pad bin \\n\")\n", " srcpad=source_bin.get_static_pad(\"src\") # Retrieve the source pad of the source bin\n", @@ -578,7 +578,7 @@ "net-scale-factor=0.00392156862745098 # Pixel scaling factor\n", "tlt-model-key=tlt_encode # Key for the TAO toolkit encoded model.\n", "tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt # Pathname of the TAO toolkit encoded model.\n", - "model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine # Path to serialized model engine file\n", + "model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine # Path to serialized model engine file\n", "labelfile-path=../../../../samples/models/Primary_Detector/labels.txt # Path to text file containing labels\n", "int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin # Path to calibration 
file\n", "force-implicit-batch-dim=1 # Force the implicit batch dimension mode\n", @@ -1045,7 +1045,7 @@ " sys.stderr.write(\"Unable to create source bin \\n\")\n", " pipeline.add(source_bin) # Add source bin to pipeline\n", " padname=\"sink_%u\" %i\n", - " sinkpad= streammux.get_request_pad(padname) # Retrieve a sink pad from the streammux element\n", + " sinkpad= streammux.request_pad_simple(padname) # Retrieve a sink pad from the streammux element\n", " if not sinkpad:\n", " sys.stderr.write(\"Unable to create sink pad bin \\n\")\n", " srcpad=source_bin.get_static_pad(\"src\") # Retrieve the source pad of the source bin\n", diff --git a/notebooks/deepstream_test_1.ipynb b/notebooks/deepstream_test_1.ipynb index 0ce5d67..0fcfdb6 100644 --- a/notebooks/deepstream_test_1.ipynb +++ b/notebooks/deepstream_test_1.ipynb @@ -50,9 +50,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - " * **DeepStream SDK 6.4**\n", + " * **DeepStream SDK 7.0**\n", "\n", - "To setup and install DeepStream 6.4, please follow the steps at https://developer.nvidia.com/deepstream-getting-started\n", + "To setup and install DeepStream 7.0, please follow the steps at https://developer.nvidia.com/deepstream-getting-started\n", " \n", " * **DeepStream Python Apps**\n", "\n", @@ -87,7 +87,7 @@ "import gi\n", "gi.require_version('Gst', '1.0')\n", "from gi.repository import GLib, Gst\n", - "from common.is_aarch_64 import is_aarch64\n", + "from common.platform_info import PlatformInfo\n", "from common.bus_call import bus_call\n", "\n", "import pyds" @@ -233,6 +233,7 @@ "metadata": {}, "outputs": [], "source": [ + "platform_info = PlatformInfo()\n", "# Standard initialization procedure\n", "Gst.init(None)\n", "\n", @@ -396,14 +397,18 @@ "metadata": {}, "outputs": [], "source": [ - "if is_aarch64():\n", + "if platform_info.is_integrated_gpu():\n", " print(\"Creating nv3dsink \\n\")\n", " sink = Gst.ElementFactory.make(\"nv3dsink\", \"nv3d-sink\")\n", " if not sink:\n", " sys.stderr.write(\" Unable to create nv3dsink \\n\")\n", "else:\n", - " print(\"Creating EGLSink \\n\")\n", - " sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n", + " if platform_info.is_platform_aarch64():\n", + " print(\"Creating nv3dsink \\n\")\n", + " sink = Gst.ElementFactory.make(\"nv3dsink\", \"nv3d-sink\")\n", + " else:\n", + " print(\"Creating EGLSink \\n\")\n", + " sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n", " if not sink:\n", " sys.stderr.write(\" Unable to create egl sink \\n\")" ] @@ -473,7 +478,7 @@ "source.link(h264parser)\n", "h264parser.link(decoder)\n", "\n", - "sinkpad = streammux.get_request_pad(\"sink_0\")\n", + "sinkpad = streammux.request_pad_simple(\"sink_0\")\n", "if not sinkpad:\n", " sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n", "srcpad = decoder.get_static_pad(\"src\")\n", diff --git a/notebooks/deepstream_test_4.ipynb b/notebooks/deepstream_test_4.ipynb index 6bc9587..34405f2 100644 --- a/notebooks/deepstream_test_4.ipynb +++ b/notebooks/deepstream_test_4.ipynb @@ -45,9 +45,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - " * **DeepStream SDK 6.4**\n", + " * **DeepStream SDK 7.0**\n", "\n", - "To setup and install DeepStream 6.4, please follow the steps at https://developer.nvidia.com/deepstream-getting-started\n", + "To setup and install DeepStream 7.0, please follow the steps at https://developer.nvidia.com/deepstream-getting-started\n", " \n", " * **DeepStream Python Apps**\n", "\n", @@ -133,7 +133,7 @@ "from gi.repository 
import GLib, Gst\n", "import sys\n", "from optparse import OptionParser\n", - "from common.is_aarch_64 import is_aarch64\n", + "from common.platform_info import PlatformInfo\n", "from common.bus_call import bus_call\n", "from common.utils import long_to_uint64\n", "import pyds" @@ -458,6 +458,7 @@ "metadata": {}, "outputs": [], "source": [ + "platform_info = PlatformInfo()\n", "Gst.init(None)\n", "\n", "# Deprecated: following meta_copy_func and meta_free_func\n", @@ -716,14 +717,18 @@ " if not sink:\n", " sys.stderr.write(\" Unable to create fakesink \\n\")\n", "else:\n", - " if is_aarch64():\n", + " if platform_info.is_integrated_gpu():\n", " print(\"Creating nv3dsink \\n\")\n", " sink = Gst.ElementFactory.make(\"nv3dsink\", \"nv3d-sink\")\n", " if not sink:\n", " sys.stderr.write(\" Unable to create nv3dsink \\n\")\n", " else:\n", - " print(\"Creating EGLSink \\n\")\n", - " sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n", + " if platform_info.is_platform_aarch64():\n", + " print(\"Creating nv3dsink \\n\")\n", + " sink = Gst.ElementFactory.make(\"nv3dsink\", \"nv3d-sink\")\n", + " else:\n", + " print(\"Creating EGLSink \\n\")\n", + " sink = Gst.ElementFactory.make(\"nveglglessink\", \"nvvideo-renderer\")\n", " if not sink:\n", " sys.stderr.write(\" Unable to create egl sink \\n\")" ] @@ -806,7 +811,7 @@ "source.link(h264parser)\n", "h264parser.link(decoder)\n", "\n", - "sinkpad = streammux.get_request_pad(\"sink_0\")\n", + "sinkpad = streammux.request_pad_simple(\"sink_0\")\n", "if not sinkpad:\n", " sys.stderr.write(\" Unable to get the sink pad of streammux \\n\")\n", "srcpad = decoder.get_static_pad(\"src\")\n", @@ -822,8 +827,8 @@ "msgconv.link(msgbroker)\n", "queue2.link(sink)\n", "sink_pad = queue1.get_static_pad(\"sink\")\n", - "tee_msg_pad = tee.get_request_pad('src_%u')\n", - "tee_render_pad = tee.get_request_pad(\"src_%u\")\n", + "tee_msg_pad = tee.request_pad_simple('src_%u')\n", + "tee_render_pad = tee.request_pad_simple(\"src_%u\")\n", "if not tee_msg_pad or not tee_render_pad:\n", " sys.stderr.write(\"Unable to get request pads\\n\")\n", "tee_msg_pad.link(sink_pad)\n", diff --git a/tests/__pycache__/__init__.cpython-310.pyc b/tests/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index a82ef5860feac804be45556886f083254b5ada84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 166 zcmd1j<>g`kf_q;@Q$h4&5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!HgettoTeqLE- zN@k*dN@{9BaY<2XVlEO>*G$huzc|0NC^@xQKczUnpt2+*KTp3TwYa2MKR!M)FS8^* aUaz3?7Kcr4eoARhsvXFRVkRKL!Tg`kf_q;@Q$h4&5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;x_OettoTeqLE- zN@k*dN@{9BaY<2XVlEO>*G$huzc|0NC^@xQKczUnpt2+*KTp3TwYa2MKRG`)7s!c^ g&&Z1E7`8p0kDbist4Uf~R0TmLRw`L4RXw4q0@Mg0l8UHQtdK^t>q#=J-I=ZJ z4P-SrpyW4jPEM#7F8muGxNzzZKvYPmyssxa*_6biv0vNoV?TavKQ5ZhkU;w5uhsO| zIYR!#%536Lc>=$B44EK;4oIgeX@{otlnExBGe)tE51b+I@S)prDcK~#72XLE-ZATV zg6@!({~E?1TgKT`dg46T__W20zq#G*r#&qhboH=*HaSpv0>63$SxP!okPZ_O&Ur#R zj^M&QAssF};X~<)Ks2EAL@4H<^zE!o(|jtEVfvL$WugnY)pKF6X+j&P{sMmWK4c{= zNlZ!xIW0NltaKoE1lb|4=vIrHFy0;K@h*;O0&5rDzG+x%tWpUMBGE}KvYu|ahO2b2 zYZ@0AA59CX-AsUO2!2K60coT_Jv_eCgf>#L+}FUUxx)qndw!X{4;~eJ zZrRIaY`6iL31Ii^D&A$jS~EwFDerrLvAoylKn+6`SE19p=%&p6m`o~BegXYl{%HK{tx`zHG(50wH07f!4>Ny4e?3A35BVKY9 zX)5S3t>UiPsiUFwn4Zxiw{-jNkym0mu%zn4b7J*r4nau$ zTyy7hg)moG`~P-@znw$vuIplbFV*XyzMqeGcT>6ES~VfG#9do8 z?nq|2Hhgp_l)1a7rX4Z*)Th4n4G;$O= z)7;U;s8V6L%%(ZLL=|uzf!p>iK%~SAYIvUhq$TQ%S*hl(4{(Kw5cM1mwZ+^5NBfYN z`DclECW~(RZK}sIH*EXsR#b_QGfITvS-x8sZ)}wsf>5uux+ngilAh|iy91PKxr#Kr 
diff --git a/tests/common/__pycache__/generic_pipeline.cpython-310.pyc b/tests/common/__pycache__/generic_pipeline.cpython-310.pyc
deleted file mode 100644
index 7dd890755de233b92063898ea507f2df2bae967f..0000000000000000000000000000000000000000
Binary files a/tests/common/__pycache__/generic_pipeline.cpython-310.pyc and /dev/null differ
diff --git a/tests/common/__pycache__/pipeline_fakesink.cpython-310.pyc b/tests/common/__pycache__/pipeline_fakesink.cpython-310.pyc
deleted file mode 100644
index 30e03a7a2620401282a9d76f8833edf8ca73198b..0000000000000000000000000000000000000000
Binary files a/tests/common/__pycache__/pipeline_fakesink.cpython-310.pyc and /dev/null differ
diff --git a/tests/common/__pycache__/pipeline_fakesink_tracker.cpython-310.pyc b/tests/common/__pycache__/pipeline_fakesink_tracker.cpython-310.pyc
deleted file mode 100644
Binary files a/tests/common/__pycache__/pipeline_fakesink_tracker.cpython-310.pyc and /dev/null differ
diff --git a/tests/common/__pycache__/tracker_utils.cpython-310.pyc b/tests/common/__pycache__/tracker_utils.cpython-310.pyc
deleted file mode 100644
Binary files a/tests/common/__pycache__/tracker_utils.cpython-310.pyc and /dev/null differ
diff --git a/tests/common/__pycache__/utils.cpython-310.pyc b/tests/common/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index fd4bb6f9ba8e5220522adfaa82cbe2b47653d595..0000000000000000000000000000000000000000
Binary files a/tests/common/__pycache__/utils.cpython-310.pyc and /dev/null differ
diff --git a/tests/integration/__pycache__/__init__.cpython-310.pyc b/tests/integration/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
Binary files a/tests/integration/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/tests/integration/__pycache__/test.cpython-310-pytest-7.4.2.pyc b/tests/integration/__pycache__/test.cpython-310-pytest-7.4.2.pyc
deleted file mode 100644
index c6d228e3577f389bcf85e887c815518f942e5405..0000000000000000000000000000000000000000
Binary files a/tests/integration/__pycache__/test.cpython-310-pytest-7.4.2.pyc and /dev/null differ
diff --git a/tests/integration/deepstream_demo.py b/tests/integration/deepstream_demo.py index d2cb9e9..28e19e9 100755 --- a/tests/integration/deepstream_demo.py +++ b/tests/integration/deepstream_demo.py @@ -20,7 +20,7 @@ sys.path.append('../..') -from tests.common.utils import is_aarch64, load_deepstream_libs +from tests.common.utils import is_integrated_gpu, load_deepstream_libs from tests.common.pipeline_filesink import PipelineFileSink from tests.common.frame_iterator import FrameIterator @@ -131,7 +131,7 @@ def make_and_run_pipeline(config_file, video_path): } probe_function = FrameIterator(frame_function, box_function, data_probe) - sp = PipelineFileSink(properties, is_aarch64()) + sp = PipelineFileSink(properties, is_integrated_gpu()) sp.set_probe(probe_function) sp.run() diff --git a/tests/integration/ds_base_config.txt b/tests/integration/ds_base_config.txt index a0e4e0f..9f5352e 100644 --- a/tests/integration/ds_base_config.txt +++ b/tests/integration/ds_base_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/tests/integration/ds_pgie_config.txt b/tests/integration/ds_pgie_config.txt index a0e4e0f..9f5352e 100644 --- a/tests/integration/ds_pgie_config.txt +++ b/tests/integration/ds_pgie_config.txt @@ -57,7 +57,7 @@ gpu-id=0 net-scale-factor=0.00392156862745098 tlt-model-key=tlt_encode tlt-encoded-model=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt -model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b1_gpu0_int8.engine +model-engine-file=../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b30_gpu0_int8.engine labelfile-path=../../../../samples/models/Primary_Detector/labels.txt int8-calib-file=../../../../samples/models/Primary_Detector/cal_trt.bin force-implicit-batch-dim=1 diff --git a/tests/integration/test.py b/tests/integration/test.py index 27ccc2c..c73703c 100644 --- a/tests/integration/test.py +++ b/tests/integration/test.py @@ -18,11 +18,11 @@ import
pytest import pyds -from tests.common.frame_iterator import FrameIterator -from tests.common.pipeline_fakesink import PipelineFakesink -from tests.common.pipeline_fakesink_tracker import PipelineFakesinkTracker -from tests.common.tracker_utils import get_tracker_properties_from_config -from tests.common.utils import is_aarch64 +from tests.testcommon.frame_iterator import FrameIterator +from tests.testcommon.pipeline_fakesink import PipelineFakesink +from tests.testcommon.pipeline_fakesink_tracker import PipelineFakesinkTracker +from tests.testcommon.tracker_utils import get_tracker_properties_from_config +from tests.testcommon.utils import is_integrated_gpu VIDEO_PATH1 = "/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264" STANDARD_PROPERTIES1 = { @@ -94,7 +94,7 @@ def box_function(batch_meta, frame_meta, obj_meta, dict_data): probe_function = FrameIterator(frame_function, box_function, data_probe) # Creating the pipeline - sp = PipelineFakesink(STANDARD_PROPERTIES1, is_aarch64()) + sp = PipelineFakesink(STANDARD_PROPERTIES1, is_integrated_gpu()) # registering the probe function sp.set_probe(probe_function) @@ -202,7 +202,7 @@ def box_function(batch_meta, frame_meta, obj_meta, dict_data): user_function) # Creating the pipeline - sp = PipelineFakesinkTracker(properties, is_aarch64()) + sp = PipelineFakesinkTracker(properties, is_integrated_gpu()) # registering the probe function sp.set_probe(probe_function) diff --git a/tests/common/__init__.py b/tests/testcommon/__init__.py similarity index 100% rename from tests/common/__init__.py rename to tests/testcommon/__init__.py diff --git a/tests/common/frame_iterator.py b/tests/testcommon/frame_iterator.py similarity index 100% rename from tests/common/frame_iterator.py rename to tests/testcommon/frame_iterator.py diff --git a/tests/common/generic_pipeline.py b/tests/testcommon/generic_pipeline.py similarity index 95% rename from tests/common/generic_pipeline.py rename to tests/testcommon/generic_pipeline.py index cf05e62..9a2414d 100644 --- a/tests/common/generic_pipeline.py +++ b/tests/testcommon/generic_pipeline.py @@ -22,7 +22,7 @@ gi.require_version('Gst', '1.0') from gi.repository import Gst, GLib -from tests.common.utils import bus_call +from tests.testcommon.utils import bus_call class PipelineElement: @@ -42,13 +42,13 @@ class GenericPipeline: its content. 
There are """ - def __init__(self, properties, is_aarch64, data_pipeline, + def __init__(self, properties, is_integrated_gpu, data_pipeline, data_pipeline_arm64): self._pipeline = None self._loop = None self._pipeline_content = {} self._properties = properties - self._is_aarch64 = is_aarch64 + self._is_integrated_gpu = is_integrated_gpu self._data_pipeline = data_pipeline self._data_pipeline_arm64 = data_pipeline_arm64 # Standard GStreamer initialization @@ -101,7 +101,7 @@ def _create_pipeline(self): for elm in self._data_pipeline: self._create_element(elm) - if self._is_aarch64: + if self._is_integrated_gpu: for elm in self._data_pipeline_arm64: self._create_element(elm) diff --git a/tests/common/pipeline_fakesink.py b/tests/testcommon/pipeline_fakesink.py similarity index 91% rename from tests/common/pipeline_fakesink.py rename to tests/testcommon/pipeline_fakesink.py index 6f2e12e..e381dfd 100644 --- a/tests/common/pipeline_fakesink.py +++ b/tests/testcommon/pipeline_fakesink.py @@ -22,12 +22,12 @@ gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst -from tests.common.generic_pipeline import GenericPipeline +from tests.testcommon.generic_pipeline import GenericPipeline class PipelineFakesink(GenericPipeline): - def __init__(self, properties, is_aarch64): + def __init__(self, properties, is_integrated_gpu): pipeline_base = [ ["filesrc", "file-source"], # source ["h264parse", "h264-parser"], # h264parser @@ -40,7 +40,7 @@ def __init__(self, properties, is_aarch64): ] pipeline_arm64 = [ ] - super().__init__(properties, is_aarch64, pipeline_base, + super().__init__(properties, is_integrated_gpu, pipeline_base, pipeline_arm64) def set_probe(self, probe_function): @@ -70,7 +70,7 @@ def _link_elements(self): sys.stderr.write(" Unable to get source pad of decoder \n") return False - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") return False diff --git a/tests/common/pipeline_fakesink_tracker.py b/tests/testcommon/pipeline_fakesink_tracker.py similarity index 92% rename from tests/common/pipeline_fakesink_tracker.py rename to tests/testcommon/pipeline_fakesink_tracker.py index 38d10aa..0966cbf 100644 --- a/tests/common/pipeline_fakesink_tracker.py +++ b/tests/testcommon/pipeline_fakesink_tracker.py @@ -22,12 +22,12 @@ gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst -from tests.common.generic_pipeline import GenericPipeline +from tests.testcommon.generic_pipeline import GenericPipeline class PipelineFakesinkTracker(GenericPipeline): - def __init__(self, properties, is_aarch64): + def __init__(self, properties, is_integrated_gpu): pipeline_base = [ ["filesrc", "file-source"], # source ["h264parse", "h264-parser"], # h264parser @@ -43,7 +43,7 @@ def __init__(self, properties, is_aarch64): ] pipeline_arm64 = [ ] - super().__init__(properties, is_aarch64, pipeline_base, + super().__init__(properties, is_integrated_gpu, pipeline_base, pipeline_arm64) def set_probe(self, probe_function): @@ -76,7 +76,7 @@ def _link_elements(self): sys.stderr.write(" Unable to get source pad of decoder \n") return False - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") return False diff --git a/tests/common/pipeline_filesink.py b/tests/testcommon/pipeline_filesink.py similarity index 92% rename from 
tests/common/pipeline_filesink.py rename to tests/testcommon/pipeline_filesink.py index 6a50de0..bd81866 100644 --- a/tests/common/pipeline_filesink.py +++ b/tests/testcommon/pipeline_filesink.py @@ -22,12 +22,12 @@ gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst -from tests.common.generic_pipeline import GenericPipeline +from tests.testcommon.generic_pipeline import GenericPipeline class PipelineFileSink(GenericPipeline): - def __init__(self, properties, is_aarch64): + def __init__(self, properties, is_integrated_gpu): pipeline_base = [ ["filesrc", "file-source"], # source ["h264parse", "h264-parser"], # h264parser @@ -47,7 +47,7 @@ def __init__(self, properties, is_aarch64): pipeline_arm64 = [ ["nvegltransform", "nvegl-transform"] # transform ] - super().__init__(properties, is_aarch64, pipeline_base, + super().__init__(properties, is_integrated_gpu, pipeline_base, pipeline_arm64) def set_probe(self, probe_function): @@ -83,7 +83,7 @@ def _link_elements(self): sys.stderr.write(" Unable to get source pad of decoder \n") return False - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") return False @@ -102,7 +102,7 @@ def _link_elements(self): encoder.link(codeparser) codeparser.link(container) container.link(sink) - if self._is_aarch64: + if self._is_integrated_gpu: transform = gebn("nvegl-transform") nvosd.link(transform) transform.link(sink) diff --git a/tests/common/pipeline_nveglglessink.py b/tests/testcommon/pipeline_nveglglessink.py similarity index 91% rename from tests/common/pipeline_nveglglessink.py rename to tests/testcommon/pipeline_nveglglessink.py index 9d260a5..6c67502 100644 --- a/tests/common/pipeline_nveglglessink.py +++ b/tests/testcommon/pipeline_nveglglessink.py @@ -22,12 +22,12 @@ gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst -from tests.common.generic_pipeline import GenericPipeline +from tests.testcommon.generic_pipeline import GenericPipeline class PipelineNveglgleSink(GenericPipeline): - def __init__(self, properties, is_aarch64): + def __init__(self, properties, is_integrated_gpu): pipeline_base = [ ["filesrc", "file-source"], # source ["h264parse", "h264-parser"], # h264parser @@ -41,7 +41,7 @@ def __init__(self, properties, is_aarch64): pipeline_arm64 = [ ["nvegltransform", "nvegl-transform"] # transform ] - super().__init__(properties, is_aarch64, pipeline_base, + super().__init__(properties, is_integrated_gpu, pipeline_base, pipeline_arm64) def set_probe(self, probe_function): @@ -71,7 +71,7 @@ def _link_elements(self): sys.stderr.write(" Unable to get source pad of decoder \n") return False - sinkpad = streammux.get_request_pad("sink_0") + sinkpad = streammux.request_pad_simple("sink_0") if not sinkpad: sys.stderr.write(" Unable to get the sink pad of streammux \n") return False @@ -80,7 +80,7 @@ def _link_elements(self): streammux.link(pgie) pgie.link(nvvidconv) nvvidconv.link(nvosd) - if self._is_aarch64: + if self._is_integrated_gpu: transform = gebn("nvegl-transform") nvosd.link(transform) transform.link(sink) diff --git a/tests/common/tracker_utils.py b/tests/testcommon/tracker_utils.py similarity index 100% rename from tests/common/tracker_utils.py rename to tests/testcommon/tracker_utils.py diff --git a/tests/common/utils.py b/tests/testcommon/utils.py similarity index 91% rename from tests/common/utils.py rename to tests/testcommon/utils.py index 9d75c63..76cdab7 100644 --- 
a/tests/common/utils.py +++ b/tests/testcommon/utils.py @@ -19,6 +19,10 @@ import time import gi +sys.path.append('../../') +sys.path.append('../../apps/') +from common.platform_info import PlatformInfo + gi.require_version('Gst', '1.0') from gi.repository import GObject, Gst @@ -66,8 +70,9 @@ def load_deepstream_libs(): sys.path.append('/opt/nvidia/deepstream/deepstream/lib') -def is_aarch64(): - return platform.uname()[4] == 'aarch64' +def is_integrated_gpu(): + platforminfo = PlatformInfo() + return platforminfo.is_integrated_gpu() def long_to_int(l):
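The utils.py hunk above shows the pattern the rest of the test helpers in this patch converge on: platform checks go through the shared PlatformInfo module under apps/common/ instead of platform.uname(), and streammux pads are obtained with request_pad_simple() rather than the deprecated get_request_pad(). The sketch below is a minimal standalone illustration of that pattern, not part of the patch; it assumes the DeepStream Python bindings and GStreamer are installed, and the element names, pad name, and relative sys.path entries are placeholders borrowed from the test code rather than a prescribed layout.

```python
#!/usr/bin/env python3
# Illustrative sketch (not part of the patch) of the two API patterns the
# 1.1.11 tests standardize on: PlatformInfo-based platform checks and
# Gst.Element.request_pad_simple() in place of the deprecated
# get_request_pad(). Element names and paths are placeholders.

import sys
import gi

gi.require_version('Gst', '1.0')
from gi.repository import Gst

# Assumed layout, as in tests/testcommon/utils.py: apps/ is two levels up.
sys.path.append('../../apps/')
from common.platform_info import PlatformInfo


def build_minimal_chain():
    Gst.init(None)
    platform_info = PlatformInfo()

    pipeline = Gst.Pipeline.new("sketch-pipeline")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    streammux = Gst.ElementFactory.make("nvstreammux", "stream-muxer")
    if not pipeline or not decoder or not streammux:
        sys.stderr.write("Unable to create elements\n")
        return None

    # nvstreammux needs basic batching properties before it can be used.
    streammux.set_property("batch-size", 1)
    streammux.set_property("width", 1280)
    streammux.set_property("height", 720)

    pipeline.add(decoder)
    pipeline.add(streammux)

    # request_pad_simple() replaces the deprecated get_request_pad().
    sinkpad = streammux.request_pad_simple("sink_0")
    srcpad = decoder.get_static_pad("src")
    if not sinkpad or not srcpad:
        sys.stderr.write("Unable to get pads\n")
        return None
    srcpad.link(sinkpad)

    # Platform checks now go through PlatformInfo instead of platform.uname().
    if platform_info.is_integrated_gpu():
        print("Running on an integrated GPU")
    else:
        print("Running on a discrete GPU")

    return pipeline


if __name__ == "__main__":
    build_minimal_chain()
```

The rename from is_aarch64() to is_integrated_gpu() reflects what the test pipelines actually branch on: whether the GPU is integrated, which decides if the extra nvegltransform element is added, rather than the CPU architecture alone.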