Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright 2023 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Do the following to get the serial number of an Intel® RealSense™ Camera:

1. Build the DL Streamer RealSense image:

make build-dlstreamer-realsense

2. Plug your Intel® RealSense™ Camera into the system.

3. Use the makefile target get-realsense-serial-num to get the serial number of your Intel® RealSense™ Camera:

$ make get-realsense-serial-num

You should see a serial number printed out. If you do not see the expected result, check that the Intel® RealSense™ Camera is plugged in.
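Once you have the serial number, you can pass it to a pipeline run as the input source together with the --realsense_enabled flag (see the Run Pipelines examples later in this document). A minimal sketch; the serial number 012345678901 and the object_detection profile here are placeholders for your own values:

# substitute the serial number printed by get-realsense-serial-num
PIPELINE_PROFILE="object_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc 012345678901 --realsense_enabled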
OpenVINO Model Server (OVMS) supports multiple ways to run an inferencing pipeline: the TensorFlow Serving gRPC API, KServe gRPC API, TensorFlow Serving REST API, KServe REST API, and the OVMS C API. Here is a demonstration of using the OVMS C API method to run a face detection inferencing pipeline, with the steps below:
The template config file is located at configs/opencv-ovms/models/2022/config_template.json. Edit the file and append the following face detection model configuration section to the template:
+{"config": {
+ "name": "face-detection-retail-0005",
+ "base_path": "face-detection-retail-0005/FP16-INT8",
+ "shape": "(1,3,800,800)",
+ "nireq": 2,
+ "batch_size":"1",
+ "plugin_config": {"PERFORMANCE_HINT": "LATENCY"},
+ "target_device": "{target_device}"},
+ "latest": { "num_versions": 2 }
+ }
+
Note
+shape
is optional and takes precedence over batch_size; please remove this attribute if you don't know the correct value for the model.
Note
+Please leave target_device
value as it is, as the value {target_device}
will be recognized and replaced by script run.
You can find the parameter description in the ovms docs.
Here is the list of files we added in the directory configs/opencv-ovms/gst_capi/pipelines/face_detection/
:
main.cpp
- this is all the work about pre-processing before sending to OVMS for inferencing and post-processing for displaying.Makefile
- to help building the pre-processing and post-processing binary.You can add multiple environment variable files to configs/opencv-ovms/envs/
directory for your pipeline. For face detection pipeline run, we have added configs/opencv-ovms/envs/capi_face_detection.env
environment variable file. Below is a list of explanation for all environment variables and current default values we set for face detection pipeline run, this list can be extended for any future modification.
EV Name | +Face Detection Default Value | +Description | +
---|---|---|
RENDER_PORTRAIT_MODE | +1 | +rendering in portrait mode, value: 0 or 1 | +
GST_DEBUG | +1 | +running GStreamer in debug mode, value: 0 or 1 | +
USE_ONEVPL | +1 | +using OneVPL CPU & GPU Support, value: 0 or 1 | +
PIPELINE_EXEC_PATH | +pipelines/face_detection/face_detection | +pipeline execution path inside container | +
GST_VAAPI_DRM_DEVICE | +/dev/dri/renderD128 | +GStreamer VAAPI DRM device input | +
TARGET_GPU_DEVICE | +--privileged | +allow using GPU devices if any | +
LOG_LEVEL | +0 | +GST_DEBUG log level to be set when running gst pipeline | +
RENDER_MODE | +1 | +option to display the input source video stream with the inferencing results, value: 0 or 1 | +
cl_cache_dir | +/home/intel/gst-ovms/.cl-cache | +cache directory in container | +
WINDOW_WIDTH | +1920 | +display window width | +
WINDOW_HEIGHT | +1080 | +display window height | +
DETECTION_THRESHOLD | +0.9 | +detection threshold value in floating point that needs to be between 0.0 to 1.0 | +
Details of the face detection pipeline environment variable file can be viewed in configs/opencv-ovms/envs/capi_face_detection.env
.
Details about the Profile Launcher configuration can be found here; the C API face detection profile launcher configuration can be viewed in configs/opencv-ovms/cmd_client/res/capi_face_detection/configuration.yaml
.
Here are the quick start steps to build and run the OVMS C API face detection pipeline profile:
+make build-capi_face_detection
cd benchmark-scripts/ && ./download_sample_videos.sh && cd ..
make run-camera-simulator
PIPELINE_PROFILE="capi_face_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
Note
+The pipeline run will automatically download the OpenVINO model files listed in configs/opencv-ovms/models/2022/config_template.json
OpenVINO Model Server has many ways to run inferencing pipeline: +TensorFlow Serving gRPC API, KServe gRPC API, TensorFlow Serving REST API, KServe REST API and OVMS C API through OpenVINO model server (OVMS). Here we are demonstrating for using OVMS C API method to run inferencing pipeline yolov5s ensemble models in following steps:
+The model template configuration file has been updated with model configs of yolov5, efficientnetb0_FP32INT8 and custom configurations, please view configs/opencv-ovms/models/2022/config_template.json
for detail.
Note
+New model yolov5 is similar to yolov5s configuration except the layout difference.
+The pre-processing and post-processing work files are added in directory of configs/opencv-ovms/gst_capi/pipelines/capi_yolov5_ensemble/
, please view directory for details.
You can add multiple environment variable files to configs/opencv-ovms/envs/
directory for your pipeline, we've added capi_yolov5_ensemble.env
for yolov5 ensemble pipeline run. Below is a list of explanation for all environment variables and current default values we set, this list can be extended for any future modification.
EV Name | +Default Value | +Description | +
---|---|---|
RENDER_PORTRAIT_MODE | +1 | +rendering in portrait mode, value: 0 or 1 | +
GST_DEBUG | +1 | +running GStreamer in debug mode, value: 0 or 1 | +
USE_ONEVPL | +1 | +using OneVPL CPU & GPU Support, value: 0 or 1 | +
PIPELINE_EXEC_PATH | +pipelines/capi_yolov5_ensemble/capi_yolov5_ensemble | +pipeline execution path inside container | +
GST_VAAPI_DRM_DEVICE | +/dev/dri/renderD128 | +GStreamer VAAPI DRM device input | +
TARGET_GPU_DEVICE | +--privileged | +allow using GPU devices if any | +
LOG_LEVEL | +0 | +GST_DEBUG log level to be set when running gst pipeline | +
RENDER_MODE | +1 | +option to display the input source video stream with the inferencing results, value: 0 or 1 | +
cl_cache_dir | +/home/intel/gst-ovms/.cl-cache | +cache directory in container | +
WINDOW_WIDTH | +1920 | +display window width | +
WINDOW_HEIGHT | +1080 | +display window height | +
DETECTION_THRESHOLD | +0.7 | +detection threshold value in floating point that needs to be between 0.0 to 1.0 | +
BARCODE | +1 | +For capi_yolov5_ensemble pipeline, you can enable barcode detection. value: 0 or 1 | +
Details of the yolov5s ensemble pipeline environment variable file can be viewed in configs/opencv-ovms/envs/capi_yolov5_ensemble.env
.
Details about the Profile Launcher configuration can be found here; the yolov5 ensemble profile launcher configuration can be viewed in configs/opencv-ovms/cmd_client/res/capi_yolov5_ensemble/configuration.yaml
Here are the quick start steps to build and run the C API yolov5 ensemble pipeline profile:
+make build-capi_yolov5_ensemble
cd benchmark-scripts/ && ./download_sample_videos.sh && cd ..
make run-camera-simulator
PIPELINE_PROFILE="capi_yolov5_ensemble" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
Note
+The pipeline will automatically download the OpenVINO model files listed in configs/opencv-ovms/models/2022/config_template.json
To stop existing container: make clean-capi_yolov5_ensemble
+To stop all running containers including camera simulator and remove all log files: make clean-all
OpenVINO Model Server has many ways to run inferencing pipeline: +TensorFlow Serving gRPC API, KServe gRPC API, TensorFlow Serving REST API, KServe REST API and OVMS C API through OpenVINO model server (OVMS). Here we are demonstrating for using OVMS C API method to run inferencing pipeline yolov5s model in following steps:
+Here is the template config file location: configs/opencv-ovms/models/2022/config_template.json
, edit the file and append the new model's configuration into the template, such as yolov5 model as shown below:
+
{
+ "config": {
+ "name": "yolov5s",
+ "base_path": "/models/yolov5s/FP16-INT8",
+ "layout": "NHWC:NCHW",
+ "shape": "(1,416,416,3)",
+ "nireq": 1,
+ "batch_size": "1",
+ "plugin_config": {
+ "PERFORMANCE_HINT": "LATENCY"
+ },
+ "target_device": "{target_device}"
+ }
+ }
+
Note
+shape
is optional and takes precedence over batch_size, please remove this attribute if you don't know the value for the model.
Note
+Please leave target_device
value as it is, as the value {target_device}
will be recognized and replaced by script run.
You can find the parameter description in the ovms docs.
+Here is the list of files we added in directory of configs/opencv-ovms/gst_capi/pipelines/capi_yolov5/
:
main.cpp
- this is all the work about pre-processing before sending to OVMS for inferencing and post-processing for displaying.Makefile
- to help building the pre-processing and post-processing binary.You can add multiple environment variable files to configs/opencv-ovms/envs/
directory for your pipeline, we've added capi_yolov5.env
for yolov5 pipeline run. Below is a list of explanation for all environment variables and current default values we set, this list can be extended for any future modification.
EV Name | +Default Value | +Description | +
---|---|---|
RENDER_PORTRAIT_MODE | +1 | +rendering in portrait mode, value: 0 or 1 | +
GST_DEBUG | +1 | +running GStreamer in debug mode, value: 0 or 1 | +
USE_ONEVPL | +1 | +using OneVPL CPU & GPU Support, value: 0 or 1 | +
PIPELINE_EXEC_PATH | +pipelines/capi_yolov5/capi_yolov5 | +pipeline execution path inside container | +
GST_VAAPI_DRM_DEVICE | +/dev/dri/renderD128 | +GStreamer VAAPI DRM device input | +
TARGET_GPU_DEVICE | +--privileged | +allow using GPU devices if any | +
LOG_LEVEL | +0 | +GST_DEBUG log level to be set when running gst pipeline | +
RENDER_MODE | +1 | +option to display the input source video stream with the inferencing results, value: 0 or 1 | +
cl_cache_dir | +/home/intel/gst-ovms/.cl-cache | +cache directory in container | +
WINDOW_WIDTH | +1920 | +display window width | +
WINDOW_HEIGHT | +1080 | +display window height | +
DETECTION_THRESHOLD | +0.7 | +detection threshold value in floating point that needs to be between 0.0 to 1.0 | +
Details of the yolov5s pipeline environment variable file can be viewed in configs/opencv-ovms/envs/capi_yolov5.env
.
Details about the Profile Launcher configuration can be found here; the yolov5 pipeline profile launcher configuration can be viewed in configs/opencv-ovms/cmd_client/res/capi_yolov5/configuration.yaml
Here are the quick start steps to build and run the C API yolov5 pipeline profile:
+make build-capi_yolov5
cd benchmark-scripts/ && ./download_sample_videos.sh && cd ..
make run-camera-simulator
PIPELINE_PROFILE="capi_yolov5" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1
Note
+The pipeline will automatically download the OpenVINO model files listed in configs/opencv-ovms/models/2022/config_template.json
Pipelines can be run using docker-compose files. Changes are custom made inside the docker-compose.yml
file for integration with the Developer Toolbox.
Note
+To utilize all the features offered by Automated Self-Checkout, run the pipelines as illustrated in the section Run Pipelines.
+Prerequisites
+Before running, Set Up the Pipelines.
+Note
+Ensure Docker Compose v2 is installed in order to run the pipelines via this feature.
+Customize the docker-compose.yml
to add the number of camera simulators required and the number of different type of pipelines that need to be run
Note
+Follow all the instructions in docker-compose.yml
for customizations.
Run the pipelines
+make run-pipelines
+
All the containers i.e camera simulators, OVMS server and pipelines should start without any errors in portainer as shown below in Figure 1 -
+ +Stop the pipelines
+make down-pipelines
+
You can benchmark pipelines with a collection of scripts to get the pipeline performance metrics such as video processing in frame-per-second (FPS), memory usage, power consumption, and so on.
+Before benchmarking, make sure you set up the pipeline.
+Build the benchmark Docker* images + Benchmark scripts are containerized inside Docker. The following table lists the commands for various platforms. Choose and run the command corresponding to your hardware configuration.
+Platform | +Docker Build Command | +Check Success | +
---|---|---|
Intel® integrated and Arc™ GPUs | +cd benchmark-scripts |
+Docker images command to show both benchmark:dev and benchmark:igt images |
+
Intel® Flex GPUs | +cd benchmark-scripts |
+Docker images command to show both benchmark:dev and benchmark:xpu images |
+
Warning
+Build command may take a while, depending on your internet connection and machine specifications.
+Determine the appropriate parameters for
+ +Choose a given pipeline profile, and run the benchmark for that pipeline profile. To see all available pipeline profiles, use make list-profiles
command on the project base directory.
# if you are in the benchmark-scripts directory then do a cd ..
$ cd ..
$ make list-profiles
The benchmark.sh
shell script is in the base/benchmark_scripts directory. The following snippet give an example to run multiple pipelines benchmarking for the object detection
pipelines.
cd ./benchmark_scripts
+PIPELINE_PROFILE="object_detection" RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines <number of pipelines> --logdir <output dir>/data --init_duration 30 --duration 120 --platform <core|xeon|dgpu.x> --inputsrc <ex:4k rtsp stream with 10 objects>
+
Note
+The benchmark.sh
can either benchmark a specific number of pipelines or benchmark stream density based on the desired FPS.
The benchmark script can take either of the following video input sources:
+Real Time Streaming Protocol (RTSP)
+--inputsrc rtsp://127.0.0.1:8554/camera_0
+
Note
+Using RTSP source with benchmark.sh
will automatically run the camera simulator. The camera simulator will start an RTSP stream for each video file in the sample-media folder.
USB Camera
+--inputsrc /dev/video<N>, where N is 0 or an integer
+
Intel® RealSense™ Camera
+--inputsrc <RealSense camera serial number>
+
To know the serial number of the Intel® RealSense™ Camera, refer to Get Serial Number of Intel® RealSense™ Camera.
+Video File
+--inputsrc file:my_video_file.mp4
+
Note
+Video files must be in the sample-media folder, so that the Docker container can access the files. You can provide your own video files or download a sample video file using the script download_sample_videos.sh.
+Intel® Core™ Processor
+--platform core.x
if GPUs are available, then replace this parameter with targeted GPUs such as core (for all GPUs), core.0, core.1, and so on--platform core
will evenly distribute and utilize all available core GPUsIntel® Xeon® Scalable Processor
+--platform xeon
will use the Xeon CPU for the pipelinesDGPU (Intel® Data Center GPU Flex 140, Intel® Data Center GPU Flex 170, and Intel® Arc™ Setup)
+--platform dgpu.x
replace this parameter with targeted GPUs such as dgpu (for all GPUs), dgpu.0, dgpu.1, and so on--platform dgpu
will evenly distribute and utilize all available dgpusThe primary purpose of benchmarking with a specified number of pipelines is to discover the performance and system requirements for a given use case.
+Example
+Here is an example of running benchmarking object detection pipelines with specified number of pipelines:
+PIPELINE_PROFILE="object_detection" RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines <number of pipelines> --logdir <output dir>/data --init_duration 30 --duration 120 --platform <core|xeon|dgpu.x> --inputsrc <ex:4k rtsp stream with 10 objects>
+
where, the configurable input parameters include:
+--performance_mode
configures the scaling governor of the system. Supported modes are performance and powersave (default).--logdir
configures the benchmarking output directory--duration
configures the duration, in number of seconds, the benchmarking will run--init_duration
configures the duration, in number of seconds, to wait for system initialization before the benchmarking metrics or data collection beginsExample
+You can run multiple pipeline benchmarking with different configurations before consolidating all pipeline output results.
+To get the consolidated pipeline results, run the following make
command:
make consolidate ROOT_DIRECTORY=<output dir>
+
This command will consolidate the performance metrics that exist in the specified ROOT_DIRECTORY
.
Here is an example of consolidated output:
+Success
+Output of Consolidate_multiple_run_of_metrics.py
,Metric,data
+0,Total Text count,0
+1,Total Barcode count,2
+2,Camera_1 FPS,15.0
+3,Camera_0 FPS,15.0
+4,CPU Utilization %,16.548
+5,Memory Utilization %,21.162
+6,Disk Read MB/s,0.0
+7,Disk Write MB/s,0.025
+8,S0 Memory Bandwidth Usage MB/s,1872.632
+9,S0 Power Draw W,27.502
+10,GPU_0 VDBOX0 Utilization %,0.0
+11,GPU_0 GPU Utilization %,17.282
+
There are several pipeline profiles that support different programming languages and different pipeline models. You may specify the language choice and model input, and then prefix the benchmark script run command with the specific profile.
+An example of stream density benchmark script in golang:
+PIPELINE_PROFILE="grpc_go" sudo -E ./benchmark.sh --stream_density 14.9 --logdir mytest/data --duration 60 --init_duration 20 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
+
An example of stream density benchmark script in python:
+PIPELINE_PROFILE="grpc_python" sudo -E ./benchmark.sh --stream_density 14.9 --logdir mytest/data --duration 60 --init_duration 60 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
+
If prefix is not provided, then the default value is "grpc_python".
+camera-simulator.sh
: This script starts the camera simulator. Create two folders named camera-simulator and sample-media. Place camera-simulator.sh
in the camera-simulator folder. Manually copy the video files to the sample-media folder or run the download_sample_videos.sh
script to download sample videos. The camera-simulator.sh
script will start a simulator for each .mp4 video that it finds in the sample-media folder and will enumerate them as camera_0, camera_1, and so on. Make sure that the path to the camera-simulator.sh
script is mentioned correctly in the camera-simulator.sh
script.
stop_server.sh
: This script stops and removes all Docker containers closing the pipelines.
When the pipeline is run, the run.sh
script starts the service and performs inferencing on the selected input media. The output of running the pipeline provides the inference results for each frame based on the media source such as text, barcode, and so on, as well as the frames per second (FPS). Pipeline run provides many options in media type, system process platform type, and additional optional parameters. These options give you the opportunity to compare what system process platform is better for your need.
You can run the pipeline script, run.sh
with a given pipeline profile via the environment variable PIPELINE_PROFILE
, and the following additional input parameters:
Run the command based on your requirement. Once choices are selected for #1-3 above, to start the pipeline run, use the commands from the Examples section below.
+In the following examples, environment variables are used to select the desired PIPELINE_PROFILE
and RENDER_MODE
. This table uses run.sh
to run the object_detection pipeline profile:
Input source Type | +Input Source Parameter | +Command | +
---|---|---|
Simulated camera | +rtsp://127.0.0.1:8554/camera_X |
+PIPELINE_PROFILE="object_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core|xeon|dgpu.x --inputsrc rtsp://127.0.0.1:8554/camera_1 |
+
RealSense camera | +<serial_number> --realsense_enabled |
+PIPELINE_PROFILE="object_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core|xeon|dgpu.x --inputsrc |
+
USB camera | +/dev/video0 |
+PIPELINE_PROFILE="object_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core|xeon|dgpu.x --inputsrc /dev/video0 |
+
Video file | +file:my_video_file.mp4 |
+PIPELINE_PROFILE="object_detection" RENDER_MODE=1 sudo -E ./run.sh --platform core|xeon|dgpu.x --inputsrc file:my_video_file.mp4 |
+
Note
+The value of x in dgpu.x
can be 0, 1, 2, and so on depending on the number of discrete GPUs in the system.
Clone the repository
+git clone https://github.com/intel-retail/automated-self-checkout.git && cd ./automated-self-checkout
+
Build the profile launcher binary executable
+make build-profile-launcher
+
Each profile is an unique pipeline use case. We provide some profile examples, and the configuration examples of profiles are located here. Go here to find out the detail descriptions for the configuration of profile used by profile launcher.
+Build the benchmark Docker images
+cd benchmark-scripts
+make build-all
+
+cd ..
+
Note
+A successfully built benchmark Docker images should contain the following Docker images from docker images benchmark --format 'table{{.Repository}}\t{{.Tag}}'
command:
Note
+After successfully built benchmark Docker images, please remember to change the directory back to the project base directory from the current benchmark-scripts directory (i.e. cd ..
) for the following steps.
Download the models manually (Optional)
+Note
+The model downloader script is automatically called as part of run.sh.
+./download_models/getModels.sh
+
Warning
+Depending on your internet connection, this might take some time.
+(Optional) Download the video file manually. This video is used as the input source to give to the pipeline.
+Note
+The sample image downloader script is automatically called as part of run.sh.
+./configs/opencv-ovms/scripts/image_download.sh
+
Warning
+Depending on your internet connection, this might take some time.
+(optional) Download the bit model manually
+a. Here is the command to build the container for bit model downloading:
+docker build -f Dockerfile.bitModel -t bit_model_downloader:dev .
+
b. Here is the script to run the container that downloads the bit models:
+docker run bit_model_downloader:dev
+
Build the reference design images. This table shows the commands for the OpenVINO (OVMS) model Server and profile-launcher build command:
+Target | +Docker Build Command | +Check Success | +
---|---|---|
OVMS Server | +make build-ovms-server |
+docker images command output contains Docker image openvino/model_server:2023.1-gpu |
+
OVMS Profile Launcher | +make build-profile-launcher |
+ls -al ./profile-launcher command to show the binary executable |
+
Note
+Build command may take a while, depending on your internet connection and machine specifications.
+Note
+If the build command succeeds, you will see all the built Docker images files as indicated in the Check Success column. If the build fails, check the console output for errors.
+Proxy
+If docker build system requires a proxy network, just set your proxy env standard way on your terminal as below and make build:
+export HTTP_PROXY="http://your-proxy-url.com:port"
+export HTTPS_PROXY="https://your-proxy-url.com:port"
+make build-ovms-server
+make build-profile-launcher
+
For the profile launcher, each profile has its own configuration for different pipelines. The configuration of each profile is done through a YAML file, configuration.yaml. An example configuration.yaml for the classification profile is shown here:
+OvmsSingleContainer: false
OvmsServer:
  ServerDockerScript: start_ovms_server.sh
  ServerDockerImage: openvino/model_server:2023.1-gpu
  ServerContainerName: ovms-server
  ServerConfig: "/models/config.json"
  StartupMessage: Starting OVMS server
  InitWaitTime: 10s
  EnvironmentVariableFiles:
    - ovms_server.env
  # StartUpPolicy:
  # when there is an error launching the OVMS server on startup, choose one of these values for the behavior of profile-launcher:
  #   remove-and-restart: it will remove the existing container with the same container name if any and then restart the container
  #   exit: it will exit the profile-launcher
  #   ignore: it will ignore the error and continue (this is the default value if not given or none of the above)
  StartUpPolicy: ignore
OvmsClient:
  DockerLauncher:
    Script: docker-launcher.sh
    DockerImage: python-demo:dev
    ContainerName: classification
    Volumes:
      - "$RUN_PATH/results:/tmp/results"
      - ~/.Xauthority:/home/dlstreamer/.Xauthority
      - /tmp/.X11-unix
  PipelineScript: ./classification/python/entrypoint.sh
  PipelineInputArgs: "" # space delimited, as when running the script on the command line with its input arguments
  EnvironmentVariableFiles:
    - classification.env
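For reference, each file listed under EnvironmentVariableFiles is a plain KEY=value list. A hypothetical sketch of what a file like classification.env might contain; the variable names here are examples drawn from the environment variable tables elsewhere in this document, not the file's actual contents:

# hypothetical example values
RENDER_MODE=1
LOG_LEVEL=0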
+
The description of each configuration element is explained below:
+Configuration Element | +Description | +
---|---|
OvmsSingleContainer | +This boolean flag indicates whether this profile is running as a single OpenVino Model Server (OVMS) container or not, e.g. the C-API pipeline use case will use this as true . It can indicate the distributed architecture of OVMS client-server when this flag is false. |
+
OvmsServer | +This is configuration section for OpenVino Model Server in the case of client-server architecture. | +
OvmsServer/ServerDockerScript | +The infra-structure shell script to start an instance of OVMS server. | +
OvmsServer/ServerDockerImage | +The Docker image tag name for OpenVino Model Server. | +
OvmsServer/ServerContainerName | +The Docker container base name for OpenVino Model Server. | +
OvmsServer/ServerConfig | +The model config.json file name path for OpenVino Model Server. | +
OvmsServer/StartupMessage | +The starting message shown in the console or log when OpenVino Model Server instance is launched. | +
OvmsServer/InitWaitTime | +The waiting time duration (like 5s, 5m, .. etc) after OpenVino Model Server is launched to allow some settling time before launching the pipeline from the client. | +
OvmsServer/EnvironmentVariableFiles | +The list of environment variable files applied for starting OpenVino Model Server Docker instance. | +
OvmsServer/StartUpPolicy | +This configuration controls the behavior of OpenVino Model Server Docker instance when there is error occurred during launching. Use one of these values: remove-and-restart : it will remove the existing container with the same container name if any and then restart the container exit : it will exit the profile-launcher ignore : it will ignore the error and continue (this is the default value if not given or none of the above). |
+
OvmsClient | +This is configuration section for the OVMS client running pipelines in the case of client-server architecture. The C-API pipeline use case should also use this section to configure. |
+
OvmsClient/DockerLauncher | +This is configuration section for the generic Docker launcher to run pipelines for a given profile. | +
OvmsClient/DockerLauncher/Script | +The generic Docker launcher script file name. | +
OvmsClient/DockerLauncher/DockerImage | +The Docker image tag name for the pipeline profile. | +
OvmsClient/DockerLauncher/ContainerName | +The Docker container base name for the running pipeline profile. | +
OvmsClient/DockerLauncher/Volumes | +The Docker container volume mounts for the running pipeline profile. | +
OvmsClient/PipelineScript | +The file name path for the pipeline profile to launch. The file path here is in the perspective of the running container. i.e. the path inside the running container. | +
OvmsClient/PipelineInputArgs | +Any input arguments or parameters for the above pipeline script to take. Like any command line argument, they are space-delimited if multiple arguments. | +
OvmsClient/EnvironmentVariableFiles | +The list of environment variable files applied for the running pipeline profile Docker instance. | +
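With a profile configured this way, the profile launcher is driven through run.sh by selecting the profile name via the PIPELINE_PROFILE environment variable. A sketch, assuming the classification profile above is available under that name:

PIPELINE_PROFILE="classification" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0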
Before running, set up the pipeline.
+Download the video files to the sample-media directory: +
cd benchmark-scripts;
+./download_sample_videos.sh;
+cd ..;
+
Example - Specify Resolution and Framerate
+This example downloads a sample video for 1080p@15fps. +
cd benchmark-scripts;
+./download_sample_videos.sh 1920 1080 15;
+cd ..;
+
Note
+Only AVC encoded files are supported.
+After the video files are downloaded to the sample-media folder, start the camera simulator: +
make run-camera-simulator
+
Wait for few seconds, and then check if the camera-simulator containers are running: +
docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}'
+
Success
+Your output is as follows:
+IMAGE | +STATUS | +NAMES | +
---|---|---|
openvino/ubuntu20_data_runtime:2021.4.2 | +Up 11 seconds | +camera-simulator0 | +
aler9/rtsp-simple-server | +Up 13 seconds | +camera-simulator | +
Note
+There could be multiple containers with the image "openvino/ubuntu20_data_runtime:2021.4.2", depending on the number of sample-media video files provided.
+Failure
+If all the Docker* containers are not visible, then review the console output for errors. Sometimes dependencies fail to resolve. Address obvious issues and retry.
+There are several pipeline profiles to chose from. Use the make list-profiles
to see the different pipeline options. In this example, the instance_segmentation
pipeline profile will be used.
Use the following command to run instance segmentation using OVMS on core.
+PIPELINE_PROFILE="instance_segmentation" RENDER_MODE=1 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
+
Check the status of the pipeline.
+docker ps --format 'table{{.Image}}\t{{.Status}}\t{{.Names}}' -a
+
Success
+Here is a sample output:
+IMAGE | +STATUS | +NAMES | +
---|---|---|
openvino/model_server-gpu:latest | +Up 59 seconds | +ovms-server0 | +
Failure
+If you do not see above Docker container(s), review the console output for errors. Sometimes dependencies fail to resolve and must be run again. Address obvious issues and try again repeating the above steps. Here are couple debugging tips:
+check the docker logs using following command to see if there is an issue with the container
+docker logs <containerName>
+
check ovms log in automated-self-checkout/results/r0.jsonl
+Check the output in the results
directory.
Example - results/r0.jsonl sample
+The output in results/r0.jsonl file lists average processing time in milliseconds and average number of frames per second. This file is intended for scripts to parse. +
Processing time: 53.17 ms; fps: 18.81
+Processing time: 47.98 ms; fps: 20.84
+Processing time: 48.35 ms; fps: 20.68
+Processing time: 46.88 ms; fps: 21.33
+Processing time: 47.56 ms; fps: 21.03
+Processing time: 49.66 ms; fps: 20.14
+Processing time: 52.49 ms; fps: 19.05
+Processing time: 52.27 ms; fps: 19.13
+Processing time: 50.86 ms; fps: 19.66
+Processing time: 58.19 ms; fps: 17.18
+Processing time: 58.28 ms; fps: 17.16
+Processing time: 52.17 ms; fps: 19.17
+Processing time: 50.89 ms; fps: 19.65
+Processing time: 49.58 ms; fps: 20.17
+Processing time: 51.14 ms; fps: 19.55
+
Example - results/pipeline0.log sample
+The output in results/pipeline0.log lists average number of frames per second. Below is a snap shot of the output: +
18.81
+20.84
+20.68
+21.33
+21.03
+20.14
+19.05
+19.13
+19.66
+17.18
+17.16
+19.17
+19.65
+20.17
+19.55
+
Note
+The automated-self-checkout/results/ directory is volume mounted to the pipeline container.
+make clean-all
+
In this section, we show the steps to run the stream density for a chosen pipeline profile. By definition, the objective of the stream density is +to bench-mark the maximum number of multiple running pipelines at the same time while still maintaining the goal-setting target frames-per-second (FPS).
+Before running, set up the pipeline if not already done.
+To make sure we have a good stream density benchmarking, it is recommended to stop all other running pipelines before running the stream density. +To stop all running pipelines and clean up, run +
make clean-all
+
For running stream density, the benchmark scripts are utilized. To set up the benchmarking, we need to build the benchmark Docker images first.
+Build the benchmark Docker* images + Benchmark scripts are containerized inside Docker. The easiest way to build all benchmark Docker images, run +
cd ./benchmark-scripts
+make
+
It is also possible to choose which benchmark Docker images to build based on different platforms.
+The following table lists the commands for various platforms. Choose and run the command corresponding to your hardware configuration.
+Platform | +Docker Build Command | +Check Success | +
---|---|---|
Intel® integrated and Arc™ GPUs | +cd benchmark-scripts |
+Docker images command to show both benchmark:dev and benchmark:igt images |
+
Intel® Flex GPUs | +cd benchmark-scripts |
+Docker images command to show both benchmark:dev and benchmark:xpu images |
+
Warning
+Build command may take a while, depending on your internet connection and machine specifications.
+We will use the camera simulator as the input source to show the stream density. Please refer to the section of Start the Camera Simulator in Quick Start Guide to Run Pipeline on how to start the camera simulator.
+There are several pipeline profiles to choose from for running pipeline stream density. Use the make list-profiles
to see the different pipeline options. In this example, the object_detection
pipeline profile will be used for running stream density.
To run the stream density, the benchmark shell script, benchmark.sh
, is used. The script is in the <project_base_dir>/benchmark-scripts directory. Use the following command to run objection detection pipeline profile using OVMS on core.
cd ./benchmark-scripts
+PIPELINE_PROFILE="object_detection" RENDER_MODE=0 sudo -E ./benchmark.sh --stream_density 15.0 --logdir object_detection/data --duration 120 --init_duration 40 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1
+
Note
+Description of some key benchmarking input parameters is given as below:
+Parameter Name | +Example Value | +Description | +
---|---|---|
--stream_density | +15.0 | +The value 15.0 after the --stream_density is the target FPS that we want to achieve for running maximum number of object detection pipelines while the averaged of all pipelines from the output FPS still maintaining that target FPS value. | +
--logdir | +object_detection/data | +the output directory of benchmarking resource details | +
--duration | +120 | +the time duration, in number of seconds, the benchmarking will run | +
--init_duration | +40 | +the time duration, in number of seconds, to wait for system initialization before the benchmarking metrics or data collection begins | +
Note
+For stream density run, it is recommended to turn off the display to conserve the system resources hence setting RENDER_MODE=0
Note
+This takes a while for the whole stream density benchmarking process depending on your system resources like CPU, memory, ... etc.
+Note
+The benchmark.sh script automatically cleans all running Docker containers after it is done.
+If the hardware supports, then one can also run the benchmarking on different devices like CPU or GPU. This can be done through the environment variable DEVICE
. The following is an example to run the object_detection profile using GPU:
PIPELINE_PROFILE="object_detection" RENDER_MODE=0 DEVICE="GPU.0" sudo -E ./benchmark.sh --stream_density 14.95 --logdir object_detection/data --duration 120 --init_duration 40 --platform dgpu.0 --inputsrc rtsp://127.0.0.1:8554/camera_1
+
Note
+The performance of running object detection benchmarking should be better while running on GPU using model precision FP32. If supported, then you can change the model precision by going to the folder configs/opencv-ovms/models/2022
from the root of project folder and editing the base_path
for that particular model in the config_template.json
file. For example, you can change the the base_path of FP32
to FP16
assuming the precision FP16
of the model is available:
...
+ "config": {
+ "name": "ssd_mobilenet_v1_coco",
+ "base_path": "/models/ssd_mobilenet_v1_coco/FP32",
+ ...
+ }
+
The directory structure of models with both precisions should look like the followings:
+ssd_mobilenet_v1_coco
+├── FP32
+| └── 1
+| ├── ssd_mobilenet_v1_coco.bin
+| └── ssd_mobilenet_v1_coco.xml
+├── FP16
+| └── 1
+| ├── ssd_mobilenet_v1_coco.bin
+| └── ssd_mobilenet_v1_coco.xml
+
Check the output in the base results
directory.
After the stream density is done, the results of stream density can be seen on the base directory of the results
directory:
cat ../results/stream_density.log
+
Example - results/stream_density.log sample
+The output in results/stream_density.log file gives the detailed information of stream density results: +
......
+ FPS for pipeline0: 15.1225
+ FPS for pipeline1: 15.19
+ FPS for pipeline2: 15.18
+ Total FPS throughput: 45.4925
+ Total FPS per stream: 15.1642
+ Max stream density achieved for target FPS 15.0 is 3
+ Finished stream density benchmarking
+ stream_density done!
+
OpenVINO Model Server (OVMS) supports multiple ways to run an inferencing pipeline: the TensorFlow Serving gRPC API, KServe gRPC API, TensorFlow Serving REST API, KServe REST API, and the OVMS C API. The object detection pipeline is based on the KServe gRPC API method, and the default model used is ssd_mobilenet_v1_coco. You can use a different model to run object detection. Here are the steps:
The config file is located at configs/opencv-ovms/models/2022/config_template.json. Edit the file and append the following configuration section to the template:
{
  "config": {
    "name": "ssd_mobilenet_v1_coco",
    "base_path": "/models/ssd_mobilenet_v1_coco/FP32",
    "nireq": 1,
    "batch_size": "1",
    "plugin_config": {
      "PERFORMANCE_HINT": "LATENCY"
    },
    "target_device": "{target_device}"
  },
  "latest": {
    "num_versions": 1
  }
}
+
Note
Please leave the target_device value as is; the placeholder {target_device} will be recognized and replaced by the run script.
You can find the parameter description in the ovms docs.
The pipeline run script automatically downloads the model files if the model is part of the Open Model Zoo supported list; otherwise, please add your model files manually to configs/opencv-ovms/models/2022/. When adding your model manually, make sure to follow this model file structure:
ssd_mobilenet_v1_coco
├── FP32
    └── 1
        ├── ssd_mobilenet_v1_coco.bin
        └── ssd_mobilenet_v1_coco.xml
+
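If your model is in the Open Model Zoo supported list, you can also fetch it with the downloader script from the pipeline setup instead of copying files by hand (run.sh calls this automatically; shown here for manual use):

./download_models/getModels.sh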
You can update the object detection environment variables in the file configs/opencv-ovms/envs/object_detection.env. Here are the default values and an explanation of each environment variable:
EV Name | +Default Value | +Description | +
---|---|---|
DETECTION_MODEL_NAME | +ssd_mobilenet_v1_coco | +model name for object detection | +
DETECTION_LABEL_FILE | +coco_91cl_bkgr.txt | +label file name to use on object detection for model | +
DETECTION_ARCHITECTURE_TYPE | +ssd | +architecture type for object detection model | +
DETECTION_OUTPUT_RESOLUTION | +1920x1080 | +output resolution for object detection result | +
DETECTION_THRESHOLD | +0.50 | +threshold for object detection in floating point that needs to be between 0.0 to 1.0 | +
MQTT | | enable MQTT notification of results; set to broker host:port (e.g. 127.0.0.1:1883) or leave empty to disable
RENDER_MODE | 1 | display the input source video stream with the inferencing results, value: 0 or 1
make build-python-apps
cd benchmark-scripts/ && ./download_sample_videos.sh && cd ..
make run-camera-simulator
docker run --network host --rm -d -it -p 1883:1883 -p 9001:9001 eclipse-mosquitto
PIPELINE_PROFILE="object_detection" RENDER_MODE=1 MQTT=127.0.0.1:1883 sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_0
(Remove the MQTT environment variable if you are not using it.)
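If MQTT is enabled, you can verify that results are being published by subscribing to the broker started above. A hedged sketch using the standard Mosquitto client tools (assumes the mosquitto-clients package is installed; the '#' wildcard subscribes to all topics, since the exact topic name is not specified here):

mosquitto_sub -h 127.0.0.1 -p 1883 -t '#' -v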
to know the container name.make clean-profile-launcher
to stop and clean up the client side containers, or make clean-all
to stop and clean up everything.You can call make clean-ovms
to stop the pipeline and all running containers for OVMS, hence the results directory log files will stop growing. Below is the table of make commands you can call to clean things up per your needs:
Clean-Up Options | +Command | +
---|---|
clean instance-segmentation container if any | +make clean-segmentation |
+
clean grpc-go dev container if any | +make clean-grpc-go |
+
clean all related containers launched by profile-launcher if any | +make clean-profile-launcher |
+
clean ovms-server container | +make clean-ovms-server |
+
clean ovms-server and all containers launched by profile-launcher | +make clean-ovms |
+
clean results/ folder | +make clean-results |
+
clean all downloaded models | +make clean-models |
+
stops pipelines and cleans all containers, simulator, results, telemetry and webcam | +make clean-all |
+
For running OVMS as inferencing engine through grpc, these are the supported languages:
+Input source Type | +Command | +
---|---|
Python | +PIPELINE_PROFILE="grpc_python" sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1 |
+
Golang | +PIPELINE_PROFILE="grpc_go" sudo -E ./run.sh --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1 |
+
Note
+Above example scripts are based on camera simulator for rtsp input source, before running them, please run the camera simulator. If you used a different input source, fill in the appropriate value for --inputsrc
.
For running OVMS as inferencing engine through grpc, we are supporting different models for your need.
+Here is the list of inferencing models we are currently supporting in python:
+You can switch between them by editing the configuration file configs/opencv-ovms/cmd_client/res/grpc_python/configuration.yaml
, uncomment # PipelineInputArgs: "--model_name instance-segmentation-security-1040"
for supporting instance-segmentation-security-1040 and comment out rest; or you can uncomment # PipelineInputArgs: "--model_name bit_64"
for supporting bit_64 and comment out rest.
Here is the configuration.yaml content, default to use instance-segmentation-security-1040
model
+
OvmsClient:
+ PipelineScript: run_grpc_python.sh
+ PipelineInputArgs: "--model_name instance-segmentation-security-1040" # space delimited like we run the script in command and take those input arguments
+ # PipelineInputArgs: "--model_name bit_64" # space delimited like we run the script in command and take those input arguments
+ # PipelineInputArgs: "--model_name yolov5s" # space delimited like we run the script in command and take those input arguments
+
You can download models by editing download_models/models.lst
file, you can add new models(from https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/object_detection_demo/python/models.lst) to it or uncomment from existing list in this file, saved the file once editing is done. Then you can download the list using following steps:
cd download_models
make build
make run
after above steps, the downloaded models can be found in configs/opencv-ovms/models/2022
directory.
Note
+Model files in configs/opencv-ovms/models/2022
directory will be replaced with new downloads if previously existed.
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Wa=/["'&<>]/;Vn.exports=Ua;function Ua(e){var t=""+e,r=Wa.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i