From 487b217856b8b7d592894f0995a417a60b66ef13 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 9 Dec 2022 14:00:13 +0800 Subject: [PATCH 01/28] refine for docker --- docker/Dockerfile | 10 ++++++---- docker/docker_instructions_ov2.0.md | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 6f04c23d..b5ff38f0 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -55,15 +55,17 @@ requests \ && apt-get install -y --no-install-recommends libboost-all-dev WORKDIR /usr/lib/x86_64-linux-gnu RUN ln -sf libboost_python-py36.so libboost_python37.so +RUN pip install --upgrade pip +RUN pip install openvino-dev[tensorflow2]==2022.1 # build ros2 openvino toolkit WORKDIR /root RUN mkdir -p catkin_ws/src WORKDIR /root/catkin_ws/src -RUN git clone https://github.com/intel/ros2_object_msgs.git -RUN git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git -RUN git clone -b ${VERSION} https://github.com/ros-perception/vision_opencv.git -RUN git clone -b ros2-development https://github.com/IntelRealSense/realsense-ros.git +RUN git init && git clone https://github.com/intel/ros2_object_msgs.git \ +&& git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git \ +&& git clone -b ${VERSION} https://github.com/ros-perception/vision_opencv.git \ +&& git clone -b ros2-development https://github.com/IntelRealSense/realsense-ros.git RUN apt-get install ros-${VERSION}-diagnostic-updater WORKDIR /root/catkin_ws RUN source /opt/ros/${VERSION}/setup.bash && source /opt/intel/openvino_2022/setupvars.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release diff --git a/docker/docker_instructions_ov2.0.md b/docker/docker_instructions_ov2.0.md index 0fa45a92..1414da15 100644 --- a/docker/docker_instructions_ov2.0.md +++ b/docker/docker_instructions_ov2.0.md @@ -79,7 +79,7 @@ omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/ * If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be 
converted to intermediate representation (such as the model for object detection): ``` cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list -omz_converter --list convert_model.lst -o /opt/openvino_toolkit/models/convert +omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert ``` * Copy label files (execute once) **Note**:Need to make label_dirs if skip steps for set output_dirs above. From a974aa4c06723483f0a7003240cafecc48d61e84 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 9 Dec 2022 16:18:41 +0800 Subject: [PATCH 02/28] refine to adapt humble --- dynamic_vino_lib/CMakeLists.txt | 6 +++--- .../dynamic_vino_lib/models/attributes/base_attribute.hpp | 1 + sample/CMakeLists.txt | 6 +++--- sample/src/pipeline_composite.cpp | 4 +++- tests/CMakeLists.txt | 6 +++--- vino_param_lib/CMakeLists.txt | 4 ++-- 6 files changed, 15 insertions(+), 12 deletions(-) diff --git a/dynamic_vino_lib/CMakeLists.txt b/dynamic_vino_lib/CMakeLists.txt index 6e893116..aec4c368 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/dynamic_vino_lib/CMakeLists.txt @@ -17,10 +17,10 @@ cmake_minimum_required(VERSION 3.5) project(dynamic_vino_lib) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### #################################### diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp index 90e6b187..b09b04bc 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp +++ b/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp @@ -24,6 +24,7 @@ #include #include #include +#include #include 
"openvino/openvino.hpp" #include "dynamic_vino_lib/slog.hpp" diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index 25b7b447..726d7ca5 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -13,11 +13,11 @@ set(OpenVINO_LIBRARIES openvino::runtime) cmake_minimum_required(VERSION 3.5) project(dynamic_vino_sample) -# Default to C++14 +# Default to C++17 if(NOT CMAKE_CXX_STANDARD) - set(CMAKE_CXX_STANDARD 14) + set(CMAKE_CXX_STANDARD 17) endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) diff --git a/sample/src/pipeline_composite.cpp b/sample/src/pipeline_composite.cpp index f895fc33..d0af96cd 100644 --- a/sample/src/pipeline_composite.cpp +++ b/sample/src/pipeline_composite.cpp @@ -94,7 +94,9 @@ class ComposablePipeline : public rclcpp::Node std::string getConfigPath() { - return declare_parameter("config").get(); + // TODO: Fix api for humble + // return declare_parameter("config").get(); + return ""; } }; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 88344197..6b35f020 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -19,10 +19,10 @@ project(dynamic_vino_test) list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### find_package(OpenVINO) diff --git a/vino_param_lib/CMakeLists.txt b/vino_param_lib/CMakeLists.txt index c3cddd8f..b615ed3f 100644 --- a/vino_param_lib/CMakeLists.txt +++ b/vino_param_lib/CMakeLists.txt @@ -67,8 +67,8 @@ if(UNIX OR APPLE) # Generic flags. 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") - # Dot not forward c++14 flag to GPU beucause it is not supported + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") + # Dot not forward c++17 flag to GPU beucause it is not supported set(CUDA_PROPAGATE_HOST_FLAGS OFF) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") From 9992ed15505942935e05f5bb5700e9f9d796d8a0 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Tue, 13 Dec 2022 13:01:23 +0800 Subject: [PATCH 03/28] fix doc and yaml path --- doc/quick_start/getting_started_with_ros2_ov2.0.md | 2 +- sample/param/pipeline_segmentation_maskrcnn.yaml | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index 54560cf5..0547588e 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -62,7 +62,7 @@ omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/ * If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be converted to intermediate representation (such as the model for object detection): ``` cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list -omz_converter --list convert_model.lst -o /opt/openvino_toolkit/models/convert +omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert ``` ### Install OpenVINO 2022.1 by source code * See all available models diff --git a/sample/param/pipeline_segmentation_maskrcnn.yaml b/sample/param/pipeline_segmentation_maskrcnn.yaml index 667f6211..d5b8671a 100644 --- a/sample/param/pipeline_segmentation_maskrcnn.yaml +++ b/sample/param/pipeline_segmentation_maskrcnn.yaml @@ -1,18 +1,16 @@ Pipelines: - name: segmentation - 
inputs: [Video] - # input_path: /home/jiawei/test-seg-2022/src/ros2_openvino_toolkit/data/images/road-segmentation.png - input_path: /home/jiawei/openvino_test_video/sample-videos/car-detection.mp4 + inputs: [StandardCamera] infers: - name: ObjectSegmentationMaskrcnn - model: /home/jiawei/mask_rcnn_inception_v2_coco_2018_01_28/OUT/frozen_inference_graph.xml + model: /opt/openvino_toolkit/models/convert/public/mask_rcnn_inception_v2_coco_2018_01_28/OUT/frozen_inference_graph.xml engine: CPU #"HETERO:CPU,GPU,MYRIAD" label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 outputs: [ImageWindow, RosTopic, RViz] connects: - - left: Video + - left: StandardCamera right: [ObjectSegmentationMaskrcnn] - left: ObjectSegmentationMaskrcnn right: [ImageWindow] From 11661cb5dba28a69b8b1de6f01b07746793df022 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 00:54:25 +0800 Subject: [PATCH 04/28] fix the structure of ros2 document and add yaml configuration guide --- README.md | 159 +++++++++++++++--- doc/getting_started_with_Dashing.md | 120 ------------- doc/getting_started_with_Foxy_Ubuntu18.04.md | 141 ---------------- doc/getting_started_with_Foxy_Ubuntu20.04.md | 117 ------------- doc/inferences/Face_Detection.md | 21 --- doc/inferences/Face_Reidentification.md | 10 -- doc/inferences/Object_Detection.md | 91 ---------- doc/inferences/Object_Segmentation.md | 24 --- doc/inferences/People_Reidentification.md | 13 -- doc/inferences/Vehicle_Detection.md | 14 -- doc/installation/BINARY_INSTALLATION.md | 74 -------- doc/installation/OPEN_SOURCE_INSTALLATION.md | 82 --------- doc/installation/installation.md | 11 -- doc/launching/launch.md | 37 ---- doc/launching/service.md | 27 --- doc/launching/set_environment.md | 32 ---- ...etting_started_with_Dashing_Ubuntu18.04.md | 0 .../getting_started_with_Foxy_Ubuntu20.04.md | 0 ...tting_started_with_Galactic_Ubuntu20.04.md | 121 ------------- .../getting_started_with_ros2_ov2.0.md | 3 +- 
doc/quick_start/yaml_configuration_guide.md | 121 +++++++++++++ .../Design_Architecture_and_logic_flow.md | 27 --- doc/tables_of_contents/prerequisite.md | 31 ---- .../supported_features/Supported_features.md | 33 ---- .../inference_functionality_overview.md | 16 -- .../supported_features/input_resource.md | 8 - .../supported_features/output_types.md | 43 ----- .../tutorials/Multiple_Pipelines.md | 54 ------ .../configuration_file_customization.md | 58 ------- 29 files changed, 256 insertions(+), 1232 deletions(-) delete mode 100644 doc/getting_started_with_Dashing.md delete mode 100644 doc/getting_started_with_Foxy_Ubuntu18.04.md delete mode 100644 doc/getting_started_with_Foxy_Ubuntu20.04.md delete mode 100644 doc/inferences/Face_Detection.md delete mode 100644 doc/inferences/Face_Reidentification.md delete mode 100644 doc/inferences/Object_Detection.md delete mode 100644 doc/inferences/Object_Segmentation.md delete mode 100644 doc/inferences/People_Reidentification.md delete mode 100644 doc/inferences/Vehicle_Detection.md delete mode 100644 doc/installation/BINARY_INSTALLATION.md delete mode 100644 doc/installation/OPEN_SOURCE_INSTALLATION.md delete mode 100644 doc/installation/installation.md delete mode 100644 doc/launching/launch.md delete mode 100644 doc/launching/service.md delete mode 100644 doc/launching/set_environment.md delete mode 100644 doc/quick_start/getting_started_with_Dashing_Ubuntu18.04.md delete mode 100644 doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md delete mode 100644 doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md create mode 100644 doc/quick_start/yaml_configuration_guide.md delete mode 100644 doc/tables_of_contents/Design_Architecture_and_logic_flow.md delete mode 100644 doc/tables_of_contents/prerequisite.md delete mode 100644 doc/tables_of_contents/supported_features/Supported_features.md delete mode 100644 doc/tables_of_contents/supported_features/inference_functionality_overview.md delete mode 100644 
doc/tables_of_contents/supported_features/input_resource.md delete mode 100644 doc/tables_of_contents/supported_features/output_types.md delete mode 100644 doc/tables_of_contents/tutorials/Multiple_Pipelines.md delete mode 100644 doc/tables_of_contents/tutorials/configuration_file_customization.md diff --git a/README.md b/README.md index 5014857c..72896a0e 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,15 @@ # ros2_openvino_toolkit -ROS2 Version supported: - -* [x] ROS2 Dashing -* [x] ROS2 Eloquent +# Introduction +## ROS2 Version supported +* [x] ROS2 Galactic * [x] ROS2 Foxy +* [x] ROS2 Humble -Inference Features supported: - +## Inference Features supported * [x] Object Detection * [x] Face Detection -* [x] Age-Gender Recognition +* [x] Age Gender Recognition * [x] Emotion Recognition * [x] Head Pose Estimation * [x] Object Segmentation @@ -18,29 +17,139 @@ Inference Features supported: * [x] Vehicle Attribute Detection * [x] Vehicle License Plate Detection -## Introduction +## Prerequisite +* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) +* OS: Ubuntu 20.04, Ubuntu 22.04 +* ROS2: Foxy, Galactic, Humble +* OpenVINO: V2022.1, V2022.2 +* Python: 3.6, 3.7, 3.8, 3.9 +* [Optional] RealSense D400 Series Camera +* [Optional] Intel NCS2 Stick + +## Design Architecture +From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. -The OpenVINO™ (Open visual inference and neural network optimization) toolkit provides a ROS-adaptered runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. 
+![OpenVINO_Architecture](./data/images/design_arch.PNG "OpenVINO RunTime Architecture") +- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. + - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. + - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. + - Speed development using optimized OpenCV* and OpenVX* functions. See more from [here](https://github.com/openvinotoolkit/openvino) for Intel OpenVINO™ introduction. +- **ros OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. +- **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework. +- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results. +- **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. 
-## Prerequisite +## Logic Flow +From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. -* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) -* OS: Ubuntu 20.04 -* ROS2: Foxy Fitzroy -* OpenVINO: V2021.3, see [the release notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html) for more info. -* [Optional] RealSense D400 Series Camera -* [Optional] Intel NCS2 Stick -## Tables of contents -* [Design Architecture and Logic Flow](./doc/tables_of_contents/Design_Architecture_and_logic_flow.md) -* [Supported Features](./doc/tables_of_contents/supported_features/Supported_features.md) -* Tutorials - - [How to configure a inference pipeline?](./doc/tables_of_contents/tutorials/configuration_file_customization.md) - - [How to create multiple pipelines in a process?](./doc/tables_of_contents/tutorials/Multiple_Pipelines.md) - -## Installation & Launching -See Getting Start Pages for [ROS2 Dashing](./doc/getting_started_with_Dashing.md) or [ROS2 Foxy](./doc/getting_started_with_Foxy_Ubuntu20.04.md) for detailed installation & lauching instructions. +![Logic_Flow](./data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") + +Once a corresponding program is launched with a specified .yaml config file passed in the .launch file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. 
+ +The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for how to create or edit the config files. + +**Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc. + +**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. + +# Supported Features +## Multiple Input Components +Currently, the package support several kinds of input resources of gaining image data: + +|Input Resource|Description| +|--------------------|------------------------------------------------------------------| +|StandardCamera|Any RGB camera with USB port supporting. 
Currently only the first USB camera if many are connected.| +|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.| +|RealSenseCameraTopic| Any ROS topic which is structured in image message.| +|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| +|Video| Any video file which can be parsed by openCV.| +|IpCamera| Any RTSP server which can push video stream.| + +## Inference Implementations +Currently, the inference feature list is supported: + +|Inference|Description| +|-----------------------|------------------------------------------------------------------| +|Face Detection|Object Detection task applied to face recognition using a sequence of neural networks.| +|Emotion Recognition| Emotion recognition based on detected face image.| +|Age & Gender Recognition| Age and gender recognition based on detected face image.| +|Head Pose Estimation| Head pose estimation based on detected face image.| +|Object Detection| Object detection based on SSD-based trained models.| +|Vehicle and License Detection| Vehicle and license detection based on Intel models.| +|Object Segmentation| object segmentation.| +|Person Reidentification| Person Reidentification based on object detection.| + +## ROS interfaces and outputs +### Topic +* #### Subscribed Topic +- Image topic: +```/camera/color/image_raw```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +* #### Published Topic +- Face Detection: +```/ros2_openvino_toolkit/face_detection```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) +- Emotion Recognition: +```/ros2_openvino_toolkit/emotion_detection```([people_msgs::EmotionsStamped](./people_msgs/msg/EmotionsStamped.msg)) +- Age and Gender Recognition: +```/ros2_openvino_toolkit/age_gender_detection```([people_msgs::AgeGenderStamped](./people_msgs/msg/AgeGenderStamped.msg)) +- Head Pose Estimation: 
+```/ros2_openvino_toolkit/head_pose_detection```([people_msgs::HeadPoseStamped](./people_msgs/msg/HeadPoseStamped.msg)) +- Object Detection: +```/ros2_openvino_toolkit/detected_objects```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) +- Object Segmentation: +```/ros2_openvino_toolkit/segmented_objects```([people_msgs::ObjectsInMasks](./people_msgs/msg/ObjectsInMasks.msg)) +- Person Reidentification: +```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::ReidentificationStamped](./people_msgs/msg/ReidentificationStamped.msg)) +- Vehicle Detection: +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::VehicleAttribsStamped](./people_msgs/msg/VehicleAttribsStamped.msg) +- Vehicle License Detection: +```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::LicensePlateStamped](./people_msgs/msg/LicensePlateStamped.msg) +- Rviz Output: +```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +### Service +- Object Detection Service: +```/detect_object``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) +- Face Detection Service: +```/detect_face``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) +- Age & Gender Detection Service: +```/detect_age_gender``` ([people_msgs::AgeGender](./people_msgs/srv/AgeGenderSrv.srv)) +- Headpose Detection Service: +```/detect_head_pose``` ([people_msgs::HeadPose](./people_msgs/srv/HeadPoseSrv.srv)) +- Emotion Detection Service: +```/detect_emotion``` ([people_msgs::Emotion](./people_msgs/srv/EmotionSrv.srv)) + +### RViz +RViz dispaly is also supported by the composited topic of original image frame with inference result. 
+To show in RViz tool, add an image marker with the composited topic: +```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +### Image Window +OpenCV based image window is natively supported by the package. +To enable window, Image Window output should be added into the output choices in .yaml config file. see [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for checking/adding this feature in your launching. + +## Demo Result Snapshots +See below pictures for the demo result snapshots. +* face detection input from standard camera +![face_detection_demo_image](./data/images/face_detection.png "face detection demo image") + +* object detection input from realsense camera +![object_detection_demo_realsense](./data/images/object_detection.gif "object detection demo realsense") + +* object segmentation input from video +![object_segmentation_demo_video](./data/images/object_segmentation.gif "object segmentation demo video") + +* Person Reidentification input from standard camera +![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") + +# Installation & Launching +* Refer to the quick start document for [ROS2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. +* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. + +# Reference +* Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master) for detailed model structure and demo samples. +* OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO api 2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. 
# More Information * ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw diff --git a/doc/getting_started_with_Dashing.md b/doc/getting_started_with_Dashing.md deleted file mode 100644 index bd0c7a02..00000000 --- a/doc/getting_started_with_Dashing.md +++ /dev/null @@ -1,120 +0,0 @@ -# ROS2_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 18.04**. - -## 1. Environment Setup -* Install ROS2 Dashing [(guide)](https://index.ros.org/doc/ros2/Installation/Dashing/) -* Install OpenVINO™ Toolkit Version: 2020.3 [(guide)](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html) -**Note:** Please use root privileges to run the installer when installing the core components. -* Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0) or later version. - -## 2. Building and Installation -* Build demo code in OpenVINO toolkit -```bash - # root is required instead of sudo - source /opt/intel//bin/setupvars.sh - cd /opt/intel//deployment_tools/open_model_zoo/demos - source build_demos.sh -``` -* Install ROS2_OpenVINO packages -```bash -mkdir -p ~/my_ros2_ws/src -cd ~/my_ros2_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/ros-perception/vision_opencv -b ros2 -git clone https://github.com/ros2/message_filters.git -git clone https://github.com/ros-perception/image_common.git -b dashing -git clone https://github.com/intel/ros2_intel_realsense.git -b refactor -``` -* Build package -```bash -source /opt/intel//bin/setupvars.sh -cd ~/my_ros2_ws/src -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. 
Running the Demo -* Preparation - * Configure the Neural Compute Stick USB Driver - ```bash - cd ~/Downloads - cat < 97-usbboot.rules - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` - * Download the optimized Intermediate Representation (IR) of model (execute once) - - ```bash - cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader - sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output - sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output - sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output - sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output - sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output - sudo python3 downloader.py --name person-reidentification-retail-0031 --output_dir /opt/openvino_toolkit/models/person-reidentification/output - sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output - sudo python3 
downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output - sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output - sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output - ``` - * copy label files (execute once) - ```bash - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp 
~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - ``` - * If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) - ```bash - sudo python3 downloader.py --name mobilenet-ssd --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - cd /opt/intel//deployment_tools/model_optimizer - sudo python3 mo.py --input_model /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - ``` - * Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - -* run face detection sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_people.launch.py -``` -* run face detection sample code input from Image. -```bash -ros2 launch dynamic_vino_sample pipeline_image.launch.py -``` -* run object segmentation sample code input from RealSenseCameraTopic. -```bash -ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py -``` -* run object segmentation sample code input from Image. -```bash -ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py -``` -* run vehicle detection sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py -``` -* run person attributes sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py -``` -* run person reidentification sample code input from StandardCamera. 
-```bash -ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py -``` - - - diff --git a/doc/getting_started_with_Foxy_Ubuntu18.04.md b/doc/getting_started_with_Foxy_Ubuntu18.04.md deleted file mode 100644 index fc73c22b..00000000 --- a/doc/getting_started_with_Foxy_Ubuntu18.04.md +++ /dev/null @@ -1,141 +0,0 @@ -# ROS2_FOXY_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 18.04**. - -## 1. Install ROS2 Foxy from source code -* Before colcon build, update the cmake to 3.14+ -```bash -mkdir -p ~/cmake -cd ~/cmake -wget -t 3 -c https://www.cmake.org/files/v3.14/cmake-3.14.3.tar.gz -tar xf cmake-3.14.3.tar.gz -cd cmake-3.14.3 -./bootstrap --parallel=$(nproc --all) -make --jobs=$(nproc --all) -sudo make install -sudo ldconfig -```` -* Install ROS2 Foxy [(guide)](https://index.ros.org/doc/ros2/Installation/Foxy/Linux-Development-Setup/) - -* Build ROS2 Foxy packages from source code -```bash -cd ~/ros2_foxy -colcon build --symlink-install -. ~/ros2_foxy/install/setup.bash -``` -## 2. Environment Setup -* Install OpenVINO™ Toolkit Version: 2020.3 [(guide)](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html) -**Note:** Please use root privileges to run the installer when installing the core components. -* Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0) - -## 3. 
Building and Installation -* Build demo code in OpenVINO toolkit -```bash - # root is required instead of sudo - source /opt/intel//bin/setupvars.sh - cd /opt/intel//deployment_tools/open_model_zoo/demos - source build_demos.sh -``` -* Install ROS2_OpenVINO packages -```bash -mkdir -p ~/my_ros2_ws/src -cd ~/my_ros2_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/ros-perception/vision_opencv -b ros2 -git clone https://github.com/ros2/message_filters.git -git clone https://github.com/ros-perception/image_common.git -b dashing -git clone https://github.com/intel/ros2_intel_realsense.git -b refactor -``` -* Build package -```bash -source ~/ros2_foxy/install/local_setup.bash -source /opt/intel//bin/setupvars.sh -cd ~/my_ros2_ws/src -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 4. Running the Demo -* Preparation - * Configure the Neural Compute Stick USB Driver - ```bash - cd ~/Downloads - cat < 97-usbboot.rules - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` - * Download the optimized Intermediate Representation (IR) of model (execute once) - - ```bash - cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader - sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output - sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir 
/opt/openvino_toolkit/models/age-gender-recognition/output - sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output - sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output - sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output - sudo python3 downloader.py --name person-reidentification-retail-0031 --output_dir /opt/openvino_toolkit/models/person-reidentification/output - sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output - sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output - sudo python3 downloader.py --name road-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/road-segmentation/output - sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output - ``` - * copy label files (execute once) - ```bash - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels 
/opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - ``` - * If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) - ```bash - sudo python3 downloader.py --name mobilenet-ssd --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - cd /opt/intel//deployment_tools/model_optimizer - sudo python3 mo.py --input_model /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - ``` - * Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - -* run face detection sample code input from StandardCamera. 
-```bash -ros2 launch dynamic_vino_sample pipeline_people.launch.py -``` -* run face detection sample code input from Image. -```bash -ros2 launch dynamic_vino_sample pipeline_image.launch.py -``` -* run object segmentation sample code input from RealSenseCameraTopic. -```bash -ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py -``` -* run object segmentation sample code input from Image. -```bash -ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py -``` -* run vehicle detection sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py -``` -* run person attributes sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py -``` -* run person reidentification sample code input from StandardCamera. -```bash -ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py -``` - - - diff --git a/doc/getting_started_with_Foxy_Ubuntu20.04.md b/doc/getting_started_with_Foxy_Ubuntu20.04.md deleted file mode 100644 index cb576acc..00000000 --- a/doc/getting_started_with_Foxy_Ubuntu20.04.md +++ /dev/null @@ -1,117 +0,0 @@ -# ROS2_FOXY_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. Environment Setup -* Install ROS2 Foxy ([guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.3 ([guide](https://docs.openvinotoolkit.org/latest/openvino_docs_install_guides_installing_openvino_apt.html)) -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. 
Building and Installation -* Build demo code in OpenVINO toolkit -``` - # root is required instead of sudo - source /opt/intel//bin/setupvars.sh - cd /opt/intel//deployment_tools/open_model_zoo/demos - source build_demos.sh -``` -* Install ROS2_OpenVINO packages -``` -mkdir -p ~/my_ros2_ws/src -cd ~/my_ros2_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b dev-ov.2021.3 -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/intel/ros2_intel_realsense.git -b refactor -``` -* Build package -``` -source ~/ros2_foxy/install/local_setup.bash -source /opt/intel//bin/setupvars.sh -cd ~/my_ros2_ws/src -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. Running the Demo -* Preparation - * Configure the Neural Compute Stick USB Driver (if needed) -``` - cd ~/Downloads - cat < 97-usbboot.rules - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules -``` - -* See all available models -``` -cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -``` - -* copy label files (execute once) -``` - sudo cp 
~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/my_ros2_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ -``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) -``` - sudo python3 downloader.py --name mobilenet-ssd --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - cd /opt/intel//deployment_tools/model_optimizer - sudo python3 mo.py --input_model 
/opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output -``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. -* run face detection sample code input from StandardCamera. -``` -ros2 launch dynamic_vino_sample pipeline_people.launch.py -``` -* run face detection sample code input from Image. -``` -ros2 launch dynamic_vino_sample pipeline_image.launch.py -``` -* run object segmentation sample code input from RealSenseCameraTopic. -``` -ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py -``` -* run object segmentation sample code input from Image. -``` -ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py -``` -* run vehicle detection sample code input from StandardCamera. -``` -ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py -``` -* run person attributes sample code input from StandardCamera. -``` -ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py -``` -* run person reidentification sample code input from StandardCamera. -``` -ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py -``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* diff --git a/doc/inferences/Face_Detection.md b/doc/inferences/Face_Detection.md deleted file mode 100644 index 3bd2c8fa..00000000 --- a/doc/inferences/Face_Detection.md +++ /dev/null @@ -1,21 +0,0 @@ -# Face Detection - -## Demo Result Snapshots -See below pictures for the demo result snapshots. 
-* face detection input from image -![face_detection_demo_image](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/face_detection.png "face detection demo image") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output - sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output - sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output - sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - ``` diff --git a/doc/inferences/Face_Reidentification.md b/doc/inferences/Face_Reidentification.md deleted file mode 100644 index 9a496fff..00000000 --- a/doc/inferences/Face_Reidentification.md +++ /dev/null @@ -1,10 +0,0 @@ -# Face Reidentification -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output - sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir /opt/openvino_toolkit/models/face-reidentification/output - ``` - - diff --git a/doc/inferences/Object_Detection.md b/doc/inferences/Object_Detection.md deleted file mode 100644 index 905b134d..00000000 --- a/doc/inferences/Object_Detection.md +++ /dev/null @@ -1,91 +0,0 @@ -# Object Detection -## Introduction -The section depict the kind of Object Detection, which produces object classification and its location based ROI. -Two kinds of models are supported currently: -- SSD based Object Detection Models - * SSD300-VGG16, SSD500-VGG16, Mobilenet-SSD (both caffe and tensorflow) -- YoloV2 - -## Demo Result Snapshots -* object detection input from realsense camera - -![object_detection_demo_realsense](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_detection.gif "object detection demo realsense") - -## Download Models ->> Before using the supported models, you need to first downloand and optimize them into OpenVINO mode. mobilenet-SSD caffe model is the default one used in the Object Detection configuration. 
- -#### mobilenet-ssd -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - cd $model_downloader - sudo python3 ./downloader.py --name mobilenet-ssd - #FP32 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - #FP16 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 - ``` -#### YOLOv2-voc -* Darkflow to protobuf(.pb) - - install [darkflow](https://github.com/thtrieu/darkflow) - - install prerequsites - ```bash - pip3 install tensorflow opencv-python numpy networkx cython - ``` - - Get darkflow and YOLO-OpenVINO - ```bash - mkdir -p ~/code && cd ~/code - git clone https://github.com/thtrieu/darkflow - git clone https://github.com/chaoli2/YOLO-OpenVINO - sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/ - ``` - - modify the line self.offset = 16 in the ./darkflow/utils/loader.py file and replace with self.offset = 20 - - Install darkflow - ```bash - cd ~/code/darkflow - pip3 install . - ``` - - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow. 
- ```bash - cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt - ``` - - Get yolov2 weights and cfg - ```bash - cd ~/code/darkflow - mkdir -p models - cd models - wget -c https://pjreddie.com/media/files/yolov2-voc.weights - wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg - ``` - - Run convert script - ```bash - cd ~/code/darkflow - flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb - ``` -* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model - ```bash - cd ~/code/darkflow - # FP32 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP32 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - # FP16 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP16 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` diff --git a/doc/inferences/Object_Segmentation.md b/doc/inferences/Object_Segmentation.md deleted file mode 100644 index 7e998af9..00000000 --- a/doc/inferences/Object_Segmentation.md +++ /dev/null @@ -1,24 +0,0 @@ -# Object Segmentation -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* object segmentation input from video -![object_segmentation_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_segmentation.gif "object segmentation demo video") -## Download Models -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - #object segmentation model - mkdir -p ~/Downloads/models - cd ~/Downloads/models - wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - cd mask_rcnn_inception_v2_coco_2018_01_28 - #FP32 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir /opt/openvino_toolkit/models/segmentation/output/FP32 - #FP16 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32 - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` diff --git a/doc/inferences/People_Reidentification.md b/doc/inferences/People_Reidentification.md deleted file mode 100644 index 39c276d6..00000000 --- a/doc/inferences/People_Reidentification.md +++ /dev/null @@ -1,13 +0,0 @@ -# People Reidentification -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* Person Reidentification input from standard camera -![person_reidentification_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/person-reidentification.gif "person reidentification demo video") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output - sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output - ``` - diff --git a/doc/inferences/Vehicle_Detection.md b/doc/inferences/Vehicle_Detection.md deleted file mode 100644 index 8fdb1a5b..00000000 --- a/doc/inferences/Vehicle_Detection.md +++ /dev/null @@ -1,14 +0,0 @@ -# Vehicle Detection -## Download Models -### OpenSource Version -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output - sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - ``` diff --git a/doc/installation/BINARY_INSTALLATION.md b/doc/installation/BINARY_INSTALLATION.md deleted file mode 100644 index ebe1cf71..00000000 --- a/doc/installation/BINARY_INSTALLATION.md +++ /dev/null @@ -1,74 +0,0 @@ -# ros2_openvino_toolkit -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) -- [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. 
-```bash -./environment_setup_binary.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install [OpenVINO™ Toolkit 2019R3.1](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) ([download](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux))
- **Note**: Please use *root privileges* to run the installer when installing the core components. -* Install [the Intel® Graphics Compute Runtime for OpenCL™ driver components required to use the GPU plugin](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps) - -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation -* Build sample code under openvino toolkit - ```bash - # root is required instead of sudo - source /opt/intel/openvino/bin/setupvars.sh - cd /opt/intel/openvino/deployment_tools/inference_engine/samples/ - mkdir build - cd build - cmake .. - make - ``` -* set ENV CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export CPU_EXTENSION_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - source /opt/intel/openvino/bin/setupvars.sh - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - diff --git a/doc/installation/OPEN_SOURCE_INSTALLATION.md b/doc/installation/OPEN_SOURCE_INSTALLATION.md deleted file mode 100644 index cba2ce0c..00000000 --- a/doc/installation/OPEN_SOURCE_INSTALLATION.md +++ /dev/null @@ -1,82 +0,0 @@ -# ros2_openvino_toolkit - -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) - -- OpenVINO™ Toolkit Open Source
- * The [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino) that helps to enable fast, heterogeneous deep learning inferencing for Intel® processors (CPU and GPU/Intel® Processor Graphics), and supports more than 100 public and custom models.
- * [Open Model Zoo](https://github.com/opencv/open_model_zoo) includes 20+ pre-trained deep learning models to expedite development and improve deep learning inference on Intel® processors (CPU, Intel Processor Graphics, FPGA, VPU), along with many samples to easily get started. - -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. -```bash -./environment_setup.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install OpenVINO™ Toolkit Open Source
- * Install OpenCL Driver for GPU
- ```bash - cd ~/Downloads - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-gmmlib_18.4.1_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-core_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-opencl_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-opencl_19.04.12237_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-ocloc_19.04.12237_amd64.deb - sudo dpkg -i *.deb - ``` - * Install [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino)([tag 2019_R3.1](https://github.com/openvinotoolkit/openvino/blob/2019_R3.1/inference-engine/README.md))
- * Install [Open Model Zoo](https://github.com/opencv/open_model_zoo)([tag 2019_R3.1](https://github.com/opencv/open_model_zoo/blob/2019_R3.1/demos/README.md))
- -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation - -* set ENV InferenceEngine_DIR, CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export InferenceEngine_DIR=/opt/openvino_toolkit/dldt/inference-engine/build/ - export CPU_EXTENSION_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - - - diff --git a/doc/installation/installation.md b/doc/installation/installation.md deleted file mode 100644 index 6596a35a..00000000 --- a/doc/installation/installation.md +++ /dev/null @@ -1,11 +0,0 @@ - -# Installation ->> Intel releases 2 different series of OpenVINO Toolkit, we call them as [OpenSource Version](https://github.com/openvinotoolkit/openvino/) and [Binary Version](https://software.intel.com/en-us/openvino-toolkit). You may choose any of them to install. - -**NOTE:** If you are not sure which version you would use, it is recommended for you to choose [Binary Version](https://software.intel.com/en-us/openvino-toolkit), which can simplify your environment setup. 
- -## OpenSource Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/OPEN_SOURCE_INSTALLATION.md) for details. - -## Binary Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/BINARY_INSTALLATION.md) for details. diff --git a/doc/launching/launch.md b/doc/launching/launch.md deleted file mode 100644 index efc1d1ae..00000000 --- a/doc/launching/launch.md +++ /dev/null @@ -1,37 +0,0 @@ -# Launching -## 1. Setup Environment -Please refer to this [guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/set_environment.md) for details. - -**NOTE:** Configure *once* the Neural Compute Stick USB Driver by following between instructions, in case you have a NCS or NCS2 in hand. - ```bash - cd ~/Downloads - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` -## 2. Launch Program -### Topic -Each inference listed in [section Inference Implementations](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations) is created default launching configurations( xxx.launch.py) in OpenVINO Sample package. You can follow the utility of ROS2 launch instruction to launch them. 
For example: - ```bash - ros2 launch dynamic_vino_sample pipeline_object.launch.py - ``` - -The full list of xxx.launch.py is shown in below tabel: - -|Download Models|Launch File|Description| -|---|---|---| -|[Object Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md)|pipeline_object.launch.py|Launching file for **Object Detection**, by default mobilenet_ssd model and standard USB camera are used.| -|[Face Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md)|pipeline_people.launch.py|Launching file for **Face Detection**, also including **Age/Gender Recognition, HeadPose Estimation, and Emotion Recognition**.| -|[Object Segmentation](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Segmentation.md)|pipeline_segmentation.launch.py|Launching file for **Object Segmentation**.| -|[Person Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/People_Reidentification.md)|pipeline_person_reid.launch.py|Launching file for **Person Re-Identification**.| -|[Face Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Reidentification.md)|pipeline_face_reid.launch.py|Launching file for **Face Segmentation**, in which **Face Landmark Detection** is included.| -|[Vehicle Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Vehicle_Detection.md)|pipeline_vehicle_detection.launch.py|Launching file for **vehicle detection**, in which **license plate recognition** is included.| - -### Service -See [service Page](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/service.md) for detailed launching instructions. 
diff --git a/doc/launching/service.md b/doc/launching/service.md deleted file mode 100644 index c5f5701f..00000000 --- a/doc/launching/service.md +++ /dev/null @@ -1,27 +0,0 @@ -# Service -## Download Models -### Object Detection Service -* See [object detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md#mobilenet-ssd) section for detailed instructions. - -### People Detection Service -* See [People Detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md#opensource-version) section for detaild instructions. - -## Launching -* run object detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_object_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_object_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/car.png - ``` -* run face detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_people_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_people_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg - ``` diff --git a/doc/launching/set_environment.md b/doc/launching/set_environment.md deleted file mode 100644 index d50006a3..00000000 --- a/doc/launching/set_environment.md +++ /dev/null @@ -1,32 +0,0 @@ -# Set Environment -## OpenSource Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib - export openvino_version=opensource - ``` -* Install prerequisites - ```bash - cd /opt/openvino_toolkit/dldt/model-optimizer/install_prerequisites - 
sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` -## Binary Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - source /opt/intel/openvino/bin/setupvars.sh - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib - export openvino_version=binary - ``` -* Install prerequisites - ```bash - cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites - sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` diff --git a/doc/quick_start/getting_started_with_Dashing_Ubuntu18.04.md b/doc/quick_start/getting_started_with_Dashing_Ubuntu18.04.md deleted file mode 100644 index e69de29b..00000000 diff --git a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md deleted file mode 100644 index e69de29b..00000000 diff --git a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md deleted file mode 100644 index 244e562d..00000000 --- a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md +++ /dev/null @@ -1,121 +0,0 @@ -# ROS2_GALACTIC_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. 
Environment Setup -* Install ROS2 Galactic ([guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) or building by source code ([guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingForLinux)) -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. Building and Installation -* Install ROS2_OpenVINO_Toolkit packages -``` -mkdir -p ~/catkin_ws/src -cd ~/catkin_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b galactic_dev -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2 -git clone https://github.com/ros-perception/vision_opencv.git -b ros2 -``` -* Install dependencies -``` -sudo apt-get install ros-galactic-diagnostic-updater -``` -* Build package -``` -source /opt/ros/galactic/setup.bash -source /opt/intel/openvino_2021/bin/setupvars.sh -cd ~/catkin_ws -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. 
Running the Demo -* See all available models -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output -sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output -sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output -sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output -sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output -sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output -sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output -sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output -sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output -sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output -sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 
--output_dir /opt/openvino_toolkit/models/person-attributes/output -``` - -* copy label files (execute once) -``` - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - -``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) - * mobilenet-ssd - ``` - sudo python3 downloader.py --name mobilenet-ssd --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - cd /opt/intel/openvino_2021/deployment_tools/model_optimizer - sudo python3 mo.py --input_model /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet_ssd/caffe/output - ``` - * deeplabv3 - ``` - cd 
/opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader - sudo python3 downloader.py --name deeplabv3 --output_dir /opt/openvino_toolkit/models/deeplabv3/output - sudo python3 converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the parameters like model path, label path, inputs are right. - * run face detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_people.launch.py - ``` - * run person reidentification sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py - ``` - * run person face reidentification sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py - ``` - * run face detection sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_image.launch.py - ``` - * run object segmentation sample code input from RealSenseCameraTopic. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py - ``` - - * run vehicle detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py - ``` - * run person attributes sample code input from StandardCamera. 
- ``` - ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py - ``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* - diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index 0547588e..e0aad3a2 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -94,8 +94,7 @@ sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/fr sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 ``` -* Please check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. - +* Please check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Refer to the quick start document for [Yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. * run face detection sample code input from StandardCamera. ``` ros2 launch dynamic_vino_sample pipeline_people.launch.py diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md new file mode 100644 index 00000000..06b8826c --- /dev/null +++ b/doc/quick_start/yaml_configuration_guide.md @@ -0,0 +1,121 @@ +# Introduction + +The contents in .yaml config file should be well structured and follow the supported rules and entity names. 
+ +# Sample +## [pipeline_people.yaml](https://github.com/intel/ros2_openvino_toolkit/blob/ros2/sample/param/pipeline_people.yaml) +```bash +Pipelines: +- name: people + inputs: [StandardCamera] + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [ImageWindow, RosTopic, RViz] + connects: + - left: StandardCamera + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz] + - left: AgeGenderRecognition + right: [ImageWindow, RosTopic, RViz] + - left: EmotionRecognition + right: [ImageWindow, RosTopic, RViz] + - left: HeadPoseEstimation + right: [ImageWindow, RosTopic, RViz] + +Common: +``` +## Interface Description + +### name +The name value of this pipeline can be anyone other than null. + +### inputs +**Note**:The input parameter can only have one value.
+Currently, options for inputs are:
+
+|Option|Description|
+|--------------------|------------------------------------------------------------------|
+|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.|
+|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.|
+|RealSenseCameraTopic| Any ROS topic which is structured in image message.|
+|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.|
+|Video| Any video file which can be parsed by openCV.|
+|IpCamera| Any RTSP server which can push video stream.|
+
+### input_path
+The **input_path** needs to be specified when input is an image or video file.
+
+### infers
+The Inference Engine is a set of C++ classes that provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices.
+
+* #### name
+The name of the inference engine needs to be specified here. Currently, the inference feature list is supported:
+
+|Inference|Description|
+|-----------------------|------------------------------------------------------------------|
+|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|
+|EmotionRecognition| Emotion recognition based on detected face image.|
+|AgeGenderRecognition| Age and gender recognition based on detected face image.|
+|HeadPoseEstimation| Head pose estimation based on detected face image.|
+|ObjectDetection| object detection based on SSD-based trained models.|
+|VehicleDetection| Vehicle and passenger detection based on Intel models.|
+|ObjectSegmentation| object detection and segmentation.|
+
+* #### model
+The path of the model needs to be specified here. The scheme below illustrates the typical workflow for deploying a trained deep learning model. 
+![trained deep learning model](https://github.com/intel/ros2_openvino_toolkit/blob/ros2/data/images/CVSDK_Flow.png "trained deep learning model")
+
+* #### engine
+**Note**: Currently, only CPU and GPU are supported.
+Target device options are:
+
+|Target Device|
+|-----------------------|
+|CPU|
+|Intel® Integrated Graphics|
+|FPGA|
+|Intel® Movidius™ Neural Compute Stick|
+
+* #### label
+Currently, this parameter does not work.
+
+* #### batch
+Enable dynamic batch size for the inference engine net.
+
+### outputs
+**Note**: The output parameter can be one or more.
+Currently, the output options are: + +|Option|Description| +|--------------------|------------------------------------------------------------------| +|ImageWindow| Window showing results| +|RosTopic| Output the topic| +|RViz| Display the result in rviz| + +### confidence_threshold +Set the threshold of detection probability. + +### connects +The topology of a pipe can only have one value on the left and multiple values on the right. The value of the first left node should be the same as the specified **inputs**. diff --git a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md b/doc/tables_of_contents/Design_Architecture_and_logic_flow.md deleted file mode 100644 index 86c48bb3..00000000 --- a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md +++ /dev/null @@ -1,27 +0,0 @@ -# Design Architecture -From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. - -![OpenVINO_Architecture](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/design_arch.PNG "OpenVINO RunTime Architecture") - -- **Intel® OpenVINO™ toolkit** is leveraged to provide deep learning basic implementation for data inference. is free software that helps developers and data scientists speed up computer vision workloads, streamline deep learning inference and deployments, -and enable easy, heterogeneous execution across Intel® platforms from edge to cloud. It helps to: - - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. - - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - - Speed development using optimized OpenCV* and OpenVX* functions. -- **ROS2 OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. 
Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. -- **Diverse Input resources** are the data resources to be inferred and analyzed with the OpenVINO framework. -- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inference results. -- **Optimized Models** provided by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization. It also supports graph freeze and graph summarize along with dynamic input freezing. - -# Logic Flow -From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. - -![Logic_Flow](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") - -Once a corresponding program is launched with a specified .yaml config file passed in the .launch.py file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. - -The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. 
Please see [the configuration guidance](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) for how to create or edit the config files. - -**Pipeline** fulfills the whole data handling process: initializing Input Component for image data gathering and formatting; building up the structured inference network and passing the formatted data through the inference network; transferring the inference results and handling output, etc. - -**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. diff --git a/doc/tables_of_contents/prerequisite.md b/doc/tables_of_contents/prerequisite.md deleted file mode 100644 index f42279d7..00000000 --- a/doc/tables_of_contents/prerequisite.md +++ /dev/null @@ -1,31 +0,0 @@ -# Development and Target Platform - ->> The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use. - -## Hardware -### Processor Supported: -- Intel architecture processor, e.g. 6th~8th generation Intel® Core™ -- Intel® Xeon® v5 family -- Intel® Xeon® v6 family -- Intel® Pentium® processor N4200/5, N3350/5, N3450/5 with Intel® HD Graphics - -**Notes**: -- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor. -- A chipset that supports processor graphics is required for Intel® Xeon® processors. -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. 
- * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -### Peripherals Required: -- Intel® Movidius™ Neural Compute Stick -- Intel® Neural Compute Stick 2 -- Intel® Vision Accelerator Design with Intel® Movidius™ VPU -- RGB Camera, e.g. RealSense D400 Series or standard USB camera - -## Operating Systems -- Ubuntu 16.04 or 18.04 long-term support (LTS), 64-bit: Minimum supported kernel is 4.14 -- CentOS 7.4, 64-bit (for target only) -- Yocto Project Poky Jethro v2.0.3, 64-bit (for target only and requires modifications) - -**Note**: Since **Ubuntu 18.04** in the list is the only one well supported by ROS2 core, it is highly recommended to use it as the OS. diff --git a/doc/tables_of_contents/supported_features/Supported_features.md b/doc/tables_of_contents/supported_features/Supported_features.md deleted file mode 100644 index 3117ac71..00000000 --- a/doc/tables_of_contents/supported_features/Supported_features.md +++ /dev/null @@ -1,33 +0,0 @@ -# Supported Features -## Input Resources -Currently, the package supports RGB frame data from several kinds of input resources: -- Standard USB Camera -- Realsense Camera -- Image Topic -- Image File -- Video File - -See more from [the input resource description](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/input_resource.md). 
- -## Inference Implementations -Inferences shown in below list are supported: -- Face Detection -- Emotion Recognition -- Age and Gender Recognition -- Head Pose Estimation -- Object Detection -- Vehicle and License Detection -- Object Segmentation -- Person Re-Identification -- Face Re-Identification - -[Inference functionality overview](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/inference_functionality_overview.md). - -## Output Types -The inference results can be output in several types. One or more types can be enabled for any infernece pipeline: -- Topic Publishing -- Image View Window -- RViz Showing -- Service (as a mechanism responding user's request about object detection results.) - -See more from [output types](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/output_types.md) page. diff --git a/doc/tables_of_contents/supported_features/inference_functionality_overview.md b/doc/tables_of_contents/supported_features/inference_functionality_overview.md deleted file mode 100644 index 35afb571..00000000 --- a/doc/tables_of_contents/supported_features/inference_functionality_overview.md +++ /dev/null @@ -1,16 +0,0 @@ -# Infernece Feature List -Currently, the inference feature list is supported: - -|Inference Label|Description|Outputs Topic| -|---|---|---| -|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|EmotionRecognition| Emotion recognition based on detected face image.|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))| -|AgeGenderRecognition| Age and gener recognition based on detected face 
image.|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|HeadPoseEstimation| Head pose estimation based on detected face image.|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|ObjectDetection| object detection based on SSD-based trained models.|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|VehicleAttribsDetection| Vehicle detection based on Intel models.|```/ros2_openvino_toolkit/detected_vehicles_attribs```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|LicensePlateDetection| License detection based on Intel models.|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| -|ObjectSegmentation| object detection and segmentation.|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|PersonReidentification| Person Reidentification based on object detection.|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|LandmarksDetection| Landmark regression based on face 
detection.|```/ros2_openvino_toolkit/detected_landmarks```([people_msgs::msg::LandmarkStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LandmarkStamped.msg))| -|FaceReidentification| Face Reidentification based on face detection.|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| diff --git a/doc/tables_of_contents/supported_features/input_resource.md b/doc/tables_of_contents/supported_features/input_resource.md deleted file mode 100644 index 43cd3af0..00000000 --- a/doc/tables_of_contents/supported_features/input_resource.md +++ /dev/null @@ -1,8 +0,0 @@ -# Full list of supported Input Resources -|Input Resource Name|Description| -|---|-------------------------------------------| -|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| -|RealSenseCamera| Intel RealSense RGB-D Camera,directly calling RealSense Camera via librealsense plugin of openCV.| -|RealSenseCameraTopic| any ROS topic which is structured in image message.The topic to be inputted must be remapped to name ```/openvino_toolkit/image_raw```(type [sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg))| -|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| -|Video| Any video file which can be parsed by openCV.| \ No newline at end of file diff --git a/doc/tables_of_contents/supported_features/output_types.md b/doc/tables_of_contents/supported_features/output_types.md deleted file mode 100644 index 315c0cb9..00000000 --- a/doc/tables_of_contents/supported_features/output_types.md +++ /dev/null @@ -1,43 +0,0 @@ -# Output Types ->> The inference results can be output in several types. One or more types can be enabled for any inference pipeline. 
-## Topic Publishing ->> Specific topic(s) can be generated and published according to the given inference functionalities.
- -|Inference|Published Topic| -|---|---| -|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))|/ros2_openvino_toolkit/face_detection(object_msgs:msg:ObjectsInBoxes) -|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|Vehicle 
License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| - -## Image View Window ->> The original image and the inference results are rendered together and shown in a CV window. -## RViz Showing ->> The Rendered image (rendering inference results into the original image) was transformed into sensor_msgs::msg::Image topic, that can be shown in RViz application. -- RViz Published Topic -```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg)) - -## Service ->> Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
- -- **Face Detection or Object Detection for a given Image file** - -|Inference|Service| -|---|---| -|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Age Gender Detection Service|```/detect_age_gender```([people_msgs::srv::AgeGender](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/AgeGender.srv))| -|Headpose Detection Service|```/detect_head_pose```([people_msgs::srv::HeadPose](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/HeadPose.srv))| -|Emotion Detection Service|```/detect_emotion```([people_msgs::srv::Emotion](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/Emotion.srv))| - -- **Inference Pipeline Lifecycle Management** - - Create new pipeline - - Start/Stop/Pause a pipeline - - Get pipeline list or status - diff --git a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md b/doc/tables_of_contents/tutorials/Multiple_Pipelines.md deleted file mode 100644 index cd03aec7..00000000 --- a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md +++ /dev/null @@ -1,54 +0,0 @@ -# Multiple Pipelines ->> This is a way to run more than one pipeline in the same process.Having multiple pipelines in a single instance allows each pipeline to have custom configuration and different performance. - -## prerequest -see [this guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) to see how to customize a pipeline. 
- -## A demo for multiple pipeline -```bash -1 Pipelines: - 2 - name: object1 - 3 inputs: [StandardCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 7 engine: CPU - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: StandardCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - 22 - 23 - name: object2 - 24 inputs: [RealSenseCamera] - 25 infers: - 26 - name: ObjectDetection - 27 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 28 engine: CPU - 29 label: to/be/set/xxx.labels - 30 batch: 1 - 31 confidence_threshold: 0.5 - 32 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 33 outputs: [ImageWindow, RosTopic, RViz] - 34 connects: - 35 - left: RealSenseCamera - 36 right: [ObjectDetection] - 37 - left: ObjectDetection - 38 right: [ImageWindow] - 39 - left: ObjectDetection - 40 right: [RosTopic] - 41 - left: ObjectDetection - 42 right: [RViz] - 43 - 44 OpenvinoCommon: - -``` diff --git a/doc/tables_of_contents/tutorials/configuration_file_customization.md b/doc/tables_of_contents/tutorials/configuration_file_customization.md deleted file mode 100644 index 703459b6..00000000 --- a/doc/tables_of_contents/tutorials/configuration_file_customization.md +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration File Customization - -One of the key added values of ROS2 OpenVINO is automatically create new pipeline on 
demand according to the given configuration files. In order to create new pipelines, the end user only need to create a new configuration file or update one already existed. The configuration file must be written by following some rules. - - 1 Pipelines: - 2 - name: object - 3 inputs: [RealSenseCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/intel/openvino/deployment_tools/tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml - 7 engine: MYRIAD - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: RealSenseCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - -In this sample, a pipeline is to be created with this topology: - -```flow -input=operation: RealSenseCamera -infer=operation: ObjectDetection -output1=operation: ImageWindow -output2=operation: RosTopic -output3=operation: RViz - -input-infer-output1 -infer-output2 -infer-output3 -``` - -Detail Description for each line shows in below tabel: - -|Line No.|Description| -|-------------|---| -| 1 |Keyword, label for pipeline parameters. The pipeline configuration must be started by this line.| -|2|Pipeline name, the published topics bound to this name. (e.g. /openvino_toolkit/**object**/face_detection)| -|3|The name of chosen input device, should be one and only one of [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#input-resources) (taking the item "Input Resource Name").| -|4|key word for inference section. 
one or more inferences can be included in a pipeline's inference section.| -|5|The name of Inference instance, should be in [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations).
**NOTE**: if a pipeline contains 2 or more inference instances, the first one should be a detection inference. -|6|Model description file with absolute path, generated by model_optimizer tool| -|7|The name of Inference engine, should be one of:CPU, GPU and MYRIAD.| -|8|The file name with absolute path of object labels.
**NOTE**: not enabled in the current version. The labels file with the same name as model description file under the same folder is searched and used.| -|9|The number of input data to be enqueued and handled by inference engine in parallel.| -|10|Set the inference result filtering by confidence ratio.| -|11|set *enable_roi_constraint* to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame.| -|12|A list of output method enabled for inference result showing/notifying. Should be one or some of:
• ImageWindow
• RosTopic
• Rviz
• RosService(*)
**NOTE**: RosService can only be used in ROS2 service server pipeline.| -|13|keyword for pipeline entities' relationship topology.| -|14~21|The detailed connection topology for the pipeline.
A pair of "left" and "right" parameters, whose contents are the names of inputs(line3), infers(line5) and outputs(line12) defines a connection between the two entities, it also defines that the data would be moved from *entity left* to *entity right*.| From f644e79f9370fb8f0bb2d042c4956fcf0ebd05dc Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 01:32:25 +0800 Subject: [PATCH 05/28] add the table of contents in README --- README.md | 27 ++++++++++++++----- .../getting_started_with_ros2_ov2.0.md | 4 +-- doc/quick_start/yaml_configuration_guide.md | 4 +-- 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 72896a0e..baedd810 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,27 @@ # ros2_openvino_toolkit +# Table of Contents +* [➤ Introduction](#-introduction) + * [ROS2 Version Supported](#-ros2-version-supported) + * [Inference Features Supported](#-inference-features-supported) + * [Prerequisite](#-prerequisite) + * [Design Architecture](#-design-architecture) + * [Logic Flow](#-logic-flow) +* [➤ Supported Features](#-supported-features) + * [Multiple Input Components](#-multiple-input-components) + * [Inference Implementations](#-inference-implementations) + * [ROS interfaces and outputs](#-ros-interfaces-and-outputs) + * [Demo result Snapshots](#-demo-result-snapshots) +* [➤ Installation & Launching](#-installation-and-launching) +* [➤ Reference](#-reference) + # Introduction -## ROS2 Version supported +## ROS2 Version Supported * [x] ROS2 Galactic * [x] ROS2 Foxy * [x] ROS2 Humble -## Inference Features supported +## Inference Features Supported * [x] Object Detection * [x] Face Detection * [x] Age Gender Recognition @@ -36,7 +51,7 @@ From the view of hirarchical architecture design, the package is divided into di - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - Speed development using optimized OpenCV* and OpenVX* functions. 
See more from [here](https://github.com/openvinotoolkit/openvino) for Intel OpenVINO™ introduction. -- **ros OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. +- **ROS OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. - **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework. - **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results. - **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. 
@@ -83,10 +98,10 @@ Currently, the inference feature list is supported: ## ROS interfaces and outputs ### Topic -* #### Subscribed Topic +#### Subscribed Topic - Image topic: ```/camera/color/image_raw```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) -* #### Published Topic +#### Published Topic - Face Detection: ```/ros2_openvino_toolkit/face_detection```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) - Emotion Recognition: @@ -143,7 +158,7 @@ See below pictures for the demo result snapshots. * Person Reidentification input from standard camera ![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") -# Installation & Launching +# Installation and Launching * Refer to the quick start document for [ROS2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. * Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. 
diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index e0aad3a2..56d0a659 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -80,7 +80,7 @@ sudo python3 downloader.py --list download_model.lst -o /opt/openvino_toolkit/mo * If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be converted to Intermediate Representation (such as the model for object detection): ``` cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools -sudo python3 converter.py --list convert_model.lst -o /opt/openvino_toolkit/models/convert +sudo python3 converter.py --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert ``` * Copy label files (execute once) @@ -94,7 +94,7 @@ sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/fr sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 ``` -* Please check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Refer to the quick start document for [Yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. +* Please check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Refer to the quick start document for [yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. * run face detection sample code input from StandardCamera. 
``` ros2 launch dynamic_vino_sample pipeline_people.launch.py diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md index 06b8826c..12ffa4a4 100644 --- a/doc/quick_start/yaml_configuration_guide.md +++ b/doc/quick_start/yaml_configuration_guide.md @@ -3,7 +3,7 @@ The contents in .yaml config file should be well structured and follow the supported rules and entity names. # Sample -## [pipeline_people.yaml](https://github.com/intel/ros2_openvino_toolkit/blob/ros2/sample/param/pipeline_people.yaml) +## [pipeline_people.yaml](../../sample/param/pipeline_people.yaml) ```bash Pipelines: - name: people @@ -85,7 +85,7 @@ The name of inference engine need to be specified here. Currently, the inference * #### model The path of model need to be specified here. The scheme below illustrates the typical workflow for deploying a trained deep learning model. -![trained deep learning model](https://github.com/intel/ros2_openvino_toolkit/blob/ros2/data/images/CVSDK_Flow.png "trained deep learning model") +![trained deep learning model](../../data/images/CVSDK_Flow.png "trained deep learning model") * #### engine **Note**:Currently, only CPU and GPU are supported.
From 4845d76377a6d9f6c6633152d15d6739b0823a64 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 01:35:43 +0800 Subject: [PATCH 06/28] fix link --- README.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index baedd810..e75144ad 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,19 @@ # ros2_openvino_toolkit # Table of Contents -* [➤ Introduction](#-introduction) - * [ROS2 Version Supported](#-ros2-version-supported) - * [Inference Features Supported](#-inference-features-supported) - * [Prerequisite](#-prerequisite) - * [Design Architecture](#-design-architecture) - * [Logic Flow](#-logic-flow) -* [➤ Supported Features](#-supported-features) - * [Multiple Input Components](#-multiple-input-components) - * [Inference Implementations](#-inference-implementations) - * [ROS interfaces and outputs](#-ros-interfaces-and-outputs) - * [Demo result Snapshots](#-demo-result-snapshots) -* [➤ Installation & Launching](#-installation-and-launching) -* [➤ Reference](#-reference) +* [➤ Introduction](#introduction) + * [ROS2 Version Supported](#ros2-version-supported) + * [Inference Features Supported](#inference-features-supported) + * [Prerequisite](#prerequisite) + * [Design Architecture](#design-architecture) + * [Logic Flow](#logic-flow) +* [➤ Supported Features](#supported-features) + * [Multiple Input Components](#multiple-input-components) + * [Inference Implementations](#inference-implementations) + * [ROS interfaces and outputs](#ros-interfaces-and-outputs) + * [Demo result Snapshots](#demo-result-snapshots) +* [➤ Installation & Launching](#installation-and-launching) +* [➤ Reference](#reference) # Introduction ## ROS2 Version Supported From 3104f2525b46abe5ad8b5c4803d8f186e588014b Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 01:55:59 +0800 Subject: [PATCH 07/28] refine for format and capital letter --- README.md | 14 ++++++------ .../getting_started_with_ros2_ov2.0.md | 
22 +++++++++---------- doc/quick_start/yaml_configuration_guide.md | 14 ++++++------ 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index e75144ad..e5c85647 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,8 @@ * [➤ Supported Features](#supported-features) * [Multiple Input Components](#multiple-input-components) * [Inference Implementations](#inference-implementations) - * [ROS interfaces and outputs](#ros-interfaces-and-outputs) - * [Demo result Snapshots](#demo-result-snapshots) + * [ROS Interfaces and Outputs](#ros-interfaces-and-outputs) + * [Demo Result Snapshots](#demo-result-snapshots) * [➤ Installation & Launching](#installation-and-launching) * [➤ Reference](#reference) @@ -146,25 +146,25 @@ To enable window, Image Window output should be added into the output choices in ## Demo Result Snapshots See below pictures for the demo result snapshots. -* face detection input from standard camera +* Face detection input from standard camera ![face_detection_demo_image](./data/images/face_detection.png "face detection demo image") -* object detection input from realsense camera +* Object detection input from realsense camera ![object_detection_demo_realsense](./data/images/object_detection.gif "object detection demo realsense") -* object segmentation input from video +* Object segmentation input from video ![object_segmentation_demo_video](./data/images/object_segmentation.gif "object segmentation demo video") * Person Reidentification input from standard camera ![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") # Installation and Launching -* Refer to the quick start document for [ROS2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. 
+* Refer to the quick start document for [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. * Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. # Reference * Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master) for detailed model structure and demo samples. -* OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO api 2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. +* OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. # More Information * ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index 56d0a659..8d92c55f 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -5,20 +5,20 @@ Below steps have been tested on **Ubuntu 20.04** and **Ubuntu 22.04**. Supported ROS2 versions include foxy,galactic and humble. ## 1. Environment Setup -* For ROS2 foxy and galactic on ubuntu 20.04: - * Install ROS2. ([foxy_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html)) & ([galactic_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html)) +For ROS2 foxy and galactic on ubuntu 20.04: + * Install ROS2. [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) - * Install Intel® OpenVINO™ Toolkit Version: 2022.1. 
([guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html)) - * Install from an achive file. Both runtime and development tool are needed, `pip` is recommended for installing the development tool. ([guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html)) + * Install Intel® OpenVINO™ Toolkit Version: 2022.1. [OpenVINO_install_guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html)) + * Install from an achive file. Both runtime and development tool are needed, `pip` is recommended for installing the development tool. [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html)) - * Install Intel® RealSense™ SDK. ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) + * Install Intel® RealSense™ SDK. [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) -* For ROS2 humble on ubuntu 22.04: - * Install ROS2. ([humble_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html)) +For ROS2 humble on ubuntu 22.04: + * Install ROS2. [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) - * Install Intel® OpenVINO™ Toolkit Latest Version by Source. ([guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode)) + * Install Intel® OpenVINO™ Toolkit Latest Version by Source. [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) - * Install Intel® RealSense™ SDK by Source. ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)) + * Install Intel® RealSense™ SDK by Source. [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) ## 2. 
Building and Installation * Install ROS2_OpenVINO_Toolkit packages @@ -46,7 +46,7 @@ source ./install/local_setup.bash ## 3. Running the Demo ### Install OpenVINO 2022.1 by PIP -* OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.([guide](https://pypi.org/project/openvino-dev/)) +OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.[OMZtool_guide](https://pypi.org/project/openvino-dev/) * See all available models ``` @@ -94,7 +94,7 @@ sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/fr sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 ``` -* Please check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Refer to the quick start document for [yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. +* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start document for [yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. * run face detection sample code input from StandardCamera. ``` ros2 launch dynamic_vino_sample pipeline_people.launch.py diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md index 12ffa4a4..ea580ca7 100644 --- a/doc/quick_start/yaml_configuration_guide.md +++ b/doc/quick_start/yaml_configuration_guide.md @@ -48,10 +48,10 @@ Common: ``` ## Interface Description -### name +### Specify pipeline name The name value of this pipeline can be anyone other than null. 
-### inputs +### Specify inputs **Note**:The input parameter can only have one value.
Currently, options for inputs are: @@ -64,10 +64,10 @@ Currently, options for inputs are: |Video| Any video file which can be parsed by openCV.| |IpCamera| Any RTSP server which can push video stream.| -### input_path +### Specify input_path The **input_path** need to be specified when input is an image or video file. -### infers +### Specify infers The Inference Engine is a set of C++ classes to provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices. * #### name @@ -104,7 +104,7 @@ Currently, this parameter does not work. * #### batch Enable dynamic batch size for the inference engine net. -### outputs +### Specify outputs **Note**:The output parameter can be one or more.
Currently, the output options are: @@ -114,8 +114,8 @@ Currently, the output options are: |RosTopic| Output the topic| |RViz| Display the result in rviz| -### confidence_threshold +### Specify confidence_threshold Set the threshold of detection probability. -### connects +### Specify connects The topology of a pipe can only have one value on the left and multiple values on the right. The value of the first left node should be the same as the specified **inputs**. From 6139126918dd201b27f2034cb386fe4446abc5be Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 02:02:34 +0800 Subject: [PATCH 08/28] add reference link for installation --- .../getting_started_with_ros2_ov2.0.md | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index 8d92c55f..d055f83e 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -6,19 +6,26 @@ Supported ROS2 versions include foxy,galactic and humble. ## 1. Environment Setup For ROS2 foxy and galactic on ubuntu 20.04: - * Install ROS2. [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) + * Install ROS2.
+ Refer to:[ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) - * Install Intel® OpenVINO™ Toolkit Version: 2022.1. [OpenVINO_install_guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html)) - * Install from an achive file. Both runtime and development tool are needed, `pip` is recommended for installing the development tool. [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html)) + * Install Intel® OpenVINO™ Toolkit Version: 2022.1.
+ Refer to:[OpenVINO_install_guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html) + * Install from an achive file. Both runtime and development tool are needed, `pip` is recommended for installing the development tool.
+ Refer to:[OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) - * Install Intel® RealSense™ SDK. [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) + * Install Intel® RealSense™ SDK.
+ Refer to:[RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) For ROS2 humble on ubuntu 22.04: - * Install ROS2. [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) + * Install ROS2.
+ Refer to:[ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) - * Install Intel® OpenVINO™ Toolkit Latest Version by Source. [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) + * Install Intel® OpenVINO™ Toolkit Latest Version by Source.
+ Refer to:[OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) - * Install Intel® RealSense™ SDK by Source. [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) + * Install Intel® RealSense™ SDK by Source.
+ Refer to:[RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) ## 2. Building and Installation * Install ROS2_OpenVINO_Toolkit packages @@ -46,7 +53,8 @@ source ./install/local_setup.bash ## 3. Running the Demo ### Install OpenVINO 2022.1 by PIP -OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.[OMZtool_guide](https://pypi.org/project/openvino-dev/) +OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.
+Refer to:[OMZtool_guide](https://pypi.org/project/openvino-dev/) * See all available models ``` From 5438137414081b73090309894f04ec52fdf8df72 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Fri, 16 Dec 2022 17:26:48 +0800 Subject: [PATCH 09/28] add model and yaml link for inference table --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e5c85647..66521355 100644 --- a/README.md +++ b/README.md @@ -85,16 +85,16 @@ Currently, the package support several kinds of input resources of gaining image ## Inference Implementations Currently, the inference feature list is supported: -|Inference|Description| -|-----------------------|------------------------------------------------------------------| -|Face Detection|Object Detection task applied to face recognition using a sequence of neural networks.| -|Emotion Recognition| Emotion recognition based on detected face image.| -|Age & Gender Recognition| Age and gender recognition based on detected face image.| -|Head Pose Estimation| Head pose estimation based on detected face image.| -|Object Detection| Object detection based on SSD-based trained models.| -|Vehicle and License Detection| Vehicle and license detection based on Intel models.| -|Object Segmentation| object segmentation.| -|Person Reidentification| Person Reidentification based on object detection.| +|Inference|Description|YAML Configuration|Model Used| +|-----------------------|------------------------------------------------------------------|----------------------|----------------------| +|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[Face Detection YAML](./sample/param/pipeline_image.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001) 
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013) [emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003) [head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| +|Emotion Recognition| Emotion recognition based on detected face image.|[Emotion Detection YAML](./sample/param/pipeline_image.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)| +|Age & Gender Recognition| Age and gender recognition based on detected face image.|[Age Gender Detection YAML](./sample/param/pipeline_image.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)| +|Head Pose Estimation| Head pose estimation based on detected face image.|[Head Pose Detection YAML](./sample/param/pipeline_image.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| +|Object Detection| Object detection based on SSD-based trained models.|[Object Detection YAML](./sample/param/pipeline_object.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)| +|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[Vehicle & License Detection YAML](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)| +|Object Segmentation| Object segmentation.|[Object Segmentation 
YAML](./sample/param/pipeline_segmentation.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)| +|Person Reidentification| Person Reidentification based on object detection.|[Person Reidentification YAML](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)| ## ROS interfaces and outputs ### Topic From 72664702f96389c9e7eb62db10fa91943efbb67d Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 18:47:27 +0800 Subject: [PATCH 10/28] add table for the corresponding relation of ros2 branches --- README.md | 9 +++++++++ .../getting_started_with_ros2_ov2.0.md | 16 ++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 66521355..414544ba 100644 --- a/README.md +++ b/README.md @@ -14,13 +14,22 @@ * [Demo Result Snapshots](#demo-result-snapshots) * [➤ Installation & Launching](#installation-and-launching) * [➤ Reference](#reference) +* [➤ More Information](#more-information) # Introduction ## ROS2 Version Supported +* [x] ROS2 Dashing * [x] ROS2 Galactic * [x] ROS2 Foxy * [x] ROS2 Humble +|Branch Name|ROS2 Version Supported|Openvino Version|Corresponding Branch Link|OS Version| +|-----------------------|-----------------------|--------------------------------|----------------------|----------------------| +|Dashing|Dashing|V2022.1, V2022.2|[dashing branch](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Ubuntu 18.04| +|ros2|Galactic, Foxy, Humble|V2022.1, V2022.2|[ros2 branch](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Ubuntu 20.04, Ubuntu 22.04| +|foxy|foxy|V2021.4|[foxy branch](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Ubuntu 20.04| +|galactic-ov2021.4|galactic|V2021.4|[galactic 
branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| + ## Inference Features Supported * [x] Object Detection * [x] Face Detection diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md index d055f83e..13b2f4c4 100644 --- a/doc/quick_start/getting_started_with_ros2_ov2.0.md +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -7,25 +7,25 @@ Supported ROS2 versions include foxy,galactic and humble. ## 1. Environment Setup For ROS2 foxy and galactic on ubuntu 20.04: * Install ROS2.
- Refer to:[ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) + Refer to: [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) * Install Intel® OpenVINO™ Toolkit Version: 2022.1.
- Refer to:[OpenVINO_install_guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html) + Refer to: [OpenVINO_install_guide](https://docs.openvino.ai/2022.1/openvino_docs_install_guides_installing_openvino_linux.html) * Install from an achive file. Both runtime and development tool are needed, `pip` is recommended for installing the development tool.
- Refer to:[OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) + Refer to: [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) * Install Intel® RealSense™ SDK.
- Refer to:[RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) + Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) For ROS2 humble on ubuntu 22.04: * Install ROS2.
- Refer to:[ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) + Refer to: [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) * Install Intel® OpenVINO™ Toolkit Latest Version by Source.
- Refer to:[OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) + Refer to: [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) * Install Intel® RealSense™ SDK by Source.
- Refer to:[RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) + Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) ## 2. Building and Installation * Install ROS2_OpenVINO_Toolkit packages @@ -54,7 +54,7 @@ source ./install/local_setup.bash ## 3. Running the Demo ### Install OpenVINO 2022.1 by PIP OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.
-Refer to:[OMZtool_guide](https://pypi.org/project/openvino-dev/) +Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/) * See all available models ``` From bb45a4a400b265b81f7ceb96744386323ba0e60f Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 19:22:10 +0800 Subject: [PATCH 11/28] add links to the table of contents and refine docker instruction --- README.md | 13 ++++++++++--- docker/docker_instructions_ov2.0.md | 8 +++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 414544ba..121776a2 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,8 @@ * [ROS Interfaces and Outputs](#ros-interfaces-and-outputs) * [Demo Result Snapshots](#demo-result-snapshots) * [➤ Installation & Launching](#installation-and-launching) + * [Deploy in Local Environment](#deploy-in-local-environment) + * [Deploy in Docker](#deploy-in-docker) * [➤ Reference](#reference) * [➤ More Information](#more-information) @@ -25,10 +27,10 @@ |Branch Name|ROS2 Version Supported|Openvino Version|Corresponding Branch Link|OS Version| |-----------------------|-----------------------|--------------------------------|----------------------|----------------------| -|Dashing|Dashing|V2022.1, V2022.2|[dashing branch](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Ubuntu 18.04| +|dashing|Dashing|V2022.1, V2022.2|[dashing branch](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Ubuntu 18.04| |ros2|Galactic, Foxy, Humble|V2022.1, V2022.2|[ros2 branch](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Ubuntu 20.04, Ubuntu 22.04| -|foxy|foxy|V2021.4|[foxy branch](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Ubuntu 20.04| -|galactic-ov2021.4|galactic|V2021.4|[galactic branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| +|foxy|Foxy|V2021.4|[foxy branch](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Ubuntu 20.04| 
+|galactic-ov2021.4|Galactic|V2021.4|[galactic branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| ## Inference Features Supported * [x] Object Detection @@ -168,9 +170,14 @@ See below pictures for the demo result snapshots. ![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") # Installation and Launching +## Deploy in local environment * Refer to the quick start document for [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. * Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. +## Deploy in docker +* Refer to the docker instruction for [docker_instructions](./docker/docker_instructions_ov2.0.md) for detailed information about building docker image and launching. +* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. + # Reference * Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master) for detailed model structure and demo samples. * OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. diff --git a/docker/docker_instructions_ov2.0.md b/docker/docker_instructions_ov2.0.md index 1414da15..39c9e448 100644 --- a/docker/docker_instructions_ov2.0.md +++ b/docker/docker_instructions_ov2.0.md @@ -5,7 +5,8 @@ Below steps have been tested on **Ubuntu 20.04**. Supported ROS2 versions include foxy and galactic. ## 1. Environment Setup -* Install docker ([guide](https://docs.docker.com/engine/install/ubuntu/)) +* Install docker.
+Refer to: [Docker_install_guide](https://docs.docker.com/engine/install/ubuntu/) ## 2. Build docker image by dockerfile ``` @@ -64,7 +65,8 @@ source ./install/local_setup.bash ``` * See all available models -OMZ tools are provided for downloading and converting OMZ models in ov2022.([guide](https://pypi.org/project/openvino-dev/)) +OMZ tools are provided for downloading and converting OMZ models in ov2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/) ``` omz_downloader --print_all @@ -92,7 +94,7 @@ sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/fr sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 ``` -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. +* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start document for [yaml configuration guidance](../doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. * run face detection sample code input from StandardCamera. ``` ros2 launch dynamic_vino_sample pipeline_people.launch.py From b9f346ed98edd3d5f379c3088ba7830ff964bc6e Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 21:28:21 +0800 Subject: [PATCH 12/28] update yaml configuration and add tables --- doc/quick_start/yaml_configuration_guide.md | 33 +++++++++++++-------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md index ea580ca7..6cf17ca6 100644 --- a/doc/quick_start/yaml_configuration_guide.md +++ b/doc/quick_start/yaml_configuration_guide.md @@ -52,20 +52,28 @@ Common: The name value of this pipeline can be anyone other than null. ### Specify inputs -**Note**:The input parameter can only have one value.
+**Note** The input parameter can only have one value.
Currently, options for inputs are: -|Option|Description| -|--------------------|------------------------------------------------------------------| -|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| -|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.| -|RealSenseCameraTopic| Any ROS topic which is structured in image message.| -|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| -|Video| Any video file which can be parsed by openCV.| -|IpCamera| Any RTSP server which can push video stream.| +|Input Option|Description|Configuration| +|--------------------|------------------------------------------------------------------|-----------------------| +|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.|```inputs: [StandardCamera]```| +|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.|```inputs: [RealSenseCamera]```| +|RealSenseCameraTopic| Any ROS topic which is structured in image message.|```inputs: [RealSenseCameraTopic]```| +|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.|```inputs: [Image]```| +|Video| Any video file which can be parsed by openCV.|```inputs: [Video]```| +|IpCamera| Any RTSP server which can push video stream.|```inputs: [IpCamera]```| + +**Note** Please refer to this opensource repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install RTSP server for IpCamera input. ### Specify input_path -The **input_path** need to be specified when input is an image or video file. +The input_path need to be specified when input is Image, Video and Ipcamera. 
+ +|Input Option|Configuration| +|--------------------|------------------------------------------------------------------| +|Image|```input_path: to/be/set/image_path```| +|Video|```input_path: to/be/set/video_path```| +|IpCamera|```input_path: "rtsp://localhost/test"```| ### Specify infers The Inference Engine is a set of C++ classes to provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices. @@ -82,13 +90,14 @@ The name of inference engine need to be specified here. Currently, the inference |ObjectDetection| object detection based on SSD-based trained models.| |VehicleDetection| Vehicle and passenger detection based on Intel models.| |ObjectSegmentation| object detection and segmentation.| +|ObjectSegmentationMaskrcnn| object segmentation based on Maskrcnn model.| * #### model The path of model need to be specified here. The scheme below illustrates the typical workflow for deploying a trained deep learning model. ![trained deep learning model](../../data/images/CVSDK_Flow.png "trained deep learning model") * #### engine -**Note**:Currently, only CPU and GPU are supported.
+**Note** Currently, only CPU and GPU are supported.
Target device options are: |Target Device| @@ -105,7 +114,7 @@ Currently, this parameter does not work. Enable dynamic batch size for the inference engine net. ### Specify outputs -**Note**:The output parameter can be one or more.
+**Note** The output parameter can be one or more.
Currently, the output options are: |Option|Description| From 3f8750366b176af3bcd8e48da6974eaedfade498 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 21:50:59 +0800 Subject: [PATCH 13/28] update yaml configuration --- doc/quick_start/yaml_configuration_guide.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md index 6cf17ca6..b6a08a2a 100644 --- a/doc/quick_start/yaml_configuration_guide.md +++ b/doc/quick_start/yaml_configuration_guide.md @@ -52,11 +52,11 @@ Common: The name value of this pipeline can be anyone other than null. ### Specify inputs -**Note** The input parameter can only have one value.
+**Note:** The input parameter can only have one value.
Currently, options for inputs are: |Input Option|Description|Configuration| -|--------------------|------------------------------------------------------------------|-----------------------| +|--------------------|------------------------------------------------------------------|-----------------------------------------| |StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.|```inputs: [StandardCamera]```| |RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.|```inputs: [RealSenseCamera]```| |RealSenseCameraTopic| Any ROS topic which is structured in image message.|```inputs: [RealSenseCameraTopic]```| @@ -64,7 +64,7 @@ Currently, options for inputs are: |Video| Any video file which can be parsed by openCV.|```inputs: [Video]```| |IpCamera| Any RTSP server which can push video stream.|```inputs: [IpCamera]```| -**Note** Please refer to this opensource repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install RTSP server for IpCamera input. +**Note:** Please refer to this opensource repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install RTSP server for IpCamera input. ### Specify input_path The input_path need to be specified when input is Image, Video and Ipcamera. @@ -97,7 +97,7 @@ The path of model need to be specified here. The scheme below illustrates the ty ![trained deep learning model](../../data/images/CVSDK_Flow.png "trained deep learning model") * #### engine -**Note** Currently, only CPU and GPU are supported.
+**Note:** Currently, only CPU and GPU are supported.
Target device options are: |Target Device| @@ -114,14 +114,14 @@ Currently, this parameter does not work. Enable dynamic batch size for the inference engine net. ### Specify outputs -**Note** The output parameter can be one or more.
+**Note:** The output parameter can be one or more.
Currently, the output options are: -|Option|Description| -|--------------------|------------------------------------------------------------------| -|ImageWindow| Window showing results| -|RosTopic| Output the topic| -|RViz| Display the result in rviz| +|Option|Description|Configuration| +|--------------------|-----------------------------------------------------|---------------------------------------------| +|ImageWindow| Window showing results|```outputs: [ImageWindow, RosTopic, RViz]```| +|RosTopic| Output the topic|```outputs: [ImageWindow, RosTopic, RViz]```| +|RViz| Display the result in rviz|```outputs: [ImageWindow, RosTopic, RViz]```| ### Specify confidence_threshold Set the threshold of detection probability. From 4842d0d424eaf3e35bcefec756c0b9c30ecd6f6d Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 22:05:34 +0800 Subject: [PATCH 14/28] update README format --- README.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 121776a2..c186971d 100644 --- a/README.md +++ b/README.md @@ -109,10 +109,20 @@ Currently, the inference feature list is supported: ## ROS interfaces and outputs ### Topic -#### Subscribed Topic + +

+

+Subscribed Topic + - Image topic: ```/camera/color/image_raw```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) -#### Published Topic +
+

+ +

+

+Published Topic + - Face Detection: ```/ros2_openvino_toolkit/face_detection```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) - Emotion Recognition: @@ -133,6 +143,8 @@ Currently, the inference feature list is supported: ```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::LicensePlateStamped](./people_msgs/msg/LicensePlateStamped.msg) - Rviz Output: ```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +
+

### Service - Object Detection Service: From 43e877620078b25ede5b0c7af41ac0e5edd26e14 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 22:16:12 +0800 Subject: [PATCH 15/28] update README format --- README.md | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/README.md b/README.md index c186971d..10b1c6a5 100644 --- a/README.md +++ b/README.md @@ -57,15 +57,42 @@ From the view of hirarchical architecture design, the package is divided into di ![OpenVINO_Architecture](./data/images/design_arch.PNG "OpenVINO RunTime Architecture") +

+

+Intel® OpenVINO™ toolkit + - **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - Speed development using optimized OpenCV* and OpenVX* functions. See more from [here](https://github.com/openvinotoolkit/openvino) for Intel OpenVINO™ introduction. +
+

+ +

+

+ROS OpenVINO Runtime Framework + - **ROS OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. +
+

+ +

+

+ROS Input & Output + - **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework. - **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results. +
+

+ +

+

+Optimized Models + - **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. +
+

## Logic Flow From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. @@ -76,9 +103,21 @@ Once a corresponding program is launched with a specified .yaml config file pass The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for how to create or edit the config files. +

+

+Pipeline + **Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc. +
+

+ +

+

+Pipeline manager **Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. +
+

# Supported Features ## Multiple Input Components From 25d62496b0ed4bffb11e0b41d6b5c93429b0ee94 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 23:02:54 +0800 Subject: [PATCH 16/28] add FAQ module --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 10b1c6a5..57f72999 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ * [Deploy in Local Environment](#deploy-in-local-environment) * [Deploy in Docker](#deploy-in-docker) * [➤ Reference](#reference) +* [➤ FAQ](#faq) * [➤ More Information](#more-information) # Introduction @@ -233,6 +234,12 @@ See below pictures for the demo result snapshots. * Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master) for detailed model structure and demo samples. * OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. 
+# FAQ +* [How to build OpenVINO by source?](https://github.com/openvinotoolkit/openvino/wiki#how-to-build) +* [How to build RealSense by source?](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) +* [What is the basic command of Docker CLI?](https://docs.docker.com/engine/reference/commandline/docker/) +* [What is the canonical C++ API for interacting with ROS?](https://docs.ros2.org/latest/api/rclcpp/) + # More Information * ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw From 72bdb9a1162865f00e5cfcfc65204f1f0a66fc0e Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sat, 17 Dec 2022 23:16:35 +0800 Subject: [PATCH 17/28] update structure of table content --- README.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 57f72999..e7a0e865 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,11 @@ # ros2_openvino_toolkit # Table of Contents -* [➤ Introduction](#introduction) +* [➤ Overview](#overview) * [ROS2 Version Supported](#ros2-version-supported) * [Inference Features Supported](#inference-features-supported) - * [Prerequisite](#prerequisite) +* [➤ Prerequisite](#prerequisite-for-ros2-branch) +* [➤ Introduction](#introduction) * [Design Architecture](#design-architecture) * [Logic Flow](#logic-flow) * [➤ Supported Features](#supported-features) @@ -19,7 +20,7 @@ * [➤ FAQ](#faq) * [➤ More Information](#more-information) -# Introduction +# Overview ## ROS2 Version Supported * [x] ROS2 Dashing * [x] ROS2 Galactic @@ -44,7 +45,7 @@ * [x] Vehicle Attribute Detection * [x] Vehicle License Plate Detection -## Prerequisite +# Prerequisite for ros2 Branch * Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) 
* OS: Ubuntu 20.04, Ubuntu 22.04 * ROS2: Foxy, Galactic, Humble @@ -53,6 +54,7 @@ * [Optional] RealSense D400 Series Camera * [Optional] Intel NCS2 Stick +# Introduction ## Design Architecture From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. @@ -62,7 +64,7 @@ From the view of hirarchical architecture design, the package is divided into di
Intel® OpenVINO™ toolkit -- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. +- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - Speed development using optimized OpenCV* and OpenVX* functions. From b7ffe17008a18cfea31dfff4e9755400e352c0fe Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 00:14:34 +0800 Subject: [PATCH 18/28] update Inference Implementations table in README --- README.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index e7a0e865..654ec0e9 100644 --- a/README.md +++ b/README.md @@ -140,14 +140,16 @@ Currently, the inference feature list is supported: |Inference|Description|YAML Configuration|Model Used| |-----------------------|------------------------------------------------------------------|----------------------|----------------------| -|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[Face Detection YAML](./sample/param/pipeline_image.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001) 
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013) [emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003) [head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| -|Emotion Recognition| Emotion recognition based on detected face image.|[Emotion Detection YAML](./sample/param/pipeline_image.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)| -|Age & Gender Recognition| Age and gender recognition based on detected face image.|[Age Gender Detection YAML](./sample/param/pipeline_image.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)| -|Head Pose Estimation| Head pose estimation based on detected face image.|[Head Pose Detection YAML](./sample/param/pipeline_image.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| -|Object Detection| Object detection based on SSD-based trained models.|[Object Detection YAML](./sample/param/pipeline_object.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)| -|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[Vehicle & License Detection YAML](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)| -|Object Segmentation| Object segmentation.|[Object Segmentation 
YAML](./sample/param/pipeline_segmentation.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)| -|Person Reidentification| Person Reidentification based on object detection.|[Person Reidentification YAML](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)| +|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| +|Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)| +|Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)| +|Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| +|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)| +|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/license-plate-recognition-barrier-0001)| +|Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3)| +|Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)| +|Person Reidentification|| Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-reidentification-retail-0277)| +|Object Segmentation Maskrcnn| Object segmentation based on maskrcnn model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)| ## ROS interfaces and outputs ### Topic From 948562ed81b2f86e932f1867022de14cab42beef Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 00:15:45 +0800 Subject: [PATCH 19/28] update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 654ec0e9..ae990559 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ Currently, the inference feature list is supported: |Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/license-plate-recognition-barrier-0001)| |Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3)| |Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)| -|Person Reidentification|| Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-reidentification-retail-0277)| +|Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-reidentification-retail-0277)| |Object Segmentation Maskrcnn| Object segmentation based on maskrcnn model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)| ## ROS interfaces and outputs From 47939b923ae69ecd8d8106bfc2d8fa73833f0391 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 00:21:39 +0800 Subject: [PATCH 20/28] update table format --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index ae990559..200ecf03 100644 --- a/README.md +++ b/README.md @@ -138,6 +138,10 @@ Currently, the package support several kinds of input resources of gaining image ## Inference Implementations Currently, the inference feature list is supported: +

+

+Inference feature correspondence table + |Inference|Description|YAML Configuration|Model Used| |-----------------------|------------------------------------------------------------------|----------------------|----------------------| |Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| @@ -150,6 +154,8 @@ Currently, the inference feature list is supported: |Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)| |Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-reidentification-retail-0277)| |Object Segmentation Maskrcnn| Object segmentation based on maskrcnn model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)| +
+

## ROS interfaces and outputs ### Topic From eb1986e015b46d05272566f94b518a378f072956 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 00:24:33 +0800 Subject: [PATCH 21/28] update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 200ecf03..3ce932e3 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ Currently, the inference feature list is supported: |Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)| |Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)| |Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| -|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)| +|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)| |Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/license-plate-recognition-barrier-0001)| |Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3)| |Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)| From 35c71bf4389fbdddb088dd98aaebe6ae4d569a81 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 18:19:50 +0800 Subject: [PATCH 22/28] refine format and add tables in README --- README.md | 74 ++++++++++++++++++++++++++----------------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 3ce932e3..0438e28c 100644 --- a/README.md +++ b/README.md @@ -126,6 +126,10 @@ The contents in **.yaml config file** should be well structured and follow the s ## Multiple Input Components Currently, the package support several kinds of input resources of gaining image data: +

+

+Input Resource Table + |Input Resource|Description| |--------------------|------------------------------------------------------------------| |StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| @@ -134,13 +138,15 @@ Currently, the package support several kinds of input resources of gaining image |Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| |Video| Any video file which can be parsed by openCV.| |IpCamera| Any RTSP server which can push video stream.| +
+

## Inference Implementations Currently, the inference feature list is supported:

-Inference feature correspondence table +Inference Feature Correspondence Table |Inference|Description|YAML Configuration|Model Used| |-----------------------|------------------------------------------------------------------|----------------------|----------------------| @@ -158,56 +164,46 @@ Currently, the inference feature list is supported:

## ROS interfaces and outputs +The inference results can be output in several types. One or more types can be enabled for any inference pipeline. ### Topic +Specific topic(s) can be generated and published according to the given inference functionalities.

-Subscribed Topic - -- Image topic: -```/camera/color/image_raw```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +Published Topic Correspondence Table + +|Inference|Published Topic| +|---|---| +|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| +|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](../../../people_msgs/msg/EmotionsStamped.msg))| +|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](../../../people_msgs/msg/AgeGenderStamped.msg))| +|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](../../../people_msgs/msg/HeadPoseStamped.msg))| +|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| +|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](../../../people_msgs/msg/ObjectsInMasks.msg))| +|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| +|Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| +|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](../../../people_msgs/msg/PersonAttributeStamped.msg))| +|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](../../../people_msgs/msg/LicensePlateStamped.msg))|

+### Service +Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
+

-Published Topic - -- Face Detection: -```/ros2_openvino_toolkit/face_detection```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) -- Emotion Recognition: -```/ros2_openvino_toolkit/emotion_detection```([people_msgs::EmotionsStamped](./people_msgs/msg/EmotionsStamped.msg)) -- Age and Gender Recognition: -```/ros2_openvino_toolkit/age_gender_detection```([people_msgs::AgeGenderStamped](./people_msgs/msg/AgeGenderStamped.msg)) -- Head Pose Estimation: -```/ros2_openvino_toolkit/head_pose_detection```([people_msgs::HeadPoseStamped](./people_msgs/msg/HeadPoseStamped.msg)) -- Object Detection: -```/ros2_openvino_toolkit/detected_objects```([object_msgs::ObjectsInBoxes](https://github.com/intel/object_msgs/blob/master/msg/ObjectsInBoxes.msg)) -- Object Segmentation: -```/ros2_openvino_toolkit/segmented_objects```([people_msgs::ObjectsInMasks](./people_msgs/msg/ObjectsInMasks.msg)) -- Person Reidentification: -```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::ReidentificationStamped](./people_msgs/msg/ReidentificationStamped.msg)) -- Vehicle Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::VehicleAttribsStamped](./people_msgs/msg/VehicleAttribsStamped.msg) -- Vehicle License Detection: -```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::LicensePlateStamped](./people_msgs/msg/LicensePlateStamped.msg) -- Rviz Output: -```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) +Service Correspondence Table + +|Inference|Service| +|---|---| +|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| +|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| +|Age Gender Detection 
Service|```/detect_age_gender```([people_msgs::srv::AgeGender](./people_msgs/srv/AgeGenderSrv.srv))| +|Headpose Detection Service|```/detect_head_pose```([people_msgs::srv::HeadPose](./people_msgs/srv/HeadPoseSrv.srv))| +|Emotion Detection Service|```/detect_emotion```([people_msgs::srv::Emotion](./people_msgs/srv/EmotionSrv.srv))|

-### Service -- Object Detection Service: -```/detect_object``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) -- Face Detection Service: -```/detect_face``` ([object_msgs::DetectObject](https://github.com/intel/object_msgs/blob/master/srv/DetectObject.srv)) -- Age & Gender Detection Service: -```/detect_age_gender``` ([people_msgs::AgeGender](./people_msgs/srv/AgeGenderSrv.srv)) -- Headpose Detection Service: -```/detect_head_pose``` ([people_msgs::HeadPose](./people_msgs/srv/HeadPoseSrv.srv)) -- Emotion Detection Service: -```/detect_emotion``` ([people_msgs::Emotion](./people_msgs/srv/EmotionSrv.srv)) - ### RViz RViz dispaly is also supported by the composited topic of original image frame with inference result. To show in RViz tool, add an image marker with the composited topic: From e4f9921864d275be380313a148879bed8cf320f5 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Sun, 18 Dec 2022 18:37:12 +0800 Subject: [PATCH 23/28] add feedback module in README --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 0438e28c..40d3d33b 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ * [Deploy in Docker](#deploy-in-docker) * [➤ Reference](#reference) * [➤ FAQ](#faq) +* [➤ Feedback](#feedback) * [➤ More Information](#more-information) # Overview @@ -246,6 +247,9 @@ See below pictures for the demo result snapshots. * [What is the basic command of Docker CLI?](https://docs.docker.com/engine/reference/commandline/docker/) * [What is the canonical C++ API for interacting with ROS?](https://docs.ros2.org/latest/api/rclcpp/) +# Feedback +* Report questions, issues and suggestions, using: [issue](https://github.com/intel/ros2_openvino_toolkit/issues). 
+ # More Information * ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw From 1c4c79fc03a68e6a8cdaa1a84ff79b3ecf2769ba Mon Sep 17 00:00:00 2001 From: wujiawei Date: Mon, 19 Dec 2022 20:11:02 +0800 Subject: [PATCH 24/28] fix typo in README --- README.md | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 40d3d33b..8020fa4c 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ |dashing|Dashing|V2022.1, V2022.2|[dashing branch](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Ubuntu 18.04| |ros2|Galactic, Foxy, Humble|V2022.1, V2022.2|[ros2 branch](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Ubuntu 20.04, Ubuntu 22.04| |foxy|Foxy|V2021.4|[foxy branch](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Ubuntu 20.04| -|galactic-ov2021.4|Galactic|V2021.4|[galactic branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| +|galactic-ov2021.4|Galactic|V2021.4|[galactic-ov2021.4 branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| ## Inference Features Supported * [x] Object Detection @@ -47,7 +47,7 @@ * [x] Vehicle License Plate Detection # Prerequisite for ros2 Branch -* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) +* Processor: A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) * OS: Ubuntu 20.04, Ubuntu 22.04 * ROS2: Foxy, Galactic, Humble * OpenVINO: V2022.1, V2022.2 @@ -77,7 +77,7 @@ See more from [here](https://github.com/openvinotoolkit/openvino) for Intel Open
ROS OpenVINO Runtime Framework -- **ROS OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. +- **ROS OpenVINO Runtime Framework** is the main body of this repo. It provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to simplify launching, configuration, data analysis and re-use.

@@ -85,7 +85,7 @@ See more from [here](https://github.com/openvinotoolkit/openvino) for Intel Open
ROS Input & Output
-- **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework.
+- **Diverse Input resources** are data resources to be inferred and analyzed with the OpenVINO framework.
- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results.

@@ -94,12 +94,12 @@ See more from [here](https://github.com/openvinotoolkit/openvino) for Intel Open
Optimized Models -- **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. +- **Optimized Models** provided by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization. It also supports graph freeze and graph summarize along with dynamic input freezing.

## Logic Flow -From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. +From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The following picture depicts how these entities co-work together when the corresponding program is launched. ![Logic_Flow](./data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") @@ -125,7 +125,7 @@ The contents in **.yaml config file** should be well structured and follow the s # Supported Features ## Multiple Input Components -Currently, the package support several kinds of input resources of gaining image data: +Currently, the package supports several input resources for acquiring image data. The following tables are listed:

@@ -143,7 +143,7 @@ Currently, the package support several kinds of input resources of gaining image

## Inference Implementations -Currently, the inference feature list is supported: +Currently, the corresponding relation of supported inference features, models used and yaml configurations are listed as follows:

@@ -206,16 +206,17 @@ Several ROS2 Services are created, expecting to be used in client/server mode, e

### RViz -RViz dispaly is also supported by the composited topic of original image frame with inference result. +RViz display is also supported by the composited topic of original image frame with inference result. To show in RViz tool, add an image marker with the composited topic: ```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) ### Image Window OpenCV based image window is natively supported by the package. -To enable window, Image Window output should be added into the output choices in .yaml config file. see [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for checking/adding this feature in your launching. +To enable window, Image Window output should be added into the output choices in .yaml config file. Refer to [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for more information about checking/adding this feature in your launching. ## Demo Result Snapshots -See below pictures for the demo result snapshots. +For the snapshot of demo results, refer to the following picture. + * Face detection input from standard camera ![face_detection_demo_image](./data/images/face_detection.png "face detection demo image") @@ -225,15 +226,15 @@ See below pictures for the demo result snapshots. * Object segmentation input from video ![object_segmentation_demo_video](./data/images/object_segmentation.gif "object segmentation demo video") -* Person Reidentification input from standard camera +* Person reidentification input from standard camera ![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") # Installation and Launching -## Deploy in local environment +## Deploy in Local Environment * Refer to the quick start document for [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & lauching instructions. 
* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. -## Deploy in docker +## Deploy in Docker * Refer to the docker instruction for [docker_instructions](./docker/docker_instructions_ov2.0.md) for detailed information about building docker image and launching. * Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. @@ -251,7 +252,7 @@ See below pictures for the demo result snapshots. * Report questions, issues and suggestions, using: [issue](https://github.com/intel/ros2_openvino_toolkit/issues). # More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw +* ROS2 OpenVINO discription written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw ###### *Any security issue should be reported using process at https://01.org/security* From 6fd65cf98b3cd72dc53dd1dd3ac4273d9af06ff7 Mon Sep 17 00:00:00 2001 From: wujiawei Date: Wed, 21 Dec 2022 10:49:44 +0800 Subject: [PATCH 25/28] add prerequisite table and fix links --- README.md | 54 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 8020fa4c..aee1a1df 100644 --- a/README.md +++ b/README.md @@ -28,12 +28,12 @@ * [x] ROS2 Foxy * [x] ROS2 Humble -|Branch Name|ROS2 Version Supported|Openvino Version|Corresponding Branch Link|OS Version| -|-----------------------|-----------------------|--------------------------------|----------------------|----------------------| -|dashing|Dashing|V2022.1, V2022.2|[dashing branch](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Ubuntu 18.04| -|ros2|Galactic, Foxy, Humble|V2022.1, V2022.2|[ros2 branch](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Ubuntu 20.04, Ubuntu 22.04| -|foxy|Foxy|V2021.4|[foxy 
branch](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Ubuntu 20.04| -|galactic-ov2021.4|Galactic|V2021.4|[galactic-ov2021.4 branch](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Ubuntu 20.04| +|Branch Name|ROS2 Version Supported|Openvino Version|OS Version| +|-----------------------|-----------------------|--------------------------------|----------------------| +|[ros2](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Galactic, Foxy, Humble|V2022.1, V2022.2|Ubuntu 20.04, Ubuntu 22.04| +|[dashing](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Dashing|V2022.1, V2022.2|Ubuntu 18.04| +|[foxy-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Foxy|V2021.4|Ubuntu 20.04| +|[galactic-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Galactic|V2021.4|Ubuntu 20.04| ## Inference Features Supported * [x] Object Detection @@ -46,14 +46,15 @@ * [x] Vehicle Attribute Detection * [x] Vehicle License Plate Detection -# Prerequisite for ros2 Branch -* Processor: A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.) -* OS: Ubuntu 20.04, Ubuntu 22.04 -* ROS2: Foxy, Galactic, Humble -* OpenVINO: V2022.1, V2022.2 -* Python: 3.6, 3.7, 3.8, 3.9 -* [Optional] RealSense D400 Series Camera -* [Optional] Intel NCS2 Stick +# Prerequisite + +|Prerequisite|Mandatory?|Description| +|-----------------------|-----------------------|--------------------------------| +|**Processor**|Mandatory|A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.)| +|**OS**|Mandatory|We only tested this project under Ubuntu distros. 
It is recommended to install the corresponding Ubuntu Distro according to the ROS distro that you select to use. For example: Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.|
+|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You may find the corresponding branch from the table above in section ROS2 Version Supported.|
+|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section "ROS2 Version Supported".|
+|**Realsense Camera**|Optional|Realsense Camera is optional, you may choose these alternatives as the input: Standard Camera, ROS Image Topic, Video/Image File or RTSP camera.|

# Introduction
## Design Architecture
@@ -135,7 +136,7 @@ Currently, the package supports several input resources for acquiring image data
|--------------------|------------------------------------------------------------------|
|StandardCamera|Any RGB camera with USB port supporting. 
Currently only the first USB camera if many are connected.| |RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.| -|RealSenseCameraTopic| Any ROS topic which is structured in image message.| +|ImageTopic| Any ROS topic which is structured in image message.| |Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| |Video| Any video file which can be parsed by openCV.| |IpCamera| Any RTSP server which can push video stream.| @@ -151,16 +152,16 @@ Currently, the corresponding relation of supported inference features, models us |Inference|Description|YAML Configuration|Model Used| |-----------------------|------------------------------------------------------------------|----------------------|----------------------| -|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| -|Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/emotions-recognition-retail-0003)| -|Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/age-gender-recognition-retail-0013)| -|Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/head-pose-estimation-adas-0001)| -|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-ssd)| -|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/license-plate-recognition-barrier-0001)| -|Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/deeplabv3)| -|Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)| -|Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-reidentification-retail-0277)| -|Object Segmentation Maskrcnn| Object segmentation based on maskrcnn model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)| +|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/head-pose-estimation-adas-0001)| +|Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/emotions-recognition-retail-0003)| +|Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/age-gender-recognition-retail-0013)| +|Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/head-pose-estimation-adas-0001)| +|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/public/mobilenet-ssd)| +|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/license-plate-recognition-barrier-0001)| +|Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/public/deeplabv3)| +|Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/person-detection-retail-0013)| +|Person Reidentification|Person reidentification based on object detection.|[pipeline_person_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/intel/person-reidentification-retail-0277)| +|Object Segmentation Maskrcnn| Object segmentation and detection based on the Mask R-CNN model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_v2_coco_2018_01_28](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)|

@@ -181,6 +182,7 @@ Specific topic(s) can be generated and published according to the given inferenc |Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](../../../people_msgs/msg/HeadPoseStamped.msg))| |Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| |Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](../../../people_msgs/msg/ObjectsInMasks.msg))| +|Object Segmentation Maskrcnn|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](../../../people_msgs/msg/ObjectsInMasks.msg))| |Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| |Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| |Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](../../../people_msgs/msg/PersonAttributeStamped.msg))| @@ -239,7 +241,7 @@ For the snapshot of demo results, refer to the following picture. * Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. # Reference -* Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master) for detailed model structure and demo samples. +* Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/1) for detailed model structure and demo samples. 
* OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. # FAQ From 3c4c3a5d4b9b655a6ade362d9a1f87134184cd0c Mon Sep 17 00:00:00 2001 From: wujiawei Date: Wed, 21 Dec 2022 13:10:38 +0800 Subject: [PATCH 26/28] fix for topic table --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index aee1a1df..e107d1f7 100644 --- a/README.md +++ b/README.md @@ -185,7 +185,7 @@ Specific topic(s) can be generated and published according to the given inferenc |Object Segmentation Maskrcnn|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](../../../people_msgs/msg/ObjectsInMasks.msg))| |Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| |Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](../../../people_msgs/msg/ReidentificationStamped.msg))| -|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](../../../people_msgs/msg/PersonAttributeStamped.msg))| +|Vehicle Detection|```/ros2_openvino_toolkit/detected_vehicles_attribs```([people_msgs::msg::VehicleAttribsStamped](../../../people_msgs/msg/PersonAttributeStamped.msg))| |Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](../../../people_msgs/msg/LicensePlateStamped.msg))|

From a9bbe5a0855b38038b76d5a8e478a581bf3d9aaf Mon Sep 17 00:00:00 2001 From: wujiawei Date: Wed, 21 Dec 2022 13:18:20 +0800 Subject: [PATCH 27/28] fix typo --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e107d1f7..89a96c9d 100644 --- a/README.md +++ b/README.md @@ -51,9 +51,9 @@ |Prerequisite|Mandatory?|Description| |-----------------------|-----------------------|--------------------------------| |**Processor**|Mandatory|A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.)| -|**OS**|Mandatory|We only tested this project under Ubuntu distros. It is recommended to install the corresponding Ubuntu Distro according to the ROS distro that you select to use. For example: Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.| -|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You man find the corresponding branch from the table above in section ROS2 Version Supported.| -|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section "ROS2 Version Supported".| +|**OS**|Mandatory|We only tested this project under Ubuntu distros. It is recommended to install the corresponding Ubuntu Distro according to the ROS distro that you select to use. **For example: Ubuntu 18.04 for dashing, Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.**| +|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. 
You may find the corresponding branch from the table above in section **ROS2 Version Supported**.| +|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section **ROS2 Version Supported**.| |**Realsense Camera**|Optional|Realsense Camera is optional, you may choose these alternatives as the input: Standard Camera, ROS Image Topic, Video/Image File or RTSP camera.| # Introduction From f3967a468bf40d8fca2f090aeec4f90fc969e45e Mon Sep 17 00:00:00 2001 From: wujiawei Date: Wed, 21 Dec 2022 13:20:39 +0800 Subject: [PATCH 28/28] add link --- README.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 89a96c9d..c163b5d8 100644 --- a/README.md +++ b/README.md @@ -23,10 +23,6 @@ # Overview ## ROS2 Version Supported -* [x] ROS2 Dashing -* [x] ROS2 Galactic -* [x] ROS2 Foxy -* [x] ROS2 Humble |Branch Name|ROS2 Version Supported|Openvino Version|OS Version| |-----------------------|-----------------------|--------------------------------|----------------------| @@ -52,8 +48,8 @@ |-----------------------|-----------------------|--------------------------------| |**Processor**|Mandatory|A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.)| |**OS**|Mandatory|We only tested this project under Ubuntu distros. It is recommended to install the corresponding Ubuntu Distro according to the ROS distro that you select to use. **For example: Ubuntu 18.04 for dashing, Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.**| -|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. 
You may find the corresponding branch from the table above in section **ROS2 Version Supported**.| -|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section **ROS2 Version Supported**.| +|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You may find the corresponding branch from the table above in section [**ROS2 Version Supported**](#ros2-version-supported).| +|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section [**ROS2 Version Supported**](#ros2-version-supported).| |**Realsense Camera**|Optional|Realsense Camera is optional, you may choose these alternatives as the input: Standard Camera, ROS Image Topic, Video/Image File or RTSP camera.| # Introduction