diff --git a/.ci_local_test/Jenkinsfile b/.ci_local_test/Jenkinsfile index 8dcb84ac..2264f86e 100644 --- a/.ci_local_test/Jenkinsfile +++ b/.ci_local_test/Jenkinsfile @@ -3,28 +3,62 @@ pipeline { environment { // Test_Server is the local test machine. Test_Server = "robotics-testNUC11" - WORKSPACE_PATH = "/home/intel/ros2_openvino_toolkit" + Test_WORKSPACE = "/home/intel/ros2_openvino_toolkit_test" } stages { - stage('Test Ros2 Galatic') { + stage('Check The Conflict') { steps { script { - def flag = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && docker images | grep ros2_openvino_test'", returnStatus: true - if (flag == 0) { - docker rmi -f ros2_openvino_test - } - def test_result = sh script: "ssh intel@$Test_Server 'cd $WORKSPACE_PATH && ./self_host_test_ros2_openvino.sh '", returnStatus: true + sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./check_conflict.sh'", returnStatus: true + echo "no conflict, the task continue" + } + } + } + stage('Get The env') { + steps { + script { + // rm the old env + sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/env'", returnStatus: true + // get new env + sh script: "export | tee -a env", returnStatus: true + sh script: "scp -r env intel@$Test_Server:$Test_WORKSPACE", returnStatus: true + } + } + } + stage('Moving The Code To Test Machine') { + steps { + script { + sh script: "ssh intel@$Test_Server 'rm -rf $Test_WORKSPACE/ros2_openvino_toolkit'", returnStatus: true + sh script: "scp -r $WORKSPACE intel@$Test_Server:$Test_WORKSPACE/ros2_openvino_toolkit", returnStatus: true + // sh script: "ssh intel@$Test_Server 'docker cp $Test_WORKSPACE/ros2_openvino_toolkit:/root/catkin_ws/src'", returnStatus: true + } + } + } + stage('Klocwork Code check') { + steps { + script { + echo 'klocwork code check' + sh script: "sudo docker cp $WORKSPACE klocwork_test:/home/intel/catkin_ws/src/ros2_openvino_toolkit", returnStatus: true + sh script: "sudo docker exec -i klocwork_test bash -c 'source ~/.bashrc && 
cd catkin_ws && ./klocwork_scan.sh'", returnStatus: true + } + + } + } + stage('The Ros2_openvino container run') { + steps { + script { + def test_result = sh script: "ssh intel@$Test_Server 'cd $Test_WORKSPACE && ./self_container_ros2_openvino_test.sh '", returnStatus: true if (test_result == 0) { echo "test pass" } else { echo "test fail" exit -1 } - + } + } } - } } diff --git a/.ci_local_test/ros2_openvino_toolkit_test/Dockerfile b/.ci_local_test/ros2_openvino_toolkit_test/Dockerfile new file mode 100644 index 00000000..57df6247 --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/Dockerfile @@ -0,0 +1,68 @@ +# ros2 openvino toolkit env master f1b1ca4d914186a1881b87f103be9c6e910c9d80 + +ARG ROS_PRE_INSTALLED_PKG +FROM osrf/ros:${ROS_PRE_INSTALLED_PKG} +ARG VERSION + +# setting proxy env --option +# If needed, enable the below ENV setting by correct proxies. +# ENV HTTP_PROXY="your_proxy" +# ENV HTTPS_PROXY="your_proxy" +# ENV FTP_PROXY="your_proxy" + +# author information +LABEL author="Jiawei Wu " + +# default shell type +SHELL ["/bin/bash", "-c"] + +# ignore the warning +ARG DEBIAN_FRONTEND=noninteractive +ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 +RUN apt-get update && apt-get install --assume-yes apt-utils + +# install openvino 2022.3 +# https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html +RUN apt update && apt install --assume-yes curl wget gnupg2 lsb-release +RUN wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && \ +apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && echo "deb https://apt.repos.intel.com/openvino/2022 focal main" | tee /etc/apt/sources.list.d/intel-openvino-2022.list +RUN apt update && apt-cache search openvino && apt install -y openvino-2022.3.0 + +# install librealsense2 +RUN apt-get install -y --no-install-recommends \ +software-properties-common +# https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md +# Make sure you set 
http-proxy in below commands if your environment needs. +# RUN apt-key adv --keyserver-options http-proxy=your_proxy --keyserver keys.gnupg.net --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver-options http-proxy=your_proxy --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u \ +&& apt-get install -y --no-install-recommends \ +librealsense2-dkms \ +librealsense2-utils \ +librealsense2-dev \ +librealsense2-dbg \ +libgflags-dev \ +libboost-all-dev \ +&& rm -rf /var/lib/apt/lists/* + +# other dependencies +RUN apt-get update && apt-get install -y python3-pip && python3 -m pip install -U \ +numpy \ +networkx \ +pyyaml \ +requests \ +&& apt-get install -y --no-install-recommends libboost-all-dev +WORKDIR /usr/lib/x86_64-linux-gnu +RUN pip install --upgrade pip +RUN pip install openvino-dev[tensorflow2]==2022.3 + +# build ros2 openvino toolkit +WORKDIR /root +RUN mkdir -p catkin_ws/src +WORKDIR /root/catkin_ws/src +RUN git init && git clone https://github.com/intel/ros2_object_msgs.git \ +&& git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git +RUN apt-get install ros-${VERSION}-diagnostic-updater +WORKDIR /root/catkin_ws +RUN source /opt/ros/${VERSION}/setup.bash && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh new file mode 100755 index 00000000..956ded09 --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/docker_run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +export DISPLAY=:0 + +export work_dir=$PWD + + +function run_container() { + + 
docker images | grep ros2_openvino_docker + + if [ $? -eq 0 ] + then + echo "the image of ros2_openvino_docker:01 existence" + docker rmi -f ros2_openvino_docker:01 + fi + + docker ps -a | grep ros2_openvino_container + if [ $? -eq 0 ] + then + docker rm -f ros2_openvino_container + fi + # Using jenkins server ros2_openvino_toolkit code instead of git clone code. + cd $work_dir && sed -i '/ros2_openvino_toolkit.git/d' Dockerfile + # remove the "\" + cd $work_dir && sed -i 's#ros2_object_msgs.git \\#ros2_object_msgs.git#' Dockerfile + # add the jpg for test. + cd $work_dir && sed -i '$i COPY jpg /root/jpg' Dockerfile + + cd $work_dir && docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic -t ros2_openvino_docker:01 . + docker run -i --privileged=true --device=/dev/dri -v $work_dir/ros2_openvino_toolkit:/root/catkin_ws/src/ros2_openvino_toolkit -v /tmp/.X11-unix:/tmp/.X11-unix -v $HOME/.Xauthority:/root/.Xauthority -e GDK_SCALE -v $work_dir/test_cases:/root/test_cases --name ros2_openvino_container ros2_openvino_docker:01 bash -c "cd /root/test_cases && ./run.sh galactic" + +} + +run_container +if [ $? 
-ne 0 ] +then + echo "Test fail" + exit -1 +fi + + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg new file mode 100644 index 00000000..f53b0339 Binary files /dev/null and b/.ci_local_test/ros2_openvino_toolkit_test/jpg/car.jpg differ diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh new file mode 100755 index 00000000..0efee6ce --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/config.sh @@ -0,0 +1,13 @@ +#/bin/bash + +if [[ $1 == '' ]] +then + export ros2_branch=galactic +else + export ros2_branch=$1 +fi + +export dynamic_vino_sample=/root/catkin_ws/install/openvino_node/share/openvino_node + + +source /opt/ros/$ros2_branch/setup.bash diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh new file mode 100755 index 00000000..4ff107b6 --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/ros2_openvino_tool_model_download.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +mkdir -p /opt/openvino_toolkit/models +#apt install -y python-pip +apt install -y python3.8-venv +cd ~ && python3 -m venv openvino_env && source openvino_env/bin/activate +python -m pip install --upgrade pip +pip install openvino-dev[tensorflow2,mxnet,caffe] + + +#Download the optimized Intermediate Representation (IR) of model (execute once) +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/ + +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list && omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert + + +#Copy label files (execute once) +cp 
~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/ +cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/ +cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/ +cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/ +cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/ +cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 + +mkdir -p /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/ +cp /opt/openvino_toolkit/models/convert/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/* /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/ + +cd /root/test_cases/ && ./yolov5_model_download.sh + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh new file mode 100755 index 00000000..844fefc6 --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/run.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +export ros2_branch=$1 +if [[ $1 == '' ]] +then + export ros2_branch=galactic +else + export ros2_branch=$1 +fi +source /root/test_cases/config.sh $ros2_branch + +cd /root/catkin_ws && colcon build --symlink-install +cd /root/catkin_ws && source ./install/local_setup.bash + +apt-get 
update +# apt-get install -y ros-$ros2_branch-diagnostic-updater +apt-get install python3-defusedxml +apt-get install -y python3-pip +pip3 install XTestRunner==1.5.0 + +cd /root/test_cases && ./ros2_openvino_tool_model_download.sh +mkdir -p /root/test_cases/log +echo "===cat pipeline_people_ci.yaml" +cat /root/catkin_ws/install/openvino_node/share/openvino_node/param/pipeline_people_ci.yaml + +cd /root/test_cases/unittest && python3 run_all.py +result=$? +#echo "cat segmentation maskrcnn" +#cat /root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log +if [ $result -ne 0 ] +then + exit -1 +fi + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py new file mode 100755 index 00000000..89c5d84e --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/run_all.py @@ -0,0 +1,45 @@ +#!/usr/opt/python3 +import unittest +from test_cases import Test_Cases +from XTestRunner import HTMLTestRunner + +def main(): + + suite = unittest.TestSuite() + + all_cases = [Test_Cases('test_1_pipeline_people_ci'), + Test_Cases('test_2_pipeline_reidentification_ci'), + Test_Cases('test_3_pipeline_image_ci'), + Test_Cases('test_4_pipeline_segmentation_ci'), + Test_Cases('test_5_pipeline_vehicle_detection_ci'), + Test_Cases('test_6_pipeline_person_attributes_ci'), + Test_Cases('test_7_pipeline_segmentation_image_ci'), + Test_Cases('test_8_pipeline_object_yolo_ci')] + suite.addTests(all_cases) + + with (open('./result.html', 'wb')) as fp: + runner = HTMLTestRunner( + stream=fp, + title='ROS2 Openvino Test Report', + description='Test ROS2-galactic openvino all cases', + language='en', + ) + result = runner.run( + testlist=suite, + rerun=1, + save_last_run=False + ) + + failure_count = len(all_cases) - result.success_count + print(f"all count: {len(all_cases)}") + print(f"success count: {result.success_count}") + print(f"failure count: {failure_count}") + if 
result.success_count == len(all_cases) and failure_count == 0: + print(f"Test ALL PASS") + else: + print(f"Test FAIL") + exit(-1) + +if __name__=="__main__": + main() + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py new file mode 100755 index 00000000..c71bbd2c --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/unittest/test_cases.py @@ -0,0 +1,109 @@ +#from asyncio import sleep +from time import sleep +import unittest +import subprocess +import pdb +import os + +class Test_Cases(unittest.TestCase): + + def test_pipeline(self, launch_file, log_file, topic_list=['/rosout']): + print(f"{log_file} topic_list", topic_list) + subprocess.Popen([f"ros2 launch openvino_node {launch_file} > {log_file} &"], shell=True) + for topic in topic_list: + name=topic.split('/', -1)[-1] + sleep(3) + print(f"{topic} {name}.log") + subprocess.Popen([f"ros2 topic echo {topic} > {name}.log &"], shell=True) + if name == "segmented_obejcts": + subprocess.Popen([f"ros2 topic echo {topic} >> {name}.log &"], shell=True) + kill_ros2_process() + print(f"kill the test process done") + with open(f"{log_file}") as handle: + log = handle.read() + check_log = log.split("user interrupted with ctrl-c (SIGINT)")[0] + if f"pipeline_object_yolo" not in log_file: + self.assertIn('Analyzing Detection results', check_log) + self.assertNotIn('ERROR', check_log) + for topic in topic_list: + name = topic.split('/', -1)[-1] + with open(f"{name}.log") as topic_handle: + topic_info = topic_handle.read() + if "header" not in topic_info: + print(f"the {launch_file} topic {name} failed") + else: + print(f"the {launch_file} topic {name} pass") + self.assertIn("header", topic_info) + print(f"check all done") + + + def test_1_pipeline_people_ci(self): + topic_ls = ["/ros2_openvino_toolkit/age_genders_Recognition", \ + "/ros2_openvino_toolkit/headposes_estimation", \ + 
"/ros2_openvino_toolkit/face_detection", \ + "/ros2_openvino_toolkit/emotions_recognition"] + launch_file = f"pipeline_people_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_people_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_2_pipeline_reidentification_ci(self): + topic_ls = ["/ros2_openvino_toolkit/reidentified_persons",] + launch_file = f"pipeline_reidentification_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_reidentification_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_3_pipeline_image_ci(self): + topic_ls = ["/ros2_openvino_toolkit/emotions_recognition", \ + "/ros2_openvino_toolkit/headposes_estimation", \ + "/ros2_openvino_toolkit/people/age_genders_Recognition"] + launch_file = f"pipeline_image_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_image_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_4_pipeline_segmentation_ci(self): + topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"] + launch_file = f"pipeline_segmentation_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_segmentation_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_5_pipeline_vehicle_detection_ci(self): + topic_ls = ["/ros2_openvino_toolkit/detected_license_plates", + "/ros2_openvino_toolkit/detected_vehicles_attribs"] + launch_file = f"pipeline_vehicle_detection_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_vehicle_detection_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_6_pipeline_person_attributes_ci(self): + topic_ls = ["/ros2_openvino_toolkit/detected_objects", \ + "/ros2_openvino_toolkit/person_attributes"] + launch_file = f"pipeline_person_attributes_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_person_attributes_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def 
test_7_pipeline_segmentation_image_ci(self): + topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"] + launch_file = f"pipeline_segmentation_image_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_segmentation_image_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + def test_8_pipeline_object_yolo_ci(self): + topic_ls = ["/ros2_openvino_toolkit/detected_objects"] + launch_file = f"pipeline_object_yolo_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_object_yolo_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + @unittest.skip("skip case") + def test_9_pipeline_segmentation_maskrcnn_ci(self): + topic_ls = ["/ros2_openvino_toolkit/segmented_obejcts"] + launch_file = f"pipeline_segmentation_maskrcnn_ci_test.py" + log_file = f"/root/test_cases/log/pipeline_segmentation_maskrcnn_test_ci.log" + self.test_pipeline(launch_file, log_file, topic_list=topic_ls) + + +def kill_ros2_process(sleep_z=30): + sleep(sleep_z) + process_result = subprocess.Popen(["ps -ef | grep ros2 | grep -v 'grep' | awk '{print $2}'"],stdout=subprocess.PIPE, shell=True).communicate() + print(process_result[0].decode('utf-8').replace('\n', ' ')) + kill_process = 'kill -9 ' + process_result[0].decode('utf-8').replace('\n', ' ') + subprocess.Popen([kill_process], shell=True).communicate() + diff --git a/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh new file mode 100755 index 00000000..f3e50d3b --- /dev/null +++ b/.ci_local_test/ros2_openvino_toolkit_test/test_cases/yolov5_model_download.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +#1. 
Copy YOLOv5 Repository from GitHub +cd /root && git clone https://github.com/ultralytics/yolov5.git + +#Set Environment for Installing YOLOv5 + +cd yolov5 +python3 -m venv yolo_env # Create a virtual python environment +source yolo_env/bin/activate # Activate environment +pip install -r requirements.txt # Install yolov5 prerequisites +pip install wheel +pip install onnx + +# Download PyTorch Weights +mkdir -p /root/yolov5/model_convert && cd /root/yolov5/model_convert +wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt + +cd /root/yolov5 +python3 export.py --weights model_convert/yolov5n.pt --include onnx + + +#2. Convert ONNX files to IR files +cd /root/yolov5/ +python3 -m venv ov_env # Create openVINO virtual environment +source ov_env/bin/activate # Activate environment +python -m pip install --upgrade pip # Upgrade pip +pip install openvino[onnx]==2022.3.0 # Install OpenVINO for ONNX +pip install openvino-dev[onnx]==2022.3.0 # Install OpenVINO Dev Tool for ONNX + + +cd /root/yolov5/model_convert +mo --input_model yolov5n.onnx + + +mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/ +sudo cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/ + diff --git a/.github/workflows/basic_func_tests.yml b/.github/workflows/basic_func_tests.yml new file mode 100644 index 00000000..d94a5357 --- /dev/null +++ b/.github/workflows/basic_func_tests.yml @@ -0,0 +1,36 @@ +# This is a basic workflow to help you get started with Actions + +name: Basic_Func_CI + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the "master" branch + push: + branches: [ "master", "ros2" ] + pull_request: + branches: [ "master", "ros2" ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a 
single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-20.04 + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v3 + + # Runs a set of commands using the runners shell + - name: ros2_openvino_toolkit_test + run: | + pwd + mkdir -p ../workspace_ci + cp -r ${GITHUB_WORKSPACE}/.ci_local_test/ros2_openvino_toolkit_test ../workspace_ci + \cp ${GITHUB_WORKSPACE}/docker/Dockerfile ../workspace_ci/ros2_openvino_toolkit_test/Dockerfile + cp -r ${GITHUB_WORKSPACE} ../workspace_ci/ros2_openvino_toolkit_test + cd ../workspace_ci/ros2_openvino_toolkit_test && ./docker_run.sh diff --git a/README.md b/README.md index 26f481e0..33d0f8be 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,40 @@ # ros2_openvino_toolkit -ROS2 Version supported: +# Table of Contents +* [➤ Overview](#overview) + * [ROS2 Version Supported](#ros2-version-supported) + * [Inference Features Supported](#inference-features-supported) +* [➤ Prerequisite](#prerequisite) +* [➤ Introduction](#introduction) + * [Design Architecture](#design-architecture) + * [Logic Flow](#logic-flow) +* [➤ Supported Features](#supported-features) + * [Multiple Input Components](#multiple-input-components) + * [Inference Implementations](#inference-implementations) + * [ROS Interfaces and Outputs](#ros-interfaces-and-outputs) + * [Demo Result Snapshots](#demo-result-snapshots) +* [➤ Installation & Launching](#installation-and-launching) + * [Deploy in Local Environment](#deploy-in-local-environment) + * [Deploy in Docker](#deploy-in-docker) +* [➤ Reference](#reference) +* [➤ FAQ](#faq) +* [➤ Feedback](#feedback) +* [➤ More Information](#more-information) -* [x] ROS2 Dashing -* [x] ROS2 Eloquent -* [x] ROS2 Foxy -* [x] ROS2 Galactic +# Overview +## ROS2 Version Supported -Inference Features supported: +|Branch Name|ROS2 Version 
Supported|Openvino Version|OS Version| +|-----------------------|-----------------------|--------------------------------|----------------------| +|[ros2](https://github.com/intel/ros2_openvino_toolkit/tree/ros2)|Galactic, Foxy, Humble|V2022.1, V2022.2, V2022.3|Ubuntu 20.04, Ubuntu 22.04| +|[dashing](https://github.com/intel/ros2_openvino_toolkit/tree/dashing)|Dashing|V2022.1, V2022.2, V2022.3|Ubuntu 18.04| +|[foxy-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/foxy)|Foxy|V2021.4|Ubuntu 20.04| +|[galactic-ov2021.4](https://github.com/intel/ros2_openvino_toolkit/tree/galactic-ov2021.4)|Galactic|V2021.4|Ubuntu 20.04| +## Inference Features Supported * [x] Object Detection * [x] Face Detection -* [x] Age-Gender Recognition +* [x] Age Gender Recognition * [x] Emotion Recognition * [x] Head Pose Estimation * [x] Object Segmentation @@ -19,31 +42,215 @@ Inference Features supported: * [x] Vehicle Attribute Detection * [x] Vehicle License Plate Detection -## Introduction +# Prerequisite -The OpenVINO™ (Open visual inference and neural network optimization) toolkit provides a ROS-adaptered runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. +|Prerequisite|Mandatory?|Description| +|-----------------------|-----------------------|--------------------------------| +|**Processor**|Mandatory|A platform with Intel processors assembled. (Refer to [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2020-3-lts-relnotes.html) for the full list of Intel processors supported.)| +|**OS**|Mandatory|We only tested this project under Ubuntu distros. It is recommended to install the corresponding Ubuntu Distro according to the ROS distro that you select to use. 
**For example: Ubuntu 18.04 for dashing, Ubuntu 20.04 for Foxy and Galactic, Ubuntu 22.04 for Humble.**| +|**ROS2**|Mandatory|We have already supported active ROS distros (Humble, Galactic, Foxy and Dashing (deprecated)). Choose the one matching your needs. You may find the corresponding branch from the table above in section [**ROS2 Version Supported**](#ros2-version-supported).| +|**OpenVINO**|Mandatory|The version of OpenVINO toolkit is decided by the OS and ROS2 distros you use. See the table above in Section [**ROS2 Version Supported**](#ros2-version-supported).| +|**Realsense Camera**|Optional|Realsense Camera is optional; you may choose these alternatives as the input: Standard Camera, ROS Image Topic, Video/Image File or RTSP camera.| +# Introduction +## Design Architecture +From the view of hierarchical architecture design, the package is divided into different functional components, as shown in the picture below. + +![OpenVINO_Architecture](./data/images/design_arch.PNG "OpenVINO RunTime Architecture") + +

+

+Intel® OpenVINO™ toolkit + +- **Intel® OpenVINO™ toolkit** provides a ROS-adapted runtime framework of neural network which quickly deploys applications and solutions for vision inference. By leveraging Intel® OpenVINO™ toolkit and corresponding libraries, this ROS2 runtime framework extends workloads across Intel® hardware (including accelerators) and maximizes performance. + - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. + - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. + - Speed development using optimized OpenCV* and OpenVX* functions. See more from [here](https://github.com/openvinotoolkit/openvino) for Intel OpenVINO™ introduction. +
+

+ +

+

+ROS OpenVINO Runtime Framework + +- **ROS OpenVINO Runtime Framework** is the main body of this repo. It provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to simplify launching, configuration, data analysis and re-use. +
+

+ +

+

+ROS Input & Output + +- **Diverse Input resources** are data resources to be inferred and analyzed with the OpenVINO framework. +- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inference results. +
+

+ +

+

+Optimized Models + +- **Optimized Models** provided by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization. It also supports graph freeze and graph summarize along with dynamic input freezing. +
+

+ +## Logic Flow +From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The following picture depicts how these entities co-work together when the corresponding program is launched. + +![Logic_Flow](./data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") + +Once a corresponding program is launched with a specified .yaml config file passed in the .launch file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. + +The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for how to create or edit the config files. + +

+

+Pipeline + +**Pipeline** fulfills the whole data handling process: initializing Input Component for image data gathering and formatting; building up the structured inference network and passing the formatted data through the inference network; transferring the inference results and handling output, etc. +
+

+ +

+

+Pipeline manager + +**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. +
+

+# Supported Features +## Multiple Input Components +Currently, the package supports several input resources for acquiring image data. They are listed in the following table: + +

+

+Input Resource Table + +|Input Resource|Description| +|--------------------|------------------------------------------------------------------| +|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| +|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.| +|ImageTopic| Any ROS topic which is structured in image message.| +|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| +|Video| Any video file which can be parsed by openCV.| +|IpCamera| Any RTSP server which can push video stream.| +
+

-## Prerequisite +## Inference Implementations +Currently, the corresponding relation of supported inference features, models used and yaml configurations are listed as follows: -* Processor: A platform with Intel processors assembled. (see [here](https://software.intel.com/content/www/us/en/develop/articles/openvino-2021-4-lts-relnotes.html) for the full list of Intel processors supported.) -* OS: Ubuntu 20.04 -* ROS2: Galactic Geochelone -* OpenVINO: V2021.4, see [the release notes](https://software.intel.com/content/www/us/en/develop/articles/openvino-relnotes.html) for more info. -* [Optional] RealSense D400 Series Camera -* [Optional] Intel NCS2 Stick -## Tables of contents -* [Design Architecture and Logic Flow](./doc/tables_of_contents/Design_Architecture_and_logic_flow.md) -* [Supported Features](./doc/tables_of_contents/supported_features/Supported_features.md) -* Tutorials - - [How to configure a inference pipeline?](./doc/tables_of_contents/tutorials/configuration_file_customization.md) - - [How to create multiple pipelines in a process?](./doc/tables_of_contents/tutorials/Multiple_Pipelines.md) +

+

+Inference Feature Correspondence Table -## Installation & Launching -See Getting Start Pages for [ROS2 Dashing](./doc/getting_started_with_Dashing.md) or [ROS2 Foxy](./doc/getting_started_with_Foxy_Ubuntu20.04.md) or [ROS2 Galactic](./doc/getting_started_with_Galactic_Ubuntu20.04.md) for detailed installation & lauching instructions. +|Inference|Description|YAML Configuration|Model Used| +|-----------------------|------------------------------------------------------------------|----------------------|----------------------| +|Face Detection| Object Detection task applied to face recognition using a sequence of neural networks.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[face-detection-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/face-detection-adas-0001)
[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)
[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)
[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)| +|Emotion Recognition| Emotion recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[emotions-recognition-retail-0003](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/emotions-recognition-retail-0003)| +|Age & Gender Recognition| Age and gender recognition based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[age-gender-recognition-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/age-gender-recognition-retail-0013)| +|Head Pose Estimation| Head pose estimation based on detected face image.|[pipeline_image.yaml](./sample/param/pipeline_image.yaml)
[pipeline_image_video.yaml](./sample/param/pipeline_image_video.yaml)
[pipeline_people.yaml](./sample/param/pipeline_people.yaml)
[pipeline_people_ip.yaml](./sample/param/pipeline_people_ip.yaml)|[head-pose-estimation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/head-pose-estimation-adas-0001)| +|Object Detection| Object detection based on SSD-based trained models.|[pipeline_object.yaml](./sample/param/pipeline_object.yaml)
[pipeline_object_topic.yaml](./sample/param/pipeline_object_topic.yaml)|[mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mobilenet-ssd)| +|Vehicle and License Detection| Vehicle and license detection based on Intel models.|[pipeline_vehicle_detection.yaml](./sample/param/pipeline_vehicle_detection.yaml)|[vehicle-license-plate-detection-barrier-0106](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-license-plate-detection-barrier-0106)
[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/vehicle-attributes-recognition-barrier-0039)
[license-plate-recognition-barrier-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/license-plate-recognition-barrier-0001)| +|Object Segmentation| Object segmentation.|[pipeline_segmentation.yaml](./sample/param/pipeline_segmentation.yaml)
[pipeline_segmentation_image.yaml](./sample/param/pipeline_segmentation_image.yaml)
[pipeline_video.yaml](./sample/param/pipeline_video.yaml)|[semantic-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/semantic-segmentation-adas-0001)
[deeplabv3](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/deeplabv3)| +|Person Attributes| Person attributes based on object detection.|[pipeline_person_attributes.yaml](./sample/param/pipeline_person_attributes.yaml)|[person-attributes-recognition-crossroad-0230](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-attributes-recognition-crossroad-0230)
[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)| +|Person Reidentification|Person reidentification based on object detection.|[pipeline_reidentification.yaml](./sample/param/pipeline_reidentification.yaml)|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-detection-retail-0013)
[person-reidentification-retail-0277](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/intel/person-reidentification-retail-0277)| +|Object Segmentation Maskrcnn| Object segmentation and detection based on maskrcnn model.|[pipeline_segmentation_maskrcnn.yaml](./sample/param/pipeline_segmentation_maskrcnn.yaml)|[mask_rcnn_inception_resnet_v2_atrous_coco](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3/models/public/mask_rcnn_inception_resnet_v2_atrous_coco)| +
+

+ +## ROS interfaces and outputs +The inference results can be output in several types. One or more types can be enabled for any inference pipeline. +### Topic +Specific topic(s) can be generated and published according to the given inference functionalities. + +

+

+Published Topic Correspondence Table + +|Inference|Published Topic| +|---|---| +|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| +|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([object_msgs:msg:EmotionsStamped](../../../object_msgs/msg/EmotionsStamped.msg))| +|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([object_msgs:msg:AgeGenderStamped](../../../object_msgs/msg/AgeGenderStamped.msg))| +|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([object_msgs:msg:HeadPoseStamped](../../../object_msgs/msg/HeadPoseStamped.msg))| +|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| +|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))| +|Object Segmentation Maskrcnn|```/ros2_openvino_toolkit/segmented_obejcts```([object_msgs::msg::ObjectsInMasks](../../../object_msgs/msg/ObjectsInMasks.msg))| +|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([object_msgs::msg::ReidentificationStamped](../../../object_msgs/msg/ReidentificationStamped.msg))| +|Vehicle Detection|```/ros2_openvino_toolkit/detected_vehicles_attribs```([object_msgs::msg::VehicleAttribsStamped](../../../object_msgs/msg/VehicleAttribsStamped.msg))| +|Vehicle License Detection|```/ros2_openvino_toolkit/detected_license_plates```([object_msgs::msg::LicensePlateStamped](../../../object_msgs/msg/LicensePlateStamped.msg))| +
+

+ +### Service +Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
+ +

+

+Service Correspondence Table + +|Inference|Service| +|---|---| +|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| +|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| +|Age Gender Detection Service|```/detect_age_gender```([object_msgs::srv::AgeGender](./object_msgs/srv/AgeGenderSrv.srv))| +|Headpose Detection Service|```/detect_head_pose```([object_msgs::srv::HeadPose](./object_msgs/srv/HeadPoseSrv.srv))| +|Emotion Detection Service|```/detect_emotion```([object_msgs::srv::Emotion](./object_msgs/srv/EmotionSrv.srv))| +
+

+ +### RViz +RViz display is also supported by the composited topic of original image frame with inference result. +To show in RViz tool, add an image marker with the composited topic: +```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::Image](https://docs.ros.org/en/api/sensor_msgs/html/msg/Image.html)) + +### Image Window +OpenCV based image window is natively supported by the package. +To enable window, Image Window output should be added into the output choices in .yaml config file. Refer to [the config file guidance](./doc/quick_start/yaml_configuration_guide.md) for more information about checking/adding this feature in your launching. + +## Demo Result Snapshots +For the snapshots of demo results, refer to the following pictures. + +* Face detection input from standard camera +![face_detection_demo_image](./data/images/face_detection.png "face detection demo image") + +* Object detection input from realsense camera +![object_detection_demo_realsense](./data/images/object_detection.gif "object detection demo realsense") + +* Object segmentation input from video +![object_segmentation_demo_video](./data/images/object_segmentation.gif "object segmentation demo video") + +* Person reidentification input from standard camera +![person_reidentification_demo_video](./data/images/person-reidentification.gif "person reidentification demo video") + +# Installation and Launching +## Deploy in Local Environment +* Refer to the quick start document for [getting_started_with_ros2](./doc/quick_start/getting_started_with_ros2_ov2.0.md) for detailed installation & launching instructions. +* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. + +## Deploy in Docker +* Refer to the docker instruction for [docker_instructions](./docker/docker_instructions_ov2.0.md) for detailed information about building docker image and launching. 
+* Refer to the quick start document for [yaml configuration guidance](./doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. + +# Reference +* Open_model_zoo: Refer to the OpenVINO document for [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/releases/2022/3) for detailed model structure and demo samples. +* OpenVINO api 2.0: Refer to the OpenVINO document for [OpenVINO_api_2.0](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html) for latest api 2.0 transition guide. + +# FAQ +* [How to get the IR file for yolov5?](./doc/quick_start/tutorial_for_yolov5_converted.md) +* [How to build OpenVINO by source?](https://github.com/openvinotoolkit/openvino/wiki#how-to-build) +* [How to build RealSense by source?](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md) +* [What is the basic command of Docker CLI?](https://docs.docker.com/engine/reference/commandline/docker/) +* [What is the canonical C++ API for interacting with ROS?](https://docs.ros2.org/latest/api/rclcpp/) + +# Feedback +* Report questions, issues and suggestions, using: [issue](https://github.com/intel/ros2_openvino_toolkit/issues). 
# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw +* ROS2 OpenVINO discription written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw ###### *Any security issue should be reported using process at https://01.org/security* + diff --git a/data/labels/object_detection/coco.names b/data/labels/object_detection/coco.names new file mode 100755 index 00000000..16315f2b --- /dev/null +++ b/data/labels/object_detection/coco.names @@ -0,0 +1,80 @@ +person +bicycle +car +motorbike +aeroplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +sofa +pottedplant +bed +diningtable +toilet +tvmonitor +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush \ No newline at end of file diff --git a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels index 23d4cd9a..827dc158 100644 --- a/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels +++ b/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels @@ -1,2 +1,3 @@ +background vehicle license diff --git a/data/model_list/convert_model.lst b/data/model_list/convert_model.lst new file mode 100644 index 00000000..0cfc7f5b --- /dev/null +++ b/data/model_list/convert_model.lst @@ -0,0 +1,5 @@ +# This file can be used with the --list option of the model converter. 
+mobilenet-ssd +deeplabv3 +mask_rcnn_inception_resnet_v2_atrous_coco + diff --git a/data/model_list/download_model.lst b/data/model_list/download_model.lst new file mode 100644 index 00000000..0744a846 --- /dev/null +++ b/data/model_list/download_model.lst @@ -0,0 +1,18 @@ +# This file can be used with the --list option of the model downloader. +face-detection-adas-0001 +age-gender-recognition-retail-0013 +emotions-recognition-retail-0003 +landmarks-regression-retail-0009 +license-plate-recognition-barrier-0001 +person-detection-retail-0013 +person-attributes-recognition-crossroad-0230 +person-reidentification-retail-0277 +vehicle-attributes-recognition-barrier-0039 +vehicle-license-plate-detection-barrier-0106 +head-pose-estimation-adas-0001 +human-pose-estimation-0001 +semantic-segmentation-adas-0001 +mobilenet-ssd +deeplabv3 +mask_rcnn_inception_resnet_v2_atrous_coco + diff --git a/doc/inferences/Face_Detection.md b/doc/inferences/Face_Detection.md deleted file mode 100644 index 3bd2c8fa..00000000 --- a/doc/inferences/Face_Detection.md +++ /dev/null @@ -1,21 +0,0 @@ -# Face Detection - -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* face detection input from image -![face_detection_demo_image](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/face_detection.png "face detection demo image") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output - sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output - sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output - sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp /opt/openvino_toolkit/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - ``` diff --git a/doc/inferences/Face_Reidentification.md b/doc/inferences/Face_Reidentification.md deleted file mode 100644 index 9a496fff..00000000 --- a/doc/inferences/Face_Reidentification.md +++ /dev/null @@ -1,10 +0,0 @@ -# Face Reidentification -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output - sudo python3 downloader.py --name face-reidentification-retail-0095 --output_dir /opt/openvino_toolkit/models/face-reidentification/output - ``` - - diff --git a/doc/inferences/Object_Detection.md b/doc/inferences/Object_Detection.md deleted file mode 100644 index 905b134d..00000000 --- a/doc/inferences/Object_Detection.md +++ /dev/null @@ -1,91 +0,0 @@ -# Object Detection -## Introduction -The section depict the kind of Object Detection, which produces object classification and its location based ROI. -Two kinds of models are supported currently: -- SSD based Object Detection Models - * SSD300-VGG16, SSD500-VGG16, Mobilenet-SSD (both caffe and tensorflow) -- YoloV2 - -## Demo Result Snapshots -* object detection input from realsense camera - -![object_detection_demo_realsense](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_detection.gif "object detection demo realsense") - -## Download Models ->> Before using the supported models, you need to first downloand and optimize them into OpenVINO mode. mobilenet-SSD caffe model is the default one used in the Object Detection configuration. 
- -#### mobilenet-ssd -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - cd $model_downloader - sudo python3 ./downloader.py --name mobilenet-ssd - #FP32 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - #FP16 precision model - sudo python3 $model_optimizer/mo.py --input_model $model_downloader/public/mobilenet-ssd/mobilenet-ssd.caffemodel --output_dir /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 --data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [127.5] - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32 - sudo cp $openvino_labels/object_detection/mobilenet-ssd.labels /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16 - ``` -#### YOLOv2-voc -* Darkflow to protobuf(.pb) - - install [darkflow](https://github.com/thtrieu/darkflow) - - install prerequsites - ```bash - pip3 install tensorflow opencv-python numpy networkx cython - ``` - - Get darkflow and YOLO-OpenVINO - ```bash - mkdir -p ~/code && cd ~/code - git clone https://github.com/thtrieu/darkflow - git clone https://github.com/chaoli2/YOLO-OpenVINO - sudo ln -sf ~/code/darkflow /opt/openvino_toolkit/ - ``` - - modify the line self.offset = 16 in the ./darkflow/utils/loader.py file and replace with self.offset = 20 - - Install darkflow - ```bash - cd ~/code/darkflow - pip3 install . - ``` - - Copy voc.names in YOLO-OpenVINO/common to labels.txt in darkflow. 
- ```bash - cp ~/code/YOLO-OpenVINO/common/voc.names ~/code/darkflow/labels.txt - ``` - - Get yolov2 weights and cfg - ```bash - cd ~/code/darkflow - mkdir -p models - cd models - wget -c https://pjreddie.com/media/files/yolov2-voc.weights - wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-voc.cfg - ``` - - Run convert script - ```bash - cd ~/code/darkflow - flow --model models/yolov2-voc.cfg --load models/yolov2-voc.weights --savepb - ``` -* Convert YOLOv2-voc TensorFlow Model to the optimized Intermediate Representation (IR) of model - ```bash - cd ~/code/darkflow - # FP32 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP32 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - # FP16 precision model - sudo python3 $model_optimizer/mo_tf.py \ - --input_model built_graph/yolov2-voc.pb \ - --batch 1 \ - --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/yolo_v2_voc.json \ - --data_type FP16 \ - --output_dir /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP32 - sudo cp $openvino_labels/object_detection/yolov2-voc.labels /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16 - ``` diff --git a/doc/inferences/Object_Segmentation.md b/doc/inferences/Object_Segmentation.md deleted file mode 100644 index 7e998af9..00000000 --- a/doc/inferences/Object_Segmentation.md +++ /dev/null @@ -1,24 +0,0 @@ -# Object Segmentation -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* object segmentation input from video -![object_segmentation_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/object_segmentation.gif "object segmentation demo video") -## Download Models -* download and convert a trained model to produce an optimized Intermediate Representation (IR) of the model - ```bash - #object segmentation model - mkdir -p ~/Downloads/models - cd ~/Downloads/models - wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - tar -zxvf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz - cd mask_rcnn_inception_v2_coco_2018_01_28 - #FP32 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --output_dir /opt/openvino_toolkit/models/segmentation/output/FP32 - #FP16 - sudo python3 $model_optimizer/mo_tf.py --input_model frozen_inference_graph.pb --tensorflow_use_custom_operations_config $model_optimizer/extensions/front/tf/mask_rcnn_support.json --tensorflow_object_detection_api_pipeline_config pipeline.config --reverse_input_channels --data_type=FP16 --output_dir /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP32 - sudo cp $openvino_labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/segmentation/output/FP16 - ``` diff --git a/doc/inferences/People_Reidentification.md b/doc/inferences/People_Reidentification.md deleted file mode 100644 index 39c276d6..00000000 --- a/doc/inferences/People_Reidentification.md +++ /dev/null @@ -1,13 +0,0 @@ -# People Reidentification -## Demo Result Snapshots -See below pictures for the demo result snapshots. -* Person Reidentification input from standard camera -![person_reidentification_demo_video](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/person-reidentification.gif "person reidentification demo video") -## Download Models -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output - sudo python3 downloader.py --name person-reidentification-retail-0076 --output_dir /opt/openvino_toolkit/models/person-reidentification/output - ``` - diff --git a/doc/inferences/Vehicle_Detection.md b/doc/inferences/Vehicle_Detection.md deleted file mode 100644 index 8fdb1a5b..00000000 --- a/doc/inferences/Vehicle_Detection.md +++ /dev/null @@ -1,14 +0,0 @@ -# Vehicle Detection -## Download Models -### OpenSource Version -* download the optimized Intermediate Representation (IR) of model (excute _once_)
- ```bash - cd $model_downloader - sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output - sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output - ``` -* copy label files (excute _once_)
- ```bash - sudo cp $openvino_labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output - ``` diff --git a/doc/installation/BINARY_INSTALLATION.md b/doc/installation/BINARY_INSTALLATION.md deleted file mode 100644 index ebe1cf71..00000000 --- a/doc/installation/BINARY_INSTALLATION.md +++ /dev/null @@ -1,74 +0,0 @@ -# ros2_openvino_toolkit -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) -- [OpenVINO™ Toolkit](https://software.intel.com/en-us/openvino-toolkit) -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. 
-```bash -./environment_setup_binary.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install [OpenVINO™ Toolkit 2019R3.1](https://software.intel.com/en-us/articles/OpenVINO-Install-Linux) ([download](https://software.intel.com/en-us/openvino-toolkit/choose-download/free-download-linux))
- **Note**: Please use *root privileges* to run the installer when installing the core components. -* Install [the Intel® Graphics Compute Runtime for OpenCL™ driver components required to use the GPU plugin](https://docs.openvinotoolkit.org/latest/_docs_install_guides_installing_openvino_linux.html#additional-GPU-steps) - -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation -* Build sample code under openvino toolkit - ```bash - # root is required instead of sudo - source /opt/intel/openvino/bin/setupvars.sh - cd /opt/intel/openvino/deployment_tools/inference_engine/samples/ - mkdir build - cd build - cmake .. - make - ``` -* set ENV CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export CPU_EXTENSION_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - source /opt/intel/openvino/bin/setupvars.sh - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - diff --git a/doc/installation/OPEN_SOURCE_INSTALLATION.md b/doc/installation/OPEN_SOURCE_INSTALLATION.md deleted file mode 100644 index cba2ce0c..00000000 --- a/doc/installation/OPEN_SOURCE_INSTALLATION.md +++ /dev/null @@ -1,82 +0,0 @@ -# ros2_openvino_toolkit - -## 1. Prerequisite -- An x86_64 computer running Ubuntu 18.04. Below processors are supported: - * 6th-8th Generation Intel® Core™ - * Intel® Xeon® v5 family - * Intel® Xeon® v6 family -- ROS2 [Dashing](https://github.com/ros2/ros2/wiki) - -- OpenVINO™ Toolkit Open Source
- * The [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino) that helps to enable fast, heterogeneous deep learning inferencing for Intel® processors (CPU and GPU/Intel® Processor Graphics), and supports more than 100 public and custom models.
- * [Open Model Zoo](https://github.com/opencv/open_model_zoo) includes 20+ pre-trained deep learning models to expedite development and improve deep learning inference on Intel® processors (CPU, Intel Processor Graphics, FPGA, VPU), along with many samples to easily get started. - -- RGB Camera, e.g. RealSense D400 Series or standard USB camera or Video/Image File -- Graphics are required only if you use a GPU. The official system requirements for GPU are: - * 6th to 8th generation Intel® Core™ processors with Iris® Pro graphics and Intel® HD Graphics - * 6th to 8th generation Intel® Xeon® processors with Iris Pro graphics and Intel HD Graphics (excluding the e5 product family, which does not have graphics) - * Intel® Pentium® processors N4200/5, N3350/5, N3450/5 with Intel HD Graphics - -- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -## 2. Environment Setup -**Note**:You can choose to build the environment using *./environment_setup_binary.sh* script in the script subfolder.The *modules.conf* file in the same directory as the .sh file is the configuration file that controls the installation process.You can modify the *modules.conf* to customize your installation process. -```bash -./environment_setup.sh -``` -**Note**:You can also choose to follow the steps below to build the environment step by step. -* Install ROS2 [Dashing](https://github.com/ros2/ros2/wiki) ([guide](https://index.ros.org/doc/ros2/Installation/Dashing/Linux-Development-Setup/))
-* Install OpenVINO™ Toolkit Open Source
- * Install OpenCL Driver for GPU
- ```bash - cd ~/Downloads - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-gmmlib_18.4.1_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-core_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-igc-opencl_18.50.1270_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-opencl_19.04.12237_amd64.deb - wget https://github.com/intel/compute-runtime/releases/download/19.04.12237/intel-ocloc_19.04.12237_amd64.deb - sudo dpkg -i *.deb - ``` - * Install [Deep Learning Deployment Toolkit](https://github.com/openvinotoolkit/openvino)([tag 2019_R3.1](https://github.com/openvinotoolkit/openvino/blob/2019_R3.1/inference-engine/README.md))
- * Install [Open Model Zoo](https://github.com/opencv/open_model_zoo)([tag 2019_R3.1](https://github.com/opencv/open_model_zoo/blob/2019_R3.1/demos/README.md))
- -- Install Intel® RealSense™ SDK 2.0 [(tag v2.30.0)](https://github.com/IntelRealSense/librealsense/tree/v2.30.0)
- * [Install from package](https://github.com/IntelRealSense/librealsense/blob/v2.30.0/doc/distribution_linux.md)
- -## 3. Building and Installation - -* set ENV InferenceEngine_DIR, CPU_EXTENSION_LIB and GFLAGS_LIB - ```bash - export InferenceEngine_DIR=/opt/openvino_toolkit/dldt/inference-engine/build/ - export CPU_EXTENSION_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libcpu_extension.so - export GFLAGS_LIB=/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib/libgflags_nothreads.a - ``` -* Install ROS2_OpenVINO packages - ```bash - mkdir -p ~/ros2_overlay_ws/src - cd ~/ros2_overlay_ws/src - git clone https://github.com/intel/ros2_openvino_toolkit - git clone https://github.com/intel/ros2_object_msgs - git clone https://github.com/ros-perception/vision_opencv -b ros2 - git clone https://github.com/ros2/message_filters.git - git clone https://github.com/ros-perception/image_common.git -b dashing - git clone https://github.com/intel/ros2_intel_realsense.git -b refactor - ``` - -* Build package - ``` - source ~/ros2_ws/install/local_setup.bash - cd ~/ros2_overlay_ws - colcon build --symlink-install - source ./install/local_setup.bash - sudo mkdir -p /opt/openvino_toolkit - sudo ln -sf ~/ros2_overlay_ws/src/ros2_openvino_toolkit /opt/openvino_toolkit/ - ``` - - - - - diff --git a/doc/installation/installation.md b/doc/installation/installation.md deleted file mode 100644 index 6596a35a..00000000 --- a/doc/installation/installation.md +++ /dev/null @@ -1,11 +0,0 @@ - -# Installation ->> Intel releases 2 different series of OpenVINO Toolkit, we call them as [OpenSource Version](https://github.com/openvinotoolkit/openvino/) and [Binary Version](https://software.intel.com/en-us/openvino-toolkit). You may choose any of them to install. - -**NOTE:** If you are not sure which version you would use, it is recommended for you to choose [Binary Version](https://software.intel.com/en-us/openvino-toolkit), which can simplify your environment setup. 
- -## OpenSource Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/OPEN_SOURCE_INSTALLATION.md) for details. - -## Binary Version -One-step installation scripts are provided for the dependencies' installation. Please see [the guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/installation/BINARY_INSTALLATION.md) for details. diff --git a/doc/launching/launch.md b/doc/launching/launch.md deleted file mode 100644 index efc1d1ae..00000000 --- a/doc/launching/launch.md +++ /dev/null @@ -1,37 +0,0 @@ -# Launching -## 1. Setup Environment -Please refer to this [guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/set_environment.md) for details. - -**NOTE:** Configure *once* the Neural Compute Stick USB Driver by following between instructions, in case you have a NCS or NCS2 in hand. - ```bash - cd ~/Downloads - SUBSYSTEM=="usb", ATTRS{idProduct}=="2150", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" - EOF - sudo cp 97-usbboot.rules /etc/udev/rules.d/ - sudo udevadm control --reload-rules - sudo udevadm trigger - sudo ldconfig - rm 97-usbboot.rules - ``` -## 2. Launch Program -### Topic -Each inference listed in [section Inference Implementations](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations) is created default launching configurations( xxx.launch.py) in OpenVINO Sample package. You can follow the utility of ROS2 launch instruction to launch them. 
For example: - ```bash - ros2 launch dynamic_vino_sample pipeline_object.launch.py - ``` - -The full list of xxx.launch.py is shown in below tabel: - -|Download Models|Launch File|Description| -|---|---|---| -|[Object Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md)|pipeline_object.launch.py|Launching file for **Object Detection**, by default mobilenet_ssd model and standard USB camera are used.| -|[Face Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md)|pipeline_people.launch.py|Launching file for **Face Detection**, also including **Age/Gender Recognition, HeadPose Estimation, and Emotion Recognition**.| -|[Object Segmentation](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Segmentation.md)|pipeline_segmentation.launch.py|Launching file for **Object Segmentation**.| -|[Person Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/People_Reidentification.md)|pipeline_person_reid.launch.py|Launching file for **Person Re-Identification**.| -|[Face Re-Identification](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Reidentification.md)|pipeline_face_reid.launch.py|Launching file for **Face Segmentation**, in which **Face Landmark Detection** is included.| -|[Vehicle Detection](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Vehicle_Detection.md)|pipeline_vehicle_detection.launch.py|Launching file for **vehicle detection**, in which **license plate recognition** is included.| - -### Service -See [service Page](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/launching/service.md) for detailed launching instructions. 
diff --git a/doc/launching/service.md b/doc/launching/service.md deleted file mode 100644 index c5f5701f..00000000 --- a/doc/launching/service.md +++ /dev/null @@ -1,27 +0,0 @@ -# Service -## Download Models -### Object Detection Service -* See [object detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Object_Detection.md#mobilenet-ssd) section for detailed instructions. - -### People Detection Service -* See [People Detection download model](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/inferences/Face_Detection.md#opensource-version) section for detaild instructions. - -## Launching -* run object detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_object_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_object_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/car.png - ``` -* run face detection service sample code input from Image - Run image processing service: - ```bash - ros2 launch dynamic_vino_sample image_people_server.launch.py - ``` - Run example application with an absolute path of an image on another console: - ```bash - ros2 run dynamic_vino_sample image_people_client /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg - ``` diff --git a/doc/launching/set_environment.md b/doc/launching/set_environment.md deleted file mode 100644 index d50006a3..00000000 --- a/doc/launching/set_environment.md +++ /dev/null @@ -1,32 +0,0 @@ -# Set Environment -## OpenSource Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/openvino_toolkit/dldt/inference-engine/bin/intel64/Release/lib - export openvino_version=opensource - ``` -* Install prerequisites - ```bash - cd /opt/openvino_toolkit/dldt/model-optimizer/install_prerequisites - 
sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` -## Binary Version -* Set ENV LD_LIBRARY_PATH and openvino_version - ```bash - source /opt/intel/openvino/bin/setupvars.sh - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/deployment_tools/inference_engine/samples/build/intel64/Release/lib - export openvino_version=binary - ``` -* Install prerequisites - ```bash - cd /opt/intel/openvino/deployment_tools/model_optimizer/install_prerequisites - sudo ./install_prerequisites.sh - ``` -* Set model tool variable - ```bash - source /opt/openvino_toolkit/ros2_openvino_toolkit/script/set_variable.sh - ``` diff --git a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md deleted file mode 100644 index 0f43cc9f..00000000 --- a/doc/quick_start/getting_started_with_Foxy_Ubuntu20.04.md +++ /dev/null @@ -1,136 +0,0 @@ -# ROS2_FOXY_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. Environment Setup -* Install ROS2 Foxy ([guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide]https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. 
Building and Installation -* Install ROS2_OpenVINO packages -``` -mkdir -p ~/catkin_ws/src -cd ~/catkin_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b foxy_dev -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2 -git clone https://github.com/ros-perception/vision_opencv.git -b ros2 -``` -* Install dependencies -``` -sudo apt-get install ros-foxy-diagnostic-updater -``` -* Build package -``` -source /opt/ros/foxy/setup.bash -source /opt/intel/openvino_2021/bin/setupvars.sh -cd ~/catkin_ws -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. Running the Demo -* Preparation -``` -source /opt/intel/openvino_2021/bin/setupvars.sh -sudo mkdir -p /opt/openvino_toolkit -sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models -sudo chmod 777 -R /opt/openvino_toolkit/models -``` - -* See all available models -``` -cd /opt/intel//deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output -sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output -sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output -sudo python3 downloader.py --name person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output -sudo python3 downloader.py --name 
person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output -sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output -sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output -sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output -sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output -sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output -sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output -``` - -* copy label files (execute once) -* Before launch, copy label files to the same model path, make sure the model path and label path match the ros_openvino_toolkit/vino_launch/param/xxxx.yaml. 
-``` - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 -``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) -* (Note: Tensorflow=1.15.5, Python<=3.7) - * ssd_mobilenet_v2_coco - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * deeplabv3 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name deeplabv3 - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 
--mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * YOLOV2 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name yolo-v2-tf - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - * run face detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_people.launch.py - ``` - * run person reidentification sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py - ``` - * run person face reidentification sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py - ``` - * run face detection sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_image.launch.py - ``` - * run object segmentation sample code input from RealSenseCameraTopic. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py - ``` - * run object segmentation sample code input from Image. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py - ``` - * run vehicle detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py - ``` - * run person attributes sample code input from StandardCamera. 
- ``` - ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py - ``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* - diff --git a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md b/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md deleted file mode 100644 index a5125268..00000000 --- a/doc/quick_start/getting_started_with_Galactic_Ubuntu20.04.md +++ /dev/null @@ -1,156 +0,0 @@ -# ROS2_GALACTIC_OpenVINO_Toolkit - -**NOTE:** -Below steps have been tested on **Ubuntu 20.04**. - -## 1. Environment Setup -* Install ROS2 Galactic ([guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html)) -* Install Intel® OpenVINO™ Toolkit Version: 2021.4 ([guide](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_linux.html)) or building by source code ([guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingForLinux)) - - * version **intel-openvino-dev-ubuntu20-2021.4.752** was tested. It is recommend to use 2021.4.752 or the newer. -* Install Intel® RealSense ™ SDK ([guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md)) - -## 2. 
Building and Installation -* Install ROS2_OpenVINO_Toolkit packages -``` -mkdir -p ~/catkin_ws/src -cd ~/catkin_ws/src -git clone https://github.com/intel/ros2_openvino_toolkit -b galactic -git clone https://github.com/intel/ros2_object_msgs -git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2 -git clone https://github.com/ros-perception/vision_opencv.git -b ros2 -``` -* Install dependencies -``` -sudo apt-get install ros-galactic-diagnostic-updater -sudo pip3 install networkx -sudo apt-get install python3-defusedxml -sudo pip3 install tensorflow==2.4.1 -``` -* Build package -``` -source /opt/ros/galactic/setup.bash -source /opt/intel/openvino_2021/bin/setupvars.sh -cd ~/catkin_ws -colcon build --symlink-install -source ./install/local_setup.bash -``` - -## 3. Running the Demo -* Preparation -``` -source /opt/intel/openvino_2021/bin/setupvars.sh -sudo mkdir -p /opt/openvino_toolkit -sudo ln -s /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader /opt/openvino_toolkit/models -sudo chmod 777 -R /opt/openvino_toolkit/models -``` - -* See all available models -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --print_all -``` - -* Download the optimized Intermediate Representation (IR) of model (execute once), for example: -``` -cd /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader -sudo python3 downloader.py --name face-detection-adas-0001 --output_dir /opt/openvino_toolkit/models/face_detection/output -sudo python3 downloader.py --name age-gender-recognition-retail-0013 --output_dir /opt/openvino_toolkit/models/age-gender-recognition/output -sudo python3 downloader.py --name emotions-recognition-retail-0003 --output_dir /opt/openvino_toolkit/models/emotions-recognition/output -sudo python3 downloader.py --name head-pose-estimation-adas-0001 --output_dir /opt/openvino_toolkit/models/head-pose-estimation/output -sudo python3 downloader.py --name 
person-detection-retail-0013 --output_dir /opt/openvino_toolkit/models/person-detection/output -sudo python3 downloader.py --name person-reidentification-retail-0277 --output_dir /opt/openvino_toolkit/models/person-reidentification/output -sudo python3 downloader.py --name landmarks-regression-retail-0009 --output_dir /opt/openvino_toolkit/models/landmarks-regression/output -sudo python3 downloader.py --name semantic-segmentation-adas-0001 --output_dir /opt/openvino_toolkit/models/semantic-segmentation/output -sudo python3 downloader.py --name vehicle-license-plate-detection-barrier-0106 --output_dir /opt/openvino_toolkit/models/vehicle-license-plate-detection/output -sudo python3 downloader.py --name vehicle-attributes-recognition-barrier-0039 --output_dir /opt/openvino_toolkit/models/vehicle-attributes-recognition/output -sudo python3 downloader.py --name license-plate-recognition-barrier-0001 --output_dir /opt/openvino_toolkit/models/license-plate-recognition/output -sudo python3 downloader.py --name person-attributes-recognition-crossroad-0230 --output_dir /opt/openvino_toolkit/models/person-attributes/output -``` - -* copy label files (execute once) -* Before launch, copy label files to the same model path, make sure the model path and label path match the ros_openvino_toolkit/vino_launch/param/xxxx.yaml. 
-``` - # Lables for Face-Detection - sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo mkdir -p /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/ - - # Lables for Emotions-Recognition - sudo mkdir -p /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/ - - # Labels for Sementic-Segmentation - sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo mkdir -p /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP32/ - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/semantic-segmentation/output/FP16/ - - # Labels for Vehicle-License_Plate - sudo mkdir -p /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32 
-``` - -* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi)need to be converted to intermediate representation (For example the model for object detection) -* (Note: Tensorflow=2.4.1, Python<=3.7) - * ssd_mobilenet_v2_coco - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name ssd_mobilenet_v2_coco - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=ssd_mobilenet_v2_coco --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - * deeplabv3 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name deeplabv3 - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=deeplabv3 --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py -d /opt/openvino_toolkit/models/ - ``` - * YOLOV2 - ``` - cd /opt/openvino_toolkit/models/ - sudo python3 downloader/downloader.py --name yolo-v2-tf - sudo python3 /opt/intel/openvino_2021/deployment_tools/open_model_zoo/tools/downloader/converter.py --name=yolo-v2-tf --mo /opt/intel/openvino_2021/deployment_tools/model_optimizer/mo.py - ``` - -* Before launch, check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml, make sure the paramter like model path, label path, inputs are right. - * run face detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_people.launch.py - ``` - * run person reidentification sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_reidentification.launch.py - ``` - * run person face reidentification sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_face_reidentification.launch.py - ``` - * run face detection sample code input from Image. 
- ``` - ros2 launch dynamic_vino_sample pipeline_image.launch.py - ``` - * run object segmentation sample code input from RealSenseCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_segmentation.launch.py - ``` - * run object segmentation sample code input from Image. - ``` - sudo mkdir -p /opt/openvino_toolkit/ros2_openvino_toolkit/data/images - sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/images/expressway.jpg /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/ - ros2 launch dynamic_vino_sample pipeline_segmentation_image.launch.py - ``` - * run vehicle detection sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_vehicle_detection.launch.py - ``` - * run person attributes sample code input from StandardCamera. - ``` - ros2 launch dynamic_vino_sample pipeline_person_attributes.launch.py - ``` - -# More Information -* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw - -###### *Any security issue should be reported using process at https://01.org/security* - diff --git a/doc/quick_start/getting_started_with_ros2_ov2.0.md b/doc/quick_start/getting_started_with_ros2_ov2.0.md new file mode 100644 index 00000000..45f79670 --- /dev/null +++ b/doc/quick_start/getting_started_with_ros2_ov2.0.md @@ -0,0 +1,135 @@ +# ROS2_OpenVINO_Toolkit + +**NOTE:** +Below steps have been tested on **Ubuntu 20.04** and **Ubuntu 22.04**. +Supported ROS2 versions include foxy,galactic and humble. + +## 1. Environment Setup +For ROS2 foxy and galactic on ubuntu 20.04: + * Install ROS2.
+ Refer to: [ROS_foxy_install_guide](https://docs.ros.org/en/foxy/Installation/Ubuntu-Install-Debians.html) & [ROS_galactic_install_guide](https://docs.ros.org/en/galactic/Installation/Ubuntu-Install-Debians.html) + + * Install Intel® OpenVINO™ Toolkit Version: 2022.3.
+ Refer to: [OpenVINO_install_guide](https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html#doxid-openvino-docs-install-guides-installing-openvino-apt)
+ * Install from an archive file. Both the runtime and the development tools are needed; `pip` is recommended for installing the development tools.
+ Refer to: [OpenVINO_devtool_install_guide](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html) + + * Install Intel® RealSense™ SDK.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md) + +For ROS2 humble on ubuntu 22.04: + * Install ROS2.
+ Refer to: [ROS_humble_install_guide](https://docs.ros.org/en/humble/Installation/Ubuntu-Install-Debians.html) + + * Install Intel® OpenVINO™ Toolkit Latest Version by Source.
+ Refer to: [OpenVINO_install_guide](https://github.com/openvinotoolkit/openvino/wiki/BuildingCode) + + * Install Intel® RealSense™ SDK by Source.
+ Refer to: [RealSense_install_guide](https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md)
+
+## 2. Building and Installation
+* Install ROS2_OpenVINO_Toolkit packages
+```
+mkdir -p ~/catkin_ws/src
+cd ~/catkin_ws/src
+git clone https://github.com/intel/ros2_openvino_toolkit -b ros2
+git clone https://github.com/intel/ros2_object_msgs
+git clone https://github.com/IntelRealSense/realsense-ros.git -b ros2-development
+git clone https://github.com/ros-perception/vision_opencv.git -b <ROS2_VERSION>
+```
+* Install dependencies
+```
+sudo apt-get install ros-<ROS2_VERSION>-diagnostic-updater
+sudo apt install python3-colcon-common-extensions
+```
+* Build package
+```
+source /opt/ros/<ROS2_VERSION>/setup.bash
+source <OpenVINO_INSTALL_DIR>/setupvars.sh
+cd ~/catkin_ws
+colcon build --symlink-install
+source ./install/local_setup.bash
+```
+
+## 3. Running the Demo
+### Install OpenVINO 2022.3 by PIP
+OMZ tools are provided for downloading and converting models of open_model_zoo in ov2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/) + +* See all available models +``` +omz_downloader --print_all +``` + +* Download the optimized Intermediate Representation (IR) of model (execute once), for example: +``` +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list +omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/ +``` + +* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be converted to intermediate representation (such as the model for object detection): +``` +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list +omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert +``` +### Install OpenVINO 2022.3 by source code +* See all available models +``` +cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools +sudo python3 downloader.py --print_all +``` + +* Download the optimized Intermediate Representation (IR) of models (execute once), for example: +``` +cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools +sudo python3 downloader.py --list download_model.lst -o /opt/openvino_toolkit/models/ +``` + +* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be converted to Intermediate Representation (such as the model for object detection): +``` +cd ~/openvino/thirdparty/open_model_zoo/tools/model_tools +sudo python3 converter.py --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert +``` + +* Copy label files (execute once) +**Note**:Need to make label_dirs if skip steps for set output_dirs above. 
+``` +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 +``` + +* Check the parameter configuration in ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start document for [yaml configuration guidance](./yaml_configuration_guide.md) for detailed configuration guidance. + * run face detection sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_people.launch.py + ``` + * run person reidentification sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_reidentification.launch.py + ``` + * run face detection sample code input from Image. + ``` + ros2 launch openvino_node pipeline_image.launch.py + ``` + * run object segmentation sample code input from RealSenseCameraTopic. 
+ ```
+ ros2 launch openvino_node pipeline_segmentation.launch.py
+ ```
+ * run vehicle detection sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_vehicle_detection.launch.py
+ ```
+ * run person attributes sample code input from StandardCamera.
+ ```
+ ros2 launch openvino_node pipeline_person_attributes.launch.py
+ ```
+
+# More Information
+* ROS2 OpenVINO description written in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw
+
+###### *Any security issue should be reported using the process at https://01.org/security*
+
diff --git a/doc/quick_start/tutorial_for_yolov5_converted.md b/doc/quick_start/tutorial_for_yolov5_converted.md
new file mode 100644
index 00000000..70ee8421
--- /dev/null
+++ b/doc/quick_start/tutorial_for_yolov5_converted.md
@@ -0,0 +1,88 @@
+# Tutorial_For_yolov5_Converted
+
+# Introduction
+This document describes a method to convert YOLOv5 nano PyTorch weight files with the .pt extension to ONNX weight files, and a method to convert ONNX weight files to IR files using the OpenVINO model optimizer. This method can help OpenVINO users optimize YOLOv5n for deployment in practical applications. 
+ +## Reference Phrase +|Term|Description| +|---|---| +|OpenVINO|Open Visual Inference & Neural Network Optimization| +|ONNX|Open Neural Network Exchange| +|YOLO|You Only Look Once| +|IR|Intermediate Representation| + +## Reference Document +|Doc|Link| +|---|---| +|OpenVINO|[openvino_2_0_transition_guide](https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html)| +|YOLOv5|[yolov5](https://github.com/ultralytics/yolov5)| + +# Convert Weight File to ONNX +* Copy YOLOv5 Repository from GitHub +``` +git clone https://github.com/ultralytics/yolov5.git +``` + +* Set Environment for Installing YOLOv5 +``` +cd yolov5 +python3 -m venv yolo_env // Create a virtual python environment +source yolo_env/bin/activate // Activate environment +pip install -r requirements.txt // Install yolov5 prerequisites +pip install onnx // Install ONNX +``` + +* Download PyTorch Weights +``` +mkdir model_convert && cd model_convert +wget https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt +``` + +* Convert PyTorch weights to ONNX weights +YOLOv5 repository provides export.py script, which can be used to convert PyTorch weight to ONNX weight. +``` +cd .. +python3 export.py --weights model_convert/yolov5n.pt --include onnx +``` + +# Convert ONNX files to IR files +After obtaining the ONNX weight file from the previous section [Convert Weight File to ONNX](#convert-weight-file-to-onnx), we can use the model optimizer to convert it to an IR file. + +* Install the OpenVINO Model Optimizer Environment +To use the model optimizer, you need to run the following command to install some necessary components (if you are still in the yolo_env virtual environment, you need to run the **deactivate** command to exit the environment or start a new terminal). 
+``` +python3 -m venv ov_env // Create openVINO virtual environment +source ov_env/bin/activate // Activate environment +python -m pip install --upgrade pip // Upgrade pip +pip install openvino[onnx]==2022.3.0 // Install OpenVINO for ONNX +pip install openvino-dev[onnx]==2022.3.0 // Install OpenVINO Dev Tool for ONNX +``` + +* Generate IR file +``` +cd model_convert +mo --input_model yolov5n.onnx +``` +Then we will get three files: yolov5n.xml, yolov5n.bin, and yolov5n.mapping under the model_convert folder. + +# Move to the Recommended Model Path +``` +cd ~/yolov5/model_convert +mkdir -p /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/ +sudo cp yolov5n.bin yolov5n.mapping yolov5n.xml /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/ +``` + +# FAQ + +

+

+How to install the python3-venv package? + +On Debian/Ubuntu systems, you need to install the python3-venv package using the following command. +``` +apt-get update +apt-get install python3-venv +``` +You may need to use sudo with that command. After installing, recreate your virtual environment. +
+

diff --git a/doc/quick_start/yaml_configuration_guide.md b/doc/quick_start/yaml_configuration_guide.md new file mode 100644 index 00000000..b6a08a2a --- /dev/null +++ b/doc/quick_start/yaml_configuration_guide.md @@ -0,0 +1,130 @@ +# Introduction + +The contents in .yaml config file should be well structured and follow the supported rules and entity names. + +# Sample +## [pipeline_people.yaml](../../sample/param/pipeline_people.yaml) +```bash +Pipelines: +- name: people + inputs: [StandardCamera] + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [ImageWindow, RosTopic, RViz] + connects: + - left: StandardCamera + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, ImageWindow, RosTopic, RViz] + - left: AgeGenderRecognition + right: [ImageWindow, RosTopic, RViz] + - left: EmotionRecognition + 
right: [ImageWindow, RosTopic, RViz] + - left: HeadPoseEstimation + right: [ImageWindow, RosTopic, RViz] + +Common: +``` +## Interface Description + +### Specify pipeline name +The name value of this pipeline can be anyone other than null. + +### Specify inputs +**Note:** The input parameter can only have one value.
+Currently, options for inputs are:
+
+|Input Option|Description|Configuration|
+|--------------------|------------------------------------------------------------------|-----------------------------------------|
+|StandardCamera|Any RGB camera with a supported USB port. Currently only the first USB camera is used if many are connected.|```inputs: [StandardCamera]```|
+|RealSenseCamera| Intel RealSense RGB-D Camera, directly calling RealSense Camera via librealsense plugin of openCV.|```inputs: [RealSenseCamera]```|
+|RealSenseCameraTopic| Any ROS topic which is structured in image message.|```inputs: [RealSenseCameraTopic]```|
+|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.|```inputs: [Image]```|
+|Video| Any video file which can be parsed by openCV.|```inputs: [Video]```|
+|IpCamera| Any RTSP server which can push video stream.|```inputs: [IpCamera]```|
+
+**Note:** Please refer to this opensource repo [RTSP_server_install_guide](https://github.com/EasyDarwin/EasyDarwin) to install an RTSP server for IpCamera input.
+
+### Specify input_path
+The input_path needs to be specified when the input is Image, Video or IpCamera.
+
+|Input Option|Configuration|
+|--------------------|------------------------------------------------------------------|
+|Image|```input_path: to/be/set/image_path```|
+|Video|```input_path: to/be/set/video_path```|
+|IpCamera|```input_path: "rtsp://localhost/test"```|
+
+### Specify infers
+The Inference Engine is a set of C++ classes that provides an API to read the Intermediate Representation, set the input and output formats, and execute the model on devices.
+
+* #### name
+The name of the inference engine needs to be specified here. 
Currently, the following inference features are supported:
+
+|Inference|Description|
+|-----------------------|------------------------------------------------------------------|
+|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|
+|EmotionRecognition| Emotion recognition based on detected face image.|
+|AgeGenderRecognition| Age and gender recognition based on detected face image.|
+|HeadPoseEstimation| Head pose estimation based on detected face image.|
+|ObjectDetection| object detection based on SSD-based trained models.|
+|VehicleDetection| Vehicle and passenger detection based on Intel models.|
+|ObjectSegmentation| object detection and segmentation.|
+|ObjectSegmentationMaskrcnn| object segmentation based on Maskrcnn model.|
+
+* #### model
+The path of the model needs to be specified here. The scheme below illustrates the typical workflow for deploying a trained deep learning model.
+![trained deep learning model](../../data/images/CVSDK_Flow.png "trained deep learning model")
+
+* #### engine
+**Note:** Currently, only CPU and GPU are supported.
+Target device options are: + +|Target Device| +|-----------------------| +|CPU| +|Intel® Integrated Graphics| +|FPGA| +|Intel® Movidius™ Neural Compute Stick| + +* #### label +Currently, this parameter does not work. + +* #### batch +Enable dynamic batch size for the inference engine net. + +### Specify outputs +**Note:** The output parameter can be one or more.
+Currently, the output options are: + +|Option|Description|Configuration| +|--------------------|-----------------------------------------------------|---------------------------------------------| +|ImageWindow| Window showing results|```outputs: [ImageWindow, RosTopic, RViz]```| +|RosTopic| Output the topic|```outputs: [ImageWindow, RosTopic, RViz]```| +|RViz| Display the result in rviz|```outputs: [ImageWindow, RosTopic, RViz]```| + +### Specify confidence_threshold +Set the threshold of detection probability. + +### Specify connects +The topology of a pipe can only have one value on the left and multiple values on the right. The value of the first left node should be the same as the specified **inputs**. diff --git a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md b/doc/tables_of_contents/Design_Architecture_and_logic_flow.md deleted file mode 100644 index 86c48bb3..00000000 --- a/doc/tables_of_contents/Design_Architecture_and_logic_flow.md +++ /dev/null @@ -1,27 +0,0 @@ -# Design Architecture -From the view of hirarchical architecture design, the package is divided into different functional components, as shown in below picture. - -![OpenVINO_Architecture](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/design_arch.PNG "OpenVINO RunTime Architecture") - -- **Intel® OpenVINO™ toolkit** is leveraged to provide deep learning basic implementation for data inference. is free software that helps developers and data scientists speed up computer vision workloads, streamline deep learning inference and deployments, -and enable easy, heterogeneous execution across Intel® platforms from edge to cloud. It helps to: - - Increase deep learning workload performance up to 19x1 with computer vision accelerators from Intel. - - Unleash convolutional neural network (CNN)-based deep learning inference using a common API. - - Speed development using optimized OpenCV* and OpenVX* functions. 
-- **ROS2 OpenVINO Runtime Framework** is the main body of this repo. it provides key logic implementation for pipeline lifecycle management, resource management and ROS system adapter, which extends Intel OpenVINO toolkit and libraries. Furthermore, this runtime framework provides ways to ease launching, configuration and data analytics and re-use. -- **Diversal Input resources** are the data resources to be infered and analyzed with the OpenVINO framework. -- **ROS interfaces and outputs** currently include _Topic_ and _service_. Natively, RViz output and CV image window output are also supported by refactoring topic message and inferrence results. -- **Optimized Models** provides by Model Optimizer component of Intel® OpenVINO™ toolkit. Imports trained models from various frameworks (Caffe*, Tensorflow*, MxNet*, ONNX*, Kaldi*) and converts them to a unified intermediate representation file. It also optimizes topologies through node merging, horizontal fusion, eliminating batch normalization, and quantization.It also supports graph freeze and graph summarize along with dynamic input freezing. - -# Logic Flow -From the view of logic implementation, the package introduces the definitions of parameter manager, pipeline and pipeline manager. The below picture depicts how these entities co-work together when the corresponding program is launched. - -![Logic_Flow](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/data/images/impletation_logic.PNG "OpenVINO RunTime Logic Flow") - -Once a corresponding program is launched with a specified .yaml config file passed in the .launch.py file or via commandline, _**parameter manager**_ analyzes the configurations about pipeline and the whole framework, then shares the parsed configuration information with pipeline procedure. A _**pipeline instance**_ is created by following the configuration info and is added into _**pipeline manager**_ for lifecycle control and inference action triggering. 
- -The contents in **.yaml config file** should be well structured and follow the supported rules and entity names. Please see [the configuration guidance](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) for how to create or edit the config files. - -**Pipeline** fulfills the whole data handling process: initiliazing Input Component for image data gathering and formating; building up the structured inference network and passing the formatted data through the inference network; transfering the inference results and handling output, etc. - -**Pipeline manager** manages all the created pipelines according to the inference requests or external demands (say, system exception, resource limitation, or end user's operation). Because of co-working with resource management and being aware of the whole framework, it covers the ability of performance optimization by sharing system resource between pipelines and reducing the burden of data copy. diff --git a/doc/tables_of_contents/prerequisite.md b/doc/tables_of_contents/prerequisite.md deleted file mode 100644 index f42279d7..00000000 --- a/doc/tables_of_contents/prerequisite.md +++ /dev/null @@ -1,31 +0,0 @@ -# Development and Target Platform - ->> The development and target platforms have the same requirements, but you can select different components during the installation, based on your intended use. - -## Hardware -### Processor Supported: -- Intel architecture processor, e.g. 6th~8th generation Intel® Core™ -- Intel® Xeon® v5 family -- Intel® Xeon® v6 family -- Intel® Pentium® processor N4200/5, N3350/5, N3450/5 with Intel® HD Graphics - -**Notes**: -- Processor graphics are not included in all processors. See [Product Specifications](https://ark.intel.com/) for information about your processor. -- A chipset that supports processor graphics is required for Intel® Xeon® processors. 
-- Use one of the following methods to determine the GPU on your hardware: - * [lspci] command: GPU info may lie in the [VGA compatible controller] line. - * Ubuntu system: Menu [System Settings] --> [Details] may help you find the graphics information. - * Openvino: Download the install package, install_GUI.sh inside will check the GPU information before installation. - -### Pripheral Depended: -- Intel® Movidius™ Neural Compute Stick -- Intel® Neural Compute Stick 2 -- Intel® Vision Accelerator Design with Intel® Movidius™ VPU -- RGB Camera, e.g. RealSense D400 Series or standard USB camera - -## Operating Systems -- Ubuntu 16.04 or 18.04 long-term support (LTS), 64-bit: Minimum supported kernel is 4.14 -- CentOS 7.4, 64-bit (for target only) -- Yocto Project Poky Jethro v2.0.3, 64-bit (for target only and requires modifications) - -**Note**: Since **Ubuntu 18.04** in the list is the only one well supported by ROS2 core, it is highly recommended to use as the OS. diff --git a/doc/tables_of_contents/supported_features/Supported_features.md b/doc/tables_of_contents/supported_features/Supported_features.md deleted file mode 100644 index 3117ac71..00000000 --- a/doc/tables_of_contents/supported_features/Supported_features.md +++ /dev/null @@ -1,33 +0,0 @@ -# Supported Features -## Input Resources -Currently, the package supports RGB frame data from several kinds of input resources: -- Standard USB Camera -- Realsense Camera -- Image Topic -- Image File -- Video File - -See more from [the input resource description](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/input_resource.md). 
- -## Inference Implementations -Inferences shown in below list are supported: -- Face Detection -- Emotion Recognition -- Age and Gender Recognition -- Head Pose Estimation -- Object Detection -- Vehicle and License Detection -- Object Segmentation -- Person Re-Identification -- Face Re-Identification - -[Inference functionality overview](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/inference_functionality_overview.md). - -## Output Types -The inference results can be output in several types. One or more types can be enabled for any infernece pipeline: -- Topic Publishing -- Image View Window -- RViz Showing -- Service (as a mechanism responding user's request about object detection results.) - -See more from [output types](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/output_types.md) page. diff --git a/doc/tables_of_contents/supported_features/inference_functionality_overview.md b/doc/tables_of_contents/supported_features/inference_functionality_overview.md deleted file mode 100644 index 35afb571..00000000 --- a/doc/tables_of_contents/supported_features/inference_functionality_overview.md +++ /dev/null @@ -1,16 +0,0 @@ -# Infernece Feature List -Currently, the inference feature list is supported: - -|Inference Label|Description|Outputs Topic| -|---|---|---| -|FaceDetection|Object Detection task applied to face recognition using a sequence of neural networks.|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|EmotionRecognition| Emotion recognition based on detected face image.|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))| -|AgeGenderRecognition| Age and gener recognition based on detected face 
image.|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|HeadPoseEstimation| Head pose estimation based on detected face image.|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|ObjectDetection| object detection based on SSD-based trained models.|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|VehicleAttribsDetection| Vehicle detection based on Intel models.|```/ros2_openvino_toolkit/detected_vehicles_attribs```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|LicensePlateDetection| License detection based on Intel models.|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| -|ObjectSegmentation| object detection and segmentation.|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|PersonReidentification| Person Reidentification based on object detection.|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|LandmarksDetection| Landmark regression based on face 
detection.|```/ros2_openvino_toolkit/detected_landmarks```([people_msgs::msg::LandmarkStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LandmarkStamped.msg))| -|FaceReidentification| Face Reidentification based on face detection.|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| diff --git a/doc/tables_of_contents/supported_features/input_resource.md b/doc/tables_of_contents/supported_features/input_resource.md deleted file mode 100644 index 43cd3af0..00000000 --- a/doc/tables_of_contents/supported_features/input_resource.md +++ /dev/null @@ -1,8 +0,0 @@ -# Full list of supported Input Resources -|Input Resource Name|Description| -|---|-------------------------------------------| -|StandardCamera|Any RGB camera with USB port supporting. Currently only the first USB camera if many are connected.| -|RealSenseCamera| Intel RealSense RGB-D Camera,directly calling RealSense Camera via librealsense plugin of openCV.| -|RealSenseCameraTopic| any ROS topic which is structured in image message.The topic to be inputted must be remapped to name ```/openvino_toolkit/image_raw```(type [sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg))| -|Image| Any image file which can be parsed by openCV, such as .png, .jpeg.| -|Video| Any video file which can be parsed by openCV.| \ No newline at end of file diff --git a/doc/tables_of_contents/supported_features/output_types.md b/doc/tables_of_contents/supported_features/output_types.md deleted file mode 100644 index 315c0cb9..00000000 --- a/doc/tables_of_contents/supported_features/output_types.md +++ /dev/null @@ -1,43 +0,0 @@ -# Output Types ->> The inference results can be output in several types. One or more types can be enabled for any inference pipeline. 
-## Topic Publishing ->> Specific topic(s) can be generated and published according to the given inference functionalities.
- -|Inference|Published Topic| -|---|---| -|People Detection|```/ros2_openvino_toolkit/face_detection```([object_msgs:msg:ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Emotion Recognition|```/ros2_openvino_toolkit/emotions_recognition```([people_msgs:msg:EmotionsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/EmotionsStamped.msg))|/ros2_openvino_toolkit/face_detection(object_msgs:msg:ObjectsInBoxes) -|Age and Gender Recognition|```/ros2_openvino_toolkit/age_genders_Recognition```([people_msgs:msg:AgeGenderStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/AgeGenderStamped.msg))| -|Head Pose Estimation|```/ros2_openvino_toolkit/headposes_estimation```([people_msgs:msg:HeadPoseStamped](https://github.com/intel/ros2_openvino_toolkit/blob/master/people_msgs/msg/HeadPoseStamped.msg))| -|Object Detection|```/ros2_openvino_toolkit/detected_objects```([object_msgs::msg::ObjectsInBoxes](https://github.com/intel/ros2_object_msgs/blob/master/msg/ObjectsInBoxes.msg))| -|Object Segmentation|```/ros2_openvino_toolkit/segmented_obejcts```([people_msgs::msg::ObjectsInMasks](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ObjectsInMasks.msg))| -|Person Reidentification|```/ros2_openvino_toolkit/reidentified_persons```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Face Reidenfication|```/ros2_openvino_toolkit/reidentified_faces```([people_msgs::msg::ReidentificationStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/ReidentificationStamped.msg))| -|Vehicle Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::VehicleAttribsStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/VehicleAttribsStamped.msg))| -|Vehicle 
License Detection|```/ros2_openvino_toolkit/detected_license_plates```([people_msgs::msg::LicensePlateStamped](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/msg/LicensePlateStamped.msg))| - -## Image View Window ->> The original image and the inference results are rendered together and shown in a CV window. -## RViz Showing ->> The Rendered image (rendering inference results into the original image) was transformed into sensor_msgs::msg::Image topic, that can be shown in RViz application. -- RViz Published Topic -```/ros2_openvino_toolkit/image_rviz```([sensor_msgs::msg::Image](https://github.com/ros2/common_interfaces/blob/master/sensor_msgs/msg/Image.msg)) - -## Service ->> Several ROS2 Services are created, expecting to be used in client/server mode, especially when synchronously getting inference results for a given image frame or when managing inference pipeline's lifecycle.
- -- **Face Detection or Object Detection for a given Image file** - -|Inference|Service| -|---|---| -|Object Detection Service|```/detect_object```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Face Detection Service|```/detect_face```([object_msgs::srv::DetectObject](https://github.com/intel/ros2_object_msgs/blob/master/srv/DetectObject.srv))| -|Age Gender Detection Service|```/detect_age_gender```([people_msgs::srv::AgeGender](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/AgeGender.srv))| -|Headpose Detection Service|```/detect_head_pose```([people_msgs::srv::HeadPose](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/HeadPose.srv))| -|Emotion Detection Service|```/detect_emotion```([people_msgs::srv::Emotion](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/people_msgs/srv/Emotion.srv))| - -- **Inference Pipeline Lifecycle Management** - - Create new pipeline - - Start/Stop/Pause a pipeline - - Get pipeline list or status - diff --git a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md b/doc/tables_of_contents/tutorials/Multiple_Pipelines.md deleted file mode 100644 index cd03aec7..00000000 --- a/doc/tables_of_contents/tutorials/Multiple_Pipelines.md +++ /dev/null @@ -1,54 +0,0 @@ -# Multiple Pipelines ->> This is a way to run more than one pipeline in the same process.Having multiple pipelines in a single instance allows each pipeline to have custom configuration and different performance. - -## prerequest -see [this guide](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/tutorials/configuration_file_customization.md) to see how to customize a pipeline. 
- -## A demo for multiple pipeline -```bash -1 Pipelines: - 2 - name: object1 - 3 inputs: [StandardCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 7 engine: CPU - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: StandardCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - 22 - 23 - name: object2 - 24 inputs: [RealSenseCamera] - 25 infers: - 26 - name: ObjectDetection - 27 model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml - 28 engine: CPU - 29 label: to/be/set/xxx.labels - 30 batch: 1 - 31 confidence_threshold: 0.5 - 32 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 33 outputs: [ImageWindow, RosTopic, RViz] - 34 connects: - 35 - left: RealSenseCamera - 36 right: [ObjectDetection] - 37 - left: ObjectDetection - 38 right: [ImageWindow] - 39 - left: ObjectDetection - 40 right: [RosTopic] - 41 - left: ObjectDetection - 42 right: [RViz] - 43 - 44 OpenvinoCommon: - -``` diff --git a/doc/tables_of_contents/tutorials/configuration_file_customization.md b/doc/tables_of_contents/tutorials/configuration_file_customization.md deleted file mode 100644 index 703459b6..00000000 --- a/doc/tables_of_contents/tutorials/configuration_file_customization.md +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration File Customization - -One of the key added values of ROS2 OpenVINO is automatically create new pipeline on 
demand according to the given configuration files. In order to create new pipelines, the end user only need to create a new configuration file or update one already existed. The configuration file must be written by following some rules. - - 1 Pipelines: - 2 - name: object - 3 inputs: [RealSenseCamera] - 4 infers: - 5 - name: ObjectDetection - 6 model: /opt/intel/openvino/deployment_tools/tools/model_downloader/object_detection/common/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml - 7 engine: MYRIAD - 8 label: to/be/set/xxx.labels - 9 batch: 1 - 10 confidence_threshold: 0.5 - 11 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - 12 outputs: [ImageWindow, RosTopic, RViz] - 13 connects: - 14 - left: RealSenseCamera - 15 right: [ObjectDetection] - 16 - left: ObjectDetection - 17 right: [ImageWindow] - 18 - left: ObjectDetection - 19 right: [RosTopic] - 20 - left: ObjectDetection - 21 right: [RViz] - -In this sample, a pipeline is to be created with this topology: - -```flow -input=operation: RealSenseCamera -infer=operation: ObjectDetection -output1=operation: ImageWindow -output2=operation: RosTopic -output3=operation: RViz - -input-infer-output1 -infer-output2 -infer-output3 -``` - -Detail Description for each line shows in below tabel: - -|Line No.|Description| -|-------------|---| -| 1 |Keyword, label for pipeline parameters. The pipeline configuration must be started by this line.| -|2|Pipeline name, the published topics bound to this name. (e.g. /openvino_toolkit/**object**/face_detection)| -|3|The name of chosen input device, should be one and only one of [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#input-resources) (taking the item "Input Resource Name").| -|4|key word for inference section. 
one or more inferences can be included in a pipeline's inference section.| -|5|The name of Inference instance, should be in [the list](https://github.com/intel/ros2_openvino_toolkit/blob/doc-ov.2020.3/doc/tables_of_contents/supported_features/Supported_features.md#inference-implementations).
**NOTE**: if a pipeline contains 2 or more inference instances, the first one should be a detection inference. -|6|Model description file with absolute path, generated by model_optimizer tool| -|7|The name of Inference engine, should be one of:CPU, GPU and MYRIAD.| -|8|The file name with absolute path of object labels.
**NOTE**: not enabled in the current version. The labels file with the same name as model description file under the same folder is searched and used.| -|9|The number of input data to be enqueued and handled by inference engine in parallel.| -|10|Set the inference result filtering by confidence ratio.| -|11|set *enable_roi_constraint* to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame.| -|12|A list of output method enabled for inference result showing/notifying. Should be one or some of:
• ImageWindow
• RosTopic
• Rviz
• RosService(*)
**NOTE**: RosService can only be used in ROS2 service server pipeline.| -|13|keyword for pipeline entities' relationship topology.| -|14~21|The detailed connection topology for the pipeline.
A pair of "left" and "right" parameters, whose contents are the names of inputs(line3), infers(line5) and outputs(line12) defines a connection between the two entities, it also defines that the data would be moved from *entity left* to *entity right*.| diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..5a53d585 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,67 @@ +# ros2 openvino toolkit env master f1b1ca4d914186a1881b87f103be9c6e910c9d80 + +ARG ROS_PRE_INSTALLED_PKG +FROM osrf/ros:${ROS_PRE_INSTALLED_PKG} +ARG VERSION + +# setting proxy env --option +# If needed, enable the below ENV setting by correct proxies. +# ENV HTTP_PROXY="your_proxy" +# ENV HTTPS_PROXY="your_proxy" +# ENV FTP_PROXY="your_proxy" + +# author information +LABEL author="Jiawei Wu " + +# default shell type +SHELL ["/bin/bash", "-c"] + +# ignore the warning +ARG DEBIAN_FRONTEND=noninteractive +ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 +RUN apt-get update && apt-get install --assume-yes apt-utils + +# install openvino 2022.3 +# https://docs.openvino.ai/2022.3/openvino_docs_install_guides_installing_openvino_apt.html +RUN apt update && apt install --assume-yes curl wget gnupg2 lsb-release +RUN wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && \ +apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB && echo "deb https://apt.repos.intel.com/openvino/2022 focal main" | tee /etc/apt/sources.list.d/intel-openvino-2022.list +RUN apt update && apt-cache search openvino && apt install -y openvino-2022.3.0 + +# install librealsense2 +RUN apt-get install -y --no-install-recommends \ +software-properties-common +# https://github.com/IntelRealSense/librealsense/blob/master/doc/distribution_linux.md +# Make sure you set http-proxy in below commands if your environment needs. 
+# RUN apt-key adv --keyserver-options http-proxy=your_proxy --keyserver keys.gnupg.net --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver-options http-proxy=your_proxy --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE +RUN add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u \ +&& apt-get install -y --no-install-recommends \ +librealsense2-dkms \ +librealsense2-utils \ +librealsense2-dev \ +librealsense2-dbg \ +libgflags-dev \ +libboost-all-dev \ +&& rm -rf /var/lib/apt/lists/* + +# other dependencies +RUN apt-get update && apt-get install -y python3-pip && python3 -m pip install -U \ +numpy \ +networkx \ +pyyaml \ +requests \ +&& apt-get install -y --no-install-recommends libboost-all-dev +WORKDIR /usr/lib/x86_64-linux-gnu +RUN pip install --upgrade pip + +# build ros2 openvino toolkit +WORKDIR /root +RUN mkdir -p catkin_ws/src +WORKDIR /root/catkin_ws/src +RUN git init && git clone https://github.com/intel/ros2_object_msgs.git \ +&& git clone -b ros2 https://github.com/intel/ros2_openvino_toolkit.git +RUN apt-get install ros-${VERSION}-diagnostic-updater +WORKDIR /root/catkin_ws +RUN source /opt/ros/${VERSION}/setup.bash && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release + diff --git a/docker/docker_instructions_ov2.0.md b/docker/docker_instructions_ov2.0.md new file mode 100644 index 00000000..c9cdd202 --- /dev/null +++ b/docker/docker_instructions_ov2.0.md @@ -0,0 +1,130 @@ +# Run Docker Images For ROS2_OpenVINO_Toolkit + +**NOTE:** +Below steps have been tested on **Ubuntu 20.04**. +Supported ROS2 versions include foxy and galactic. + +## 1. Environment Setup +* Install docker.
+Refer to: [Docker_install_guide](https://docs.docker.com/engine/install/ubuntu/) + +## 2. Build docker image by dockerfile +``` +cd ~/ros2_openvino_toolkit/docker/Dockerfile +vi ~/ros2_openvino_toolkit/docker/Dockerfile +docker build --build-arg ROS_PRE_INSTALLED_PKG= --build-arg VERSION= --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_openvino_202203 . +``` +For example: +* Build image for ros_galactic +``` +cd ~/ros2_openvino_toolkit/docker/Dockerfile +vi ~/ros2_openvino_toolkit/docker/Dockerfile +docker build --build-arg ROS_PRE_INSTALLED_PKG=galactic-desktop --build-arg VERSION=galactic --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_galactic_openvino_202203 . +``` +* Build image for ros_foxy +``` +cd ~/ros2_openvino_toolkit/docker/Dockerfile +vi ~/ros2_openvino_toolkit/docker/Dockerfile +docker build --build-arg ROS_PRE_INSTALLED_PKG=foxy-desktop --build-arg VERSION=foxy --build-arg "HTTP_PROXY=set_your_proxy" -t ros2_foxy_openvino_202203 . +``` + +## 3. Download and load docker image +* Download docker image +``` + # ros2_openvino_202203 for demo + cd ~/Downloads/ + wget +``` +* Load docker image +``` +cd ~/Downloads/ +docker load -i +docker images +// (show in the list) +``` + +## 4. Running the Demos +* Install dependency +``` + sudo apt install x11-xserver-utils + xhost + +``` +* Run docker image +``` + docker images + docker run -itd  -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix -v /dev:/dev  --privileged=true --name +``` +* In Docker Container + +* Preparation +``` +source /opt/ros//setup.bash +cd ~/catkin_ws +source ./install/local_setup.bash +``` + +* See all available models +OMZ tools are provided for downloading and converting OMZ models in ov2022.
+Refer to: [OMZtool_guide](https://pypi.org/project/openvino-dev/) + +``` +omz_downloader --print_all +``` + +* Download the optimized Intermediate Representation (IR) of model (execute once), for example: +``` +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list +omz_downloader --list download_model.lst -o /opt/openvino_toolkit/models/ +``` + +* If the model (tensorflow, caffe, MXNet, ONNX, Kaldi) need to be converted to intermediate representation (such as the model for object detection): +``` +cd ~/catkin_ws/src/ros2_openvino_toolkit/data/model_list +omz_converter --list convert_model.lst -d /opt/openvino_toolkit/models/ -o /opt/openvino_toolkit/models/convert +``` +* Copy label files (execute once) +**Note**:Need to make label_dirs if skip steps for set output_dirs above. +``` +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/face_detection/face-detection-adas-0001.labels /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/emotions-recognition/FP32/emotions-recognition-retail-0003.labels /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP32/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_segmentation/frozen_inference_graph.labels /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/ +sudo cp ~/catkin_ws/src/ros2_openvino_toolkit/data/labels/object_detection/vehicle-license-plate-detection-barrier-0106.labels /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32 +``` + +* Check the parameter configuration in 
ros2_openvino_toolkit/sample/param/xxxx.yaml before lauching, make sure parameters such as model_path, label_path and input_path are set correctly. Please refer to the quick start document for [yaml configuration guidance](../doc/quick_start/yaml_configuration_guide.md) for detailed configuration guidance. + * run face detection sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_people.launch.py + ``` + * run person reidentification sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_reidentification.launch.py + ``` + * run face detection sample code input from Image. + ``` + ros2 launch openvino_node pipeline_image.launch.py + ``` + * run object segmentation sample code input from RealSenseCameraTopic. + ``` + ros2 launch openvino_node pipeline_segmentation.launch.py + ``` + * run object segmentation sample code input from Image. + ``` + ros2 launch openvino_node pipeline_segmentation_image.launch.py + ``` + * run vehicle detection sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_vehicle_detection.launch.py + ``` + * run person attributes sample code input from StandardCamera. + ``` + ros2 launch openvino_node pipeline_person_attributes.launch.py + ``` + +# More Information +* ROS2 OpenVINO discription writen in Chinese: https://mp.weixin.qq.com/s/BgG3RGauv5pmHzV_hkVAdw + +###### *Any security issue should be reported using process at https://01.org/security* + diff --git a/dynamic_vino_lib/src/models/emotion_detection_model.cpp b/dynamic_vino_lib/src/models/emotion_detection_model.cpp deleted file mode 100644 index 0c4f78e0..00000000 --- a/dynamic_vino_lib/src/models/emotion_detection_model.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * @brief a header file with declaration of EmotionDetectionModel class - * @file emotion_detection_model.cpp - */ -#include - -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" - -// Validated Emotions Detection Network -Models::EmotionDetectionModel::EmotionDetectionModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::EmotionDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; - return false; - } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); - - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { - // throw std::logic_error("Age/Gender Recognition network should have two output layers"); - slog::warn << "This model should have and only have 1 output, but we got " - << 
std::to_string(output_info_map.size()) << "outputs" << slog::endl; - return false; - } - ///InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - ///slog::info << "Emotions layer: " << output_data_ptr->getCreatorLayer().lock()->name << - /// slog::endl; - ///output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - ///output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); - addOutputInfo("output", output_info_map.begin()->first); - - printAttribute(); - return true; ///verifyOutputLayer(output_data_ptr); -} - -bool Models::EmotionDetectionModel::verifyOutputLayer(const InferenceEngine::DataPtr & ptr) -{ -/// if (ptr->getCreatorLayer().lock()->type != "SoftMax") { -/// slog::err <<"In Emotion network, gender layer (" -/// << ptr->getCreatorLayer().lock()->name -/// << ") should be a SoftMax, but was: " -/// << ptr->getCreatorLayer().lock()->type -/// << slog::endl; -/// return false; -/// } - - return true; -} - -const std::string Models::EmotionDetectionModel::getModelCategory() const -{ - return "Emotions Detection"; -} diff --git a/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp b/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp deleted file mode 100644 index df1388ab..00000000 --- a/dynamic_vino_lib/src/models/object_detection_yolov2_model.cpp +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * @brief a header file with declaration of ObjectDetectionModel class - * @file object_detection_yolov2_model.cpp - */ - -#include "dynamic_vino_lib/models/object_detection_yolov2_model.hpp" -#include -#include -#include -#include -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/object_detection.hpp" - -// Validated Object Detection Network -Models::ObjectDetectionYolov2Model::ObjectDetectionYolov2Model( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: ObjectDetectionModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::ObjectDetectionYolov2Model::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Yolo-like, which has only one input, but we got " - << std::to_string(input_info_map.size()) << "inputs" << slog::endl; - return false; - } - - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - input_info_ = input_info; - addInputInfo("input", input_info_map.begin()->first); - - // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { - slog::warn << "This model seems not Yolo-like! We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." 
- << slog::endl; - return false; - } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); - addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first - << slog::endl; - -#if(0) /// - const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); - // output layer should have attribute called num_classes - slog::info << "Checking Object Detection num_classes" << slog::endl; - if (output_layer == nullptr || - output_layer->params.find("classes") == output_layer->params.end()) { - slog::warn << "This model's output layer (" << output_info_map.begin()->first - << ") should have num_classes integer attribute" << slog::endl; - return false; - } - // class number should be equal to size of label vector - // if network has default "background" class, fake is used - const int num_classes = output_layer->GetParamAsInt("classes"); - slog::info << "Checking Object Detection output ... 
num_classes=" << num_classes << slog::endl; - if (getLabels().size() != num_classes) { - if (getLabels().size() == (num_classes - 1)) { - getLabels().insert(getLabels().begin(), "fake"); - } else { - getLabels().clear(); - } - } -#endif - - // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); - setMaxProposalCount(static_cast(output_dims[2])); - slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - - auto object_size = static_cast(output_dims[3]); - if (object_size != 33) { - slog::warn << "This model is NOT Yolo-like, whose output data for each detected object" - << "should have 7 dimensions, but was " << std::to_string(object_size) - << slog::endl; - return false; - } - setObjectSize(object_size); - - if (output_dims.size() != 2) { - slog::warn << "This model is not Yolo-like, output dimensions shoulld be 2, but was" - << std::to_string(output_dims.size()) << slog::endl; - return false; - } - - printAttribute(); - slog::info << "This model is Yolo-like, Layer Property updated!" << slog::endl; - return true; -} - -const std::string Models::ObjectDetectionYolov2Model::getModelCategory() const -{ - return "Object Detection Yolo v2"; -} - -bool Models::ObjectDetectionYolov2Model::enqueue( - const std::shared_ptr & engine, - const cv::Mat & frame, - const cv::Rect & input_frame_loc) -{ - setFrameSize(frame.cols, frame.rows); - - if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) { - return false; - } - return true; -} - -bool Models::ObjectDetectionYolov2Model::matToBlob( - const cv::Mat & orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr & engine) -{ - if (engine == nullptr) { - slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; - return false; - } - - std::string input_name = getInputName(); - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); - - InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - float * blob_data = input_blob->buffer().as(); - - - int dx = 0; - int dy = 0; - int srcw = 0; - int srch = 0; - - int IH = height; - int IW = width; - - cv::Mat image = orig_image.clone(); - cv::cvtColor(image, image, cv::COLOR_BGR2RGB); - - image.convertTo(image, CV_32F, 1.0 / 255.0, 0); - srcw = image.size().width; - srch = image.size().height; - - cv::Mat resizedImg(IH, IW, CV_32FC3); - resizedImg = cv::Scalar(0.5, 0.5, 0.5); - int imw = image.size().width; - int imh = image.size().height; - float resize_ratio = static_cast(IH) / static_cast(std::max(imw, imh)); - cv::resize(image, image, cv::Size(imw * resize_ratio, imh * resize_ratio)); - - int new_w = imw; - int new_h = imh; - if ((static_cast(IW) / imw) < (static_cast(IH) / imh)) { - new_w = IW; - new_h = (imh * IW) / imw; - } else { - new_h = IH; - new_w = (imw * IW) / imh; - } - dx = (IW - new_w) / 2; - dy = (IH - new_h) / 2; - - imh = image.size().height; - imw = image.size().width; - - for (int row = 0; row < imh; row++) { - for (int col = 0; col < imw; col++) { - for (int ch = 0; ch < 3; ch++) { - resizedImg.at(dy + row, dx + col)[ch] = image.at(row, col)[ch]; - } - } - } - - for (int c = 0; c < channels; c++) { - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { - blob_data[c * width * height + h * width + w] = resizedImg.at(h, w)[c]; - } - } - } - - setFrameSize(srcw, srch); - return true; -} - -bool Models::ObjectDetectionYolov2Model::fetchResults( - const std::shared_ptr & engine, - std::vector & results, - const float & confidence_thresh, - const bool & enable_roi_constraint) -{ - try { - if (engine == nullptr) { - slog::err 
<< "Trying to fetch results from Engines." << slog::endl; - return false; - } - - InferenceEngine::InferRequest::Ptr request = engine->getRequest(); - - std::string output = getOutputName(); - std::vector & labels = getLabels(); - const float * detections = - request->GetBlob(output)->buffer().as::value_type *>(); - ///InferenceEngine::CNNLayerPtr layer = - /// getNetReader()->getNetwork().getLayerByName(output.c_str()); - int input_height = input_info_->getTensorDesc().getDims()[2]; - int input_width = input_info_->getTensorDesc().getDims()[3]; - - // --------------------------- Validating output parameters -------------------------------- - ///if (layer != nullptr && layer->type != "RegionYolo") { - /// throw std::runtime_error("Invalid output type: " + layer->type + ". RegionYolo expected"); - ///} - // --------------------------- Extracting layer parameters -------------------------------- - const int num = 3; ///layer->GetParamAsInt("num"); - const int coords = 9; ///layer->GetParamAsInt("coords"); - const int classes = 21; ///layer->GetParamAsInt("classes"); - auto blob = request->GetBlob(output); - const int out_blob_h = static_cast(blob->getTensorDesc().getDims()[2]);; - - std::vector anchors = { - 0.572730, 0.677385, - 1.874460, 2.062530, - 3.338430, 5.474340, - 7.882820, 3.527780, - 9.770520, 9.168280 - }; - auto side = out_blob_h; - - auto side_square = side * side; - // --------------------------- Parsing YOLO Region output ------------------------------------- - std::vector raw_results; - for (int i = 0; i < side_square; ++i) { - int row = i / side; - int col = i % side; - - for (int n = 0; n < num; ++n) { - int obj_index = getEntryIndex(side, coords, classes, n * side * side + i, coords); - int box_index = getEntryIndex(side, coords, classes, n * side * side + i, 0); - - float scale = detections[obj_index]; - - if (scale < confidence_thresh) { - continue; - } - - float x = (col + detections[box_index + 0 * side_square]) / side * input_width; - float y 
= (row + detections[box_index + 1 * side_square]) / side * input_height; - float height = std::exp(detections[box_index + 3 * side_square]) * anchors[2 * n + 1] / - side * input_height; - float width = std::exp(detections[box_index + 2 * side_square]) * anchors[2 * n] / side * - input_width; - - for (int j = 0; j < classes; ++j) { - int class_index = - getEntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j); - - float prob = scale * detections[class_index]; - if (prob < confidence_thresh) { - continue; - } - - float x_min = x - width / 2; - float y_min = y - height / 2; - - auto frame_size = getFrameSize(); - float x_min_resized = x_min / input_width * frame_size.width; - float y_min_resized = y_min / input_height * frame_size.height; - float width_resized = width / input_width * frame_size.width; - float height_resized = height / input_height * frame_size.height; - - cv::Rect r(x_min_resized, y_min_resized, width_resized, height_resized); - Result result(r); - // result.label_ = j; - std::string label = j < - labels.size() ? labels[j] : std::string("label #") + std::to_string(j); - result.setLabel(label); - - result.setConfidence(prob); - raw_results.emplace_back(result); - } - } - } - - std::sort(raw_results.begin(), raw_results.end()); - for (unsigned int i = 0; i < raw_results.size(); ++i) { - if (raw_results[i].getConfidence() == 0) { - continue; - } - for (unsigned int j = i + 1; j < raw_results.size(); ++j) { - auto iou = dynamic_vino_lib::ObjectDetection::calcIoU( - raw_results[i].getLocation(), raw_results[j].getLocation()); - if (iou >= 0.45) { - raw_results[j].setConfidence(0); - } - } - } - - for (auto & raw_result : raw_results) { - if (raw_result.getConfidence() < confidence_thresh) { - continue; - } - - results.push_back(raw_result); - } - - raw_results.clear(); - - return true; - } catch (const std::exception & error) { - slog::err << error.what() << slog::endl; - return false; - } catch (...) 
{ - slog::err << "Unknown/internal exception happened." << slog::endl; - return false; - } -} - -int Models::ObjectDetectionYolov2Model::getEntryIndex( - int side, int lcoords, int lclasses, - int location, int entry) -{ - int n = location / (side * side); - int loc = location % (side * side); - return n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc; -} diff --git a/dynamic_vino_lib/src/models/object_segmentation_model.cpp b/dynamic_vino_lib/src/models/object_segmentation_model.cpp deleted file mode 100644 index ab4797f0..00000000 --- a/dynamic_vino_lib/src/models/object_segmentation_model.cpp +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * @brief a header file with declaration of ObjectSegmentationModel class - * @file object_segmentation_model.cpp - */ -#include -#include -#include -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -// Validated Object Segmentation Network -Models::ObjectSegmentationModel::ObjectSegmentationModel( - const std::string & label_loc, - const std::string & model_loc, - int max_batch_size) - : BaseModel(label_loc, model_loc, max_batch_size) -{ -} - -bool Models::ObjectSegmentationModel::enqueue( - const std::shared_ptr &engine, - const cv::Mat &frame, - const cv::Rect &input_frame_loc) -{ - if (engine == nullptr) - { - slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; - return false; - } - - for (const auto &inputInfoItem : input_info_) - { - // Fill first input tensor with images. First b channel, then g and r channels - slog::debug<<"first tensor"<getTensorDesc().getDims().size()<getTensorDesc().getDims().size()==4) - { - matToBlob(frame, input_frame_loc, 1.0, 0, engine); - } - - // Fill second input tensor with image info - if (inputInfoItem.second->getTensorDesc().getDims().size() == 2) - { - InferenceEngine::Blob::Ptr input = engine->getRequest()->GetBlob(inputInfoItem.first); - auto data = input->buffer().as::value_type *>(); - data[0] = static_cast(frame.rows); // height - data[1] = static_cast(frame.cols); // width - data[2] = 1; - } - } - return true; - -} - -bool Models::ObjectSegmentationModel::matToBlob( - const cv::Mat &orig_image, const cv::Rect &, float scale_factor, - int batch_index, const std::shared_ptr &engine) -{ - (void)scale_factor; - (void)batch_index; - - if (engine == nullptr) - { - slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; - return false; - } - - size_t channels = orig_image.channels(); - size_t height = orig_image.size().height; - size_t width = orig_image.size().width; - - size_t strideH = orig_image.step.buf[0]; - size_t strideW = orig_image.step.buf[1]; - - bool is_dense = - strideW == channels && - strideH == channels * width; - - if (!is_dense){ - slog::err << "Doesn't support conversion from not dense cv::Mat." << slog::endl; - return false; - } - - InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8, - {1, channels, height, width}, - InferenceEngine::Layout::NHWC); - - auto shared_blob = InferenceEngine::make_shared_blob(tDesc, orig_image.data); - engine->getRequest()->SetBlob(getInputName(), shared_blob); - - return true; -} - -const std::string Models::ObjectSegmentationModel::getModelCategory() const -{ - return "Object Segmentation"; -} - -bool Models::ObjectSegmentationModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ - slog::info<< "Checking INPUTS for Model" <second; - slog::debug<<"channel size"<second; - inputInfo.getPreProcess().setResizeAlgorithm(InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR); - inputInfo.setLayout(InferenceEngine::Layout::NHWC); - inputInfo.setPrecision(InferenceEngine::Precision::U8); - - //InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - //addInputInfo("input", input_info_map.begin()->first.c_str()); - addInputInfo("input", inputShapes.begin()->first); - - InferenceEngine::OutputsDataMap outputsDataMap = network.getOutputsInfo(); - if (outputsDataMap.size() != 1) { - //throw std::runtime_error("Demo supports topologies only with 1 output"); - slog::warn << "This inference sample should have only one output, but we got" - << std::to_string(outputsDataMap.size()) << "outputs" - << slog::endl; - return false; - } - - InferenceEngine::Data & data = *outputsDataMap.begin()->second; - data.setPrecision(InferenceEngine::Precision::FP32); - - const 
InferenceEngine::SizeVector& outSizeVector = data.getTensorDesc().getDims(); - int outChannels, outHeight, outWidth; - slog::debug << "output size vector " << outSizeVector.size() << slog::endl; - switch(outSizeVector.size()){ - case 3: - outChannels = 0; - outHeight = outSizeVector[1]; - outWidth = outSizeVector[2]; - break; - case 4: - outChannels = outSizeVector[1]; - outHeight = outSizeVector[2]; - outWidth = outSizeVector[3]; - break; - default: - throw std::runtime_error("Unexpected output blob shape. Only 4D and 3D output blobs are" - "supported."); - - } - if(outHeight == 0 || outWidth == 0){ - slog::err << "output_height or output_width is not set, please check the MaskOutput Info " - << "is set correctly." << slog::endl; - //throw std::runtime_error("output_height or output_width is not set, please check the MaskOutputInfo"); - return false; - } - - slog::debug << "output width " << outWidth<< slog::endl; - slog::debug << "output hEIGHT " << outHeight<< slog::endl; - slog::debug << "output CHANNALS " << outChannels<< slog::endl; - addOutputInfo("masks", (outputsDataMap.begin()++)->first); - addOutputInfo("detection", outputsDataMap.begin()->first); - - //const InferenceEngine::CNNLayerPtr output_layer = - //network.getLayerByName(outputsDataMap.begin()->first.c_str()); - ///const InferenceEngine::CNNLayerPtr output_layer = - /// network.getLayerByName(getOutputName("detection").c_str()); - //const int num_classes = output_layer->GetParamAsInt("num_classes"); - //slog::info << "Checking Object Segmentation output ... 
num_classes=" << num_classes << slog::endl; - -#if 0 - if (getLabels().size() != num_classes) - { - if (getLabels().size() == (num_classes - 1)) - { - getLabels().insert(getLabels().begin(), "fake"); - } - else - { - getLabels().clear(); - } - } -#endif -/* - const InferenceEngine::SizeVector output_dims = data.getTensorDesc().getDims(); - setMaxProposalCount(static_cast(output_dims[2])); - slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; - auto object_size = static_cast(output_dims[3]); - setObjectSize(object_size); - - slog::debug << "model size" << output_dims.size() << slog::endl;*/ - printAttribute(); - slog::info << "This model is SSDNet-like, Layer Property updated!" << slog::endl; - return true; - -} diff --git a/dynamic_vino_lib/src/models/person_reidentification_model.cpp b/dynamic_vino_lib/src/models/person_reidentification_model.cpp deleted file mode 100644 index e9e2834c..00000000 --- a/dynamic_vino_lib/src/models/person_reidentification_model.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2018 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * @brief a header file with declaration of PersonReidentificationModel class - * @file person_reidentification_model.cpp - */ -#include -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -// Validated Person Reidentification Network -Models::PersonReidentificationModel::PersonReidentificationModel( - const std::string & label_loc, const std::string & model_loc, int max_batch_size) -: BaseModel(label_loc, model_loc, max_batch_size) {} -/* -void Models::PersonReidentificationModel::setLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) -{ - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; -} - -void Models::PersonReidentificationModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) {} - -const std::string Models::PersonReidentificationModel::getModelCategory() const -{ - return "Person Reidentification"; -} -*/ -bool Models::PersonReidentificationModel::updateLayerProperty( - InferenceEngine::CNNNetwork& netreader) -{ - slog::info << "Checking Inputs for Model" << getModelName() << slog::endl; - - auto network = netreader; - - InferenceEngine::InputsDataMap input_info_map(network.getInputsInfo()); - - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - // set output property - 
InferenceEngine::OutputsDataMap output_info_map( - network.getOutputsInfo()); - // set input and output layer name - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; - - return true; -} - -const std::string Models::PersonReidentificationModel::getModelCategory() const -{ - return "Person Reidentification"; -} diff --git a/pipeline_srv_msgs/CMakeLists.txt b/openvino_msgs/CMakeLists.txt similarity index 95% rename from pipeline_srv_msgs/CMakeLists.txt rename to openvino_msgs/CMakeLists.txt index b05b70bf..30b70b83 100644 --- a/pipeline_srv_msgs/CMakeLists.txt +++ b/openvino_msgs/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.5) -project(pipeline_srv_msgs) +project(openvino_msgs) if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 14) diff --git a/pipeline_srv_msgs/msg/Connection.msg b/openvino_msgs/msg/Connection.msg similarity index 93% rename from pipeline_srv_msgs/msg/Connection.msg rename to openvino_msgs/msg/Connection.msg index 80f63a18..05fecda8 100644 --- a/pipeline_srv_msgs/msg/Connection.msg +++ b/openvino_msgs/msg/Connection.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/pipeline_srv_msgs/msg/Pipeline.msg b/openvino_msgs/msg/Pipeline.msg similarity index 94% rename from pipeline_srv_msgs/msg/Pipeline.msg rename to openvino_msgs/msg/Pipeline.msg index f2b7bab1..d4272961 100644 --- a/pipeline_srv_msgs/msg/Pipeline.msg +++ b/openvino_msgs/msg/Pipeline.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pipeline_srv_msgs/msg/PipelineRequest.msg b/openvino_msgs/msg/PipelineRequest.msg similarity index 94% rename from pipeline_srv_msgs/msg/PipelineRequest.msg rename to openvino_msgs/msg/PipelineRequest.msg index 565ea4fc..0fc9e053 100644 --- a/pipeline_srv_msgs/msg/PipelineRequest.msg +++ b/openvino_msgs/msg/PipelineRequest.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pipeline_srv_msgs/package.xml b/openvino_msgs/package.xml similarity index 96% rename from pipeline_srv_msgs/package.xml rename to openvino_msgs/package.xml index b78f2cfe..85d579d0 100644 --- a/pipeline_srv_msgs/package.xml +++ b/openvino_msgs/package.xml @@ -1,7 +1,7 @@ - pipeline_srv_msgs + openvino_msgs 0.9.0 A package containing pipeline service message definitions. 
Yang Lu diff --git a/pipeline_srv_msgs/srv/PipelineSrv.srv b/openvino_msgs/srv/PipelineSrv.srv similarity index 93% rename from pipeline_srv_msgs/srv/PipelineSrv.srv rename to openvino_msgs/srv/PipelineSrv.srv index fb23dec5..b72935b6 100644 --- a/pipeline_srv_msgs/srv/PipelineSrv.srv +++ b/openvino_msgs/srv/PipelineSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/vino_param_lib/CMakeLists.txt b/openvino_param_lib/CMakeLists.txt similarity index 94% rename from vino_param_lib/CMakeLists.txt rename to openvino_param_lib/CMakeLists.txt index c3cddd8f..8a78469a 100644 --- a/vino_param_lib/CMakeLists.txt +++ b/openvino_param_lib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. cmake_minimum_required(VERSION 3.5) -project(vino_param_lib) +project(openvino_param_lib) find_package(ament_cmake REQUIRED) find_package(yaml_cpp_vendor REQUIRED) @@ -67,8 +67,8 @@ if(UNIX OR APPLE) # Generic flags. 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") - # Dot not forward c++14 flag to GPU beucause it is not supported + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") + # Dot not forward c++17 flag to GPU beucause it is not supported set(CUDA_PROPAGATE_HOST_FLAGS OFF) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") diff --git a/vino_param_lib/include/vino_param_lib/param_manager.hpp b/openvino_param_lib/include/openvino_param_lib/param_manager.hpp similarity index 95% rename from vino_param_lib/include/vino_param_lib/param_manager.hpp rename to openvino_param_lib/include/openvino_param_lib/param_manager.hpp index 8897ba84..558d9359 100644 --- a/vino_param_lib/include/vino_param_lib/param_manager.hpp +++ b/openvino_param_lib/include/openvino_param_lib/param_manager.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief A header file with declaration for parameter management * @file param_manager.hpp */ -#ifndef VINO_PARAM_LIB__PARAM_MANAGER_HPP_ -#define VINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#ifndef OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#define OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ #include #include @@ -148,4 +148,4 @@ class ParamManager // singleton }; } // namespace Params -#endif // VINO_PARAM_LIB__PARAM_MANAGER_HPP_ +#endif // OPENVINO_PARAM_LIB__PARAM_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp b/openvino_param_lib/include/openvino_param_lib/slog.hpp similarity index 96% rename from dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp rename to openvino_param_lib/include/openvino_param_lib/slog.hpp index e9790327..bf350394 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp +++ b/openvino_param_lib/include/openvino_param_lib/slog.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief a header file with logging facility for common samples * @file slog.hpp */ -#ifndef DYNAMIC_VINO_LIB__SLOG_HPP_ -#define DYNAMIC_VINO_LIB__SLOG_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__SLOG_HPP_ +#define OPENVINO_WRAPPER_LIB__SLOG_HPP_ #pragma once @@ -172,4 +172,4 @@ static LogStream warn("WARNING", std::cout, YELLOW); static LogStream err("ERROR", std::cerr, RED); } // namespace slog -#endif // DYNAMIC_VINO_LIB__SLOG_HPP_ +#endif // OPENVINO_WRAPPER_LIB__SLOG_HPP_ diff --git a/vino_param_lib/package.xml b/openvino_param_lib/package.xml similarity index 94% rename from vino_param_lib/package.xml rename to openvino_param_lib/package.xml index 982e42fa..6484dd58 100644 --- a/vino_param_lib/package.xml +++ b/openvino_param_lib/package.xml @@ -1,7 +1,7 @@ - vino_param_lib + openvino_param_lib 0.9.0 Library for ROS2 OpenVINO parameter management Weizhi Liu diff --git a/vino_param_lib/param/pipeline.yaml b/openvino_param_lib/param/pipeline.yaml similarity index 100% rename from vino_param_lib/param/pipeline.yaml rename to openvino_param_lib/param/pipeline.yaml diff --git a/vino_param_lib/src/param_manager.cpp b/openvino_param_lib/src/param_manager.cpp similarity index 98% rename from vino_param_lib/src/param_manager.cpp rename to openvino_param_lib/src/param_manager.cpp index dbc167da..89527c95 100644 --- a/vino_param_lib/src/param_manager.cpp +++ b/openvino_param_lib/src/param_manager.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "vino_param_lib/param_manager.hpp" -#include +#include "openvino_param_lib/param_manager.hpp" +#include #include #include #include diff --git a/people_msgs/CMakeLists.txt b/openvino_people_msgs/CMakeLists.txt similarity index 96% rename from people_msgs/CMakeLists.txt rename to openvino_people_msgs/CMakeLists.txt index 0500babb..d3257b00 100644 --- a/people_msgs/CMakeLists.txt +++ b/openvino_people_msgs/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.5) -project(people_msgs) +project(openvino_people_msgs) if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 14) diff --git a/openvino_people_msgs/COLCON_IGNORE b/openvino_people_msgs/COLCON_IGNORE new file mode 100644 index 00000000..e69de29b diff --git a/people_msgs/msg/AgeGender.msg b/openvino_people_msgs/msg/AgeGender.msg similarity index 94% rename from people_msgs/msg/AgeGender.msg rename to openvino_people_msgs/msg/AgeGender.msg index ad02ad1a..8436e0e5 100644 --- a/people_msgs/msg/AgeGender.msg +++ b/openvino_people_msgs/msg/AgeGender.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/msg/AgeGenderStamped.msg b/openvino_people_msgs/msg/AgeGenderStamped.msg similarity index 87% rename from people_msgs/msg/AgeGenderStamped.msg rename to openvino_people_msgs/msg/AgeGenderStamped.msg index efa0c724..25217127 100644 --- a/people_msgs/msg/AgeGenderStamped.msg +++ b/openvino_people_msgs/msg/AgeGenderStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/AgeGender[] objects +openvino_people_msgs/AgeGender[] objects diff --git a/people_msgs/msg/Emotion.msg b/openvino_people_msgs/msg/Emotion.msg similarity index 94% rename from people_msgs/msg/Emotion.msg rename to openvino_people_msgs/msg/Emotion.msg index 63f9b83f..af1fccb2 100644 --- a/people_msgs/msg/Emotion.msg +++ b/openvino_people_msgs/msg/Emotion.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/EmotionsStamped.msg b/openvino_people_msgs/msg/EmotionsStamped.msg similarity index 87% rename from people_msgs/msg/EmotionsStamped.msg rename to openvino_people_msgs/msg/EmotionsStamped.msg index 1636fc02..78914367 100644 --- a/people_msgs/msg/EmotionsStamped.msg +++ b/openvino_people_msgs/msg/EmotionsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. 
std_msgs/Header header -people_msgs/Emotion[] emotions +openvino_people_msgs/Emotion[] emotions diff --git a/people_msgs/msg/HeadPose.msg b/openvino_people_msgs/msg/HeadPose.msg similarity index 94% rename from people_msgs/msg/HeadPose.msg rename to openvino_people_msgs/msg/HeadPose.msg index 11a717db..c757d8f7 100644 --- a/people_msgs/msg/HeadPose.msg +++ b/openvino_people_msgs/msg/HeadPose.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/HeadPoseStamped.msg b/openvino_people_msgs/msg/HeadPoseStamped.msg similarity index 87% rename from people_msgs/msg/HeadPoseStamped.msg rename to openvino_people_msgs/msg/HeadPoseStamped.msg index 75d97828..de80904a 100644 --- a/people_msgs/msg/HeadPoseStamped.msg +++ b/openvino_people_msgs/msg/HeadPoseStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/HeadPose[] headposes \ No newline at end of file +openvino_people_msgs/HeadPose[] headposes \ No newline at end of file diff --git a/people_msgs/msg/Landmark.msg b/openvino_people_msgs/msg/Landmark.msg similarity index 94% rename from people_msgs/msg/Landmark.msg rename to openvino_people_msgs/msg/Landmark.msg index 48513b54..1c0e24c4 100644 --- a/people_msgs/msg/Landmark.msg +++ b/openvino_people_msgs/msg/Landmark.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/msg/LandmarkStamped.msg b/openvino_people_msgs/msg/LandmarkStamped.msg similarity index 94% rename from people_msgs/msg/LandmarkStamped.msg rename to openvino_people_msgs/msg/LandmarkStamped.msg index 6da0b1c0..2b390576 100644 --- a/people_msgs/msg/LandmarkStamped.msg +++ b/openvino_people_msgs/msg/LandmarkStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/LicensePlate.msg b/openvino_people_msgs/msg/LicensePlate.msg similarity index 94% rename from people_msgs/msg/LicensePlate.msg rename to openvino_people_msgs/msg/LicensePlate.msg index 6ba97f8c..3f128920 100644 --- a/people_msgs/msg/LicensePlate.msg +++ b/openvino_people_msgs/msg/LicensePlate.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/LicensePlateStamped.msg b/openvino_people_msgs/msg/LicensePlateStamped.msg similarity index 86% rename from people_msgs/msg/LicensePlateStamped.msg rename to openvino_people_msgs/msg/LicensePlateStamped.msg index fa4fbc75..04406dac 100644 --- a/people_msgs/msg/LicensePlateStamped.msg +++ b/openvino_people_msgs/msg/LicensePlateStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. 
std_msgs/Header header -people_msgs/LicensePlate[] licenses \ No newline at end of file +openvino_people_msgs/LicensePlate[] licenses \ No newline at end of file diff --git a/people_msgs/msg/ObjectInMask.msg b/openvino_people_msgs/msg/ObjectInMask.msg similarity index 94% rename from people_msgs/msg/ObjectInMask.msg rename to openvino_people_msgs/msg/ObjectInMask.msg index aa981eef..b3e719f1 100644 --- a/people_msgs/msg/ObjectInMask.msg +++ b/openvino_people_msgs/msg/ObjectInMask.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/ObjectsInMasks.msg b/openvino_people_msgs/msg/ObjectsInMasks.msg similarity index 94% rename from people_msgs/msg/ObjectsInMasks.msg rename to openvino_people_msgs/msg/ObjectsInMasks.msg index 28bd3eb2..f59a9f56 100644 --- a/people_msgs/msg/ObjectsInMasks.msg +++ b/openvino_people_msgs/msg/ObjectsInMasks.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/PersonAttribute.msg b/openvino_people_msgs/msg/PersonAttribute.msg similarity index 94% rename from people_msgs/msg/PersonAttribute.msg rename to openvino_people_msgs/msg/PersonAttribute.msg index 12dd2793..3ad0b0a6 100644 --- a/people_msgs/msg/PersonAttribute.msg +++ b/openvino_people_msgs/msg/PersonAttribute.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/msg/PersonAttributeStamped.msg b/openvino_people_msgs/msg/PersonAttributeStamped.msg similarity index 94% rename from people_msgs/msg/PersonAttributeStamped.msg rename to openvino_people_msgs/msg/PersonAttributeStamped.msg index 0fb6dfcd..4738c51a 100644 --- a/people_msgs/msg/PersonAttributeStamped.msg +++ b/openvino_people_msgs/msg/PersonAttributeStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/PersonsStamped.msg b/openvino_people_msgs/msg/PersonsStamped.msg similarity index 78% rename from people_msgs/msg/PersonsStamped.msg rename to openvino_people_msgs/msg/PersonsStamped.msg index 2aae1a08..f8ee471f 100644 --- a/people_msgs/msg/PersonsStamped.msg +++ b/openvino_people_msgs/msg/PersonsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +14,6 @@ std_msgs/Header header object_msgs/ObjectInBox[] faces -people_msgs/Emotion[] emotions -people_msgs/AgeGender[] agegenders -people_msgs/HeadPose[] headposes +openvino_people_msgs/Emotion[] emotions +openvino_people_msgs/AgeGender[] agegenders +openvino_people_msgs/HeadPose[] headposes diff --git a/people_msgs/msg/Reidentification.msg b/openvino_people_msgs/msg/Reidentification.msg similarity index 94% rename from people_msgs/msg/Reidentification.msg rename to openvino_people_msgs/msg/Reidentification.msg index 34cd1156..3e53b92a 100644 --- a/people_msgs/msg/Reidentification.msg +++ b/openvino_people_msgs/msg/Reidentification.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/ReidentificationStamped.msg b/openvino_people_msgs/msg/ReidentificationStamped.msg similarity index 94% rename from people_msgs/msg/ReidentificationStamped.msg rename to openvino_people_msgs/msg/ReidentificationStamped.msg index cba3c33e..d5ebd9fd 100644 --- a/people_msgs/msg/ReidentificationStamped.msg +++ b/openvino_people_msgs/msg/ReidentificationStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2017-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/msg/VehicleAttribs.msg b/openvino_people_msgs/msg/VehicleAttribs.msg similarity index 94% rename from people_msgs/msg/VehicleAttribs.msg rename to openvino_people_msgs/msg/VehicleAttribs.msg index 39b49696..ddc3718f 100644 --- a/people_msgs/msg/VehicleAttribs.msg +++ b/openvino_people_msgs/msg/VehicleAttribs.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/msg/VehicleAttribsStamped.msg b/openvino_people_msgs/msg/VehicleAttribsStamped.msg similarity index 86% rename from people_msgs/msg/VehicleAttribsStamped.msg rename to openvino_people_msgs/msg/VehicleAttribsStamped.msg index 3cdcd47e..4e97859a 100644 --- a/people_msgs/msg/VehicleAttribsStamped.msg +++ b/openvino_people_msgs/msg/VehicleAttribsStamped.msg @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. std_msgs/Header header -people_msgs/VehicleAttribs[] vehicles +openvino_people_msgs/VehicleAttribs[] vehicles diff --git a/people_msgs/package.xml b/openvino_people_msgs/package.xml similarity index 96% rename from people_msgs/package.xml rename to openvino_people_msgs/package.xml index 2ed702fd..6e59ce10 100644 --- a/people_msgs/package.xml +++ b/openvino_people_msgs/package.xml @@ -1,7 +1,7 @@ - people_msgs + openvino_people_msgs 0.9.0 A package containing people message definitions. 
Weizhi Liu diff --git a/people_msgs/srv/AgeGenderSrv.srv b/openvino_people_msgs/srv/AgeGenderSrv.srv similarity index 93% rename from people_msgs/srv/AgeGenderSrv.srv rename to openvino_people_msgs/srv/AgeGenderSrv.srv index 30469660..1a4f0de1 100644 --- a/people_msgs/srv/AgeGenderSrv.srv +++ b/openvino_people_msgs/srv/AgeGenderSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/srv/EmotionSrv.srv b/openvino_people_msgs/srv/EmotionSrv.srv similarity index 93% rename from people_msgs/srv/EmotionSrv.srv rename to openvino_people_msgs/srv/EmotionSrv.srv index 836fbc38..da8f1c57 100644 --- a/people_msgs/srv/EmotionSrv.srv +++ b/openvino_people_msgs/srv/EmotionSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/people_msgs/srv/HeadPoseSrv.srv b/openvino_people_msgs/srv/HeadPoseSrv.srv similarity index 93% rename from people_msgs/srv/HeadPoseSrv.srv rename to openvino_people_msgs/srv/HeadPoseSrv.srv index 17a1eca4..5fd225d5 100644 --- a/people_msgs/srv/HeadPoseSrv.srv +++ b/openvino_people_msgs/srv/HeadPoseSrv.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/people_msgs/srv/People.srv b/openvino_people_msgs/srv/People.srv similarity index 93% rename from people_msgs/srv/People.srv rename to openvino_people_msgs/srv/People.srv index 100142a0..c349b2e9 100644 --- a/people_msgs/srv/People.srv +++ b/openvino_people_msgs/srv/People.srv @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/dynamic_vino_lib/CMakeLists.txt b/openvino_wrapper_lib/CMakeLists.txt similarity index 85% rename from dynamic_vino_lib/CMakeLists.txt rename to openvino_wrapper_lib/CMakeLists.txt index 09adf376..f6ea6126 100644 --- a/dynamic_vino_lib/CMakeLists.txt +++ b/openvino_wrapper_lib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2020 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,20 +14,17 @@ cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_lib) +project(openvino_wrapper_lib) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### #################################### -## by default, new InferenceEngine API (InferenceEngine::Core) is used. -## If the deprecated InferenceEngine API (InferenceEngine::InferencePlugin) -## then, uncomment below line -## add_definitions(-DUSE_OLD_E_PLUGIN_API) +## by default, new OpenVINO API (ov::core) is used. 
#################################### #################################### @@ -36,16 +33,13 @@ set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") add_definitions(-DLOG_LEVEL_DEBUG) #################################### -# environment variable InferenceEngine_DIR can be use instead of relaive path to specify location of configuration file -#set(InferenceEngine_DIR /opt/intel/computer_vision_sdk_2018.2.299/deployment_tools/inference_engine/share) -#set(OpenCV_DIR /opt/intel/computer_vision_sdk_2018.2.299/opencv/share/OpenCV) - +# environment variable OpenVINO_DIR can be use instead of relaive path to specify location of configuration file -message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") -find_package(InferenceEngine REQUIRED) -if(NOT InferenceEngine_FOUND) +find_package(OpenVINO REQUIRED) +if(NOT OpenVINO_FOUND) message(FATAL_ERROR "") endif() +set(OpenVINO_LIBRARIES openvino::runtime) # Find OpenCV libray if exists find_package(OpenCV REQUIRED) @@ -68,20 +62,12 @@ find_package(rmw REQUIRED) find_package(std_msgs REQUIRED) find_package(sensor_msgs REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs REQUIRED) +find_package(openvino_msgs REQUIRED) find_package(class_loader REQUIRED) find_package(cv_bridge REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) -################### -#To be deleted -#set(CpuExtension_lib $ENV{CPU_EXTENSION_LIB}) -#add_library(cpu_extension SHARED IMPORTED) -#set_target_properties(cpu_extension PROPERTIES -# IMPORTED_LOCATION $ENV{CPU_EXTENSION_LIB}) - if("${CMAKE_BUILD_TYPE}" STREQUAL "") message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used") set(CMAKE_BUILD_TYPE "Release") @@ -175,17 +161,15 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-deprecated-de # Properties->C/C++->General->Additional Include Directories 
include_directories( - # ${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader ${CMAKE_CURRENT_SOURCE_DIR}/include - ${InferenceEngine_INCLUDE_DIRS} - #${realsense2_INCLUDE_DIRS} + ${OpenVINO_INCLUDE_DIRS} ) if(UNIX) set(LIB_DL dl) endif() -set(DEPENDENCIES ${realsense2_LIBRARY} ${OpenCV_LIBS} ${InferenceEngine_LIBRARIES}) +set(DEPENDENCIES ${realsense2_LIBRARY} ${OpenCV_LIBS} openvino::runtime) add_library(${PROJECT_NAME} SHARED src/services/pipeline_processing_server.cpp @@ -204,6 +188,7 @@ add_library(${PROJECT_NAME} SHARED src/inferences/object_detection.cpp src/inferences/head_pose_detection.cpp src/inferences/object_segmentation.cpp + src/inferences/object_segmentation_maskrcnn.cpp src/inferences/person_reidentification.cpp src/inferences/person_attribs_detection.cpp #src/inferences/landmarks_detection.cpp @@ -223,6 +208,7 @@ add_library(${PROJECT_NAME} SHARED src/models/face_detection_model.cpp src/models/head_pose_detection_model.cpp src/models/object_segmentation_model.cpp + src/models/object_segmentation_maskrcnn_model.cpp src/models/person_reidentification_model.cpp src/models/person_attribs_detection_model.cpp #src/models/landmarks_detection_model.cpp @@ -230,7 +216,7 @@ add_library(${PROJECT_NAME} SHARED src/models/vehicle_attribs_detection_model.cpp src/models/license_plate_detection_model.cpp src/models/object_detection_ssd_model.cpp - src/models/object_detection_yolov2_model.cpp + src/models/object_detection_yolov5_model.cpp src/outputs/image_window_output.cpp src/outputs/ros_topic_output.cpp src/outputs/rviz_output.cpp @@ -246,13 +232,12 @@ ament_target_dependencies(${PROJECT_NAME} "std_msgs" "sensor_msgs" "object_msgs" - "people_msgs" - "pipeline_srv_msgs" + "openvino_msgs" "ament_index_cpp" "class_loader" "realsense2" "cv_bridge" - "vino_param_lib" + "openvino_param_lib" "yaml_cpp_vendor" ) diff --git a/dynamic_vino_lib/Doxyfile b/openvino_wrapper_lib/Doxyfile similarity index 100% rename from dynamic_vino_lib/Doxyfile rename to 
openvino_wrapper_lib/Doxyfile diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp similarity index 79% rename from dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp index fbf974d4..b576af7e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,13 @@ * @brief A header file with declaration for Inference Engine class * @file engine.hpp */ -#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ -#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ +#define OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ #pragma once -#include "dynamic_vino_lib/models/base_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino/openvino.hpp" namespace Engines { @@ -47,12 +47,12 @@ class Engine /** * @brief Using an Inference Request to initialize the inference Engine. */ - Engine(InferenceEngine::InferRequest::Ptr &); + Engine(ov::InferRequest &); /** * @brief Get the inference request this instance holds. * @return The inference request this instance holds. 
*/ - inline InferenceEngine::InferRequest::Ptr & getRequest() + inline ov::InferRequest & getRequest() { return request_; } @@ -64,12 +64,12 @@ class Engine template void setCompletionCallback(const T & callbackToSet) { - request_->SetCompletionCallback(callbackToSet); + request_.set_callback(callbackToSet); } private: - InferenceEngine::InferRequest::Ptr request_ = nullptr; + ov::InferRequest request_; }; } // namespace Engines -#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp similarity index 76% rename from dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp index ed5923f3..2583316b 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/engines/engine_manager.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/engines/engine_manager.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,14 +16,14 @@ * @brief A header file with declaration for NetworkEngine class * @file engine.h */ -#ifndef DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ -#define DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ +#define OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ #pragma once -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino/openvino.hpp" namespace Engines { @@ -35,7 +35,7 @@ class EngineManager { public: /** - * @brief Create InferenceEngine instance by given Engine Name and Network. + * @brief Create OpenVINO instance by given Engine Name and Network. * @return The shared pointer of created Engine instance. */ std::shared_ptr createEngine( @@ -52,10 +52,10 @@ class EngineManager const std::string &, const std::shared_ptr &); #endif - std::shared_ptr createEngine_V2019R2_plus( + std::shared_ptr createEngine_V2022( const std::string &, const std::shared_ptr &); }; } // namespace Engines -#endif // DYNAMIC_VINO_LIB__ENGINES__ENGINE_MANAGER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__ENGINES__ENGINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp similarity index 82% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp index 130041a5..865348f5 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/age_gender_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/age_gender_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,26 +16,25 @@ * @brief A header file with declaration for AgeGenderDetection Class * @file age_gender_recignition.h */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ -#include -#include +#include +#include #include #include #include - -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" namespace Outputs { class BaseOuput; } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class AgeGenderResult @@ -75,7 +74,7 @@ class AgeGenderResult : public Result class AgeGenderDetection : public BaseInference { public: - using Result = dynamic_vino_lib::AgeGenderResult; + using Result = openvino_wrapper_lib::AgeGenderResult; AgeGenderDetection(); ~AgeGenderDetection() override; /** @@ -113,7 +112,7 @@ class AgeGenderDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
@@ -132,6 +131,6 @@ class AgeGenderDetection : public BaseInference std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__AGE_GENDER_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp similarity index 94% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp index ec46271e..b5aca1f2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_filter.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_filter.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,16 +16,16 @@ * @brief A header file with declaration for BaseFilter Class * @file base_filter.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ #include #include #include #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** @@ -190,6 +190,6 @@ class BaseFilter std::vector relation_operators_ = {"==", "!=", "<=", ">=", "<", ">"}; std::vector logic_operators_ = {"&&", "||"}; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_FILTER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_FILTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp similarity index 86% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp index 8e830764..318fbeac 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_inference.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_inference.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,17 +16,16 @@ * @brief A header file with declaration for BaseInference Class * @file base_inference.h */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ #include #include #include - -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" namespace Outputs @@ -42,14 +41,14 @@ class BaseOutput; */ template void matU8ToBlob( - const cv::Mat & orig_image, InferenceEngine::Blob::Ptr & blob, + const cv::Mat & orig_image, ov::Tensor & input_tensor, float scale_factor = 1.0, int batch_index = 0) { - InferenceEngine::SizeVector blob_size = blob->getTensorDesc().getDims(); + ov::Shape blob_size = input_tensor.get_shape(); const size_t width = blob_size[3]; const size_t height = blob_size[2]; const size_t channels = blob_size[1]; - T * blob_data = blob->buffer().as(); + T * blob_data = input_tensor.data(); cv::Mat resized_image(orig_image); if (width != orig_image.size().width || height != orig_image.size().height) { @@ -67,7 +66,7 @@ void matU8ToBlob( } } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class Result @@ -166,7 +165,7 @@ class BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - virtual const dynamic_vino_lib::Result * getLocationResult(int idx) const = 0; + virtual const openvino_wrapper_lib::Result * getLocationResult(int idx) const = 0; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
@@ -194,8 +193,8 @@ class BaseInference ") processed by inference" << slog::endl; return false; } - InferenceEngine::Blob::Ptr input_blob = engine_->getRequest()->GetBlob(input_name); - matU8ToBlob(frame, input_blob, scale_factor, batch_index); + ov::Tensor input_tensor = engine_->getRequest().get_tensor(input_name); + matU8ToBlob(frame, input_tensor, scale_factor, batch_index); enqueued_frames_ += 1; return true; } @@ -209,6 +208,6 @@ class BaseInference int enqueued_frames_ = 0; bool results_fetched_ = false; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_INFERENCE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_INFERENCE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp similarity index 89% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp index 17fd90f0..44285be3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/base_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/base_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief A header file with declaration for BaseReidentification Class * @file base_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ #include #include #include @@ -26,7 +26,7 @@ #include // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class Tracker @@ -96,5 +96,5 @@ class Tracker std::unordered_map recorded_tracks_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__BASE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp similarity index 83% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp index a493c3f8..38d50fbe 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/emotions_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/emotions_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,24 +15,24 @@ * @brief A header file with declaration for EmotionsDetection Class * @file emotions_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ #include #include #include -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" +#include "openvino_wrapper_lib/models/emotion_detection_model.hpp" namespace Outputs { class BaseOuput; } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class EmotionResult @@ -64,7 +64,7 @@ class EmotionsResult : public Result class EmotionsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::EmotionsResult; + using Result = openvino_wrapper_lib::EmotionsResult; EmotionsDetection(); ~EmotionsDetection() override; /** @@ -102,7 +102,7 @@ class EmotionsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
@@ -125,6 +125,6 @@ class EmotionsDetection : public BaseInference std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib +} // namespace openvino_wrapper_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__EMOTIONS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp index f43a9f2e..6c18adbe 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief A header file with declaration for FaceDetection Class * @file face_detection.h */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ #include #include @@ -28,14 +28,13 @@ #include #include -#include "dynamic_vino_lib/models/face_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/face_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class FaceDetectionResult @@ -56,5 +55,5 @@ class FaceDetection : public ObjectDetection public: explicit FaceDetection(bool, double); }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__FACE_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__FACE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp similarity index 79% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp index 3785b371..7175f3be 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/face_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/face_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 
(the "License"); // you may not use this file except in compliance with the License. @@ -16,20 +16,20 @@ * @brief A header file with declaration for FaceReidentification Class * @file face_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/face_reidentification_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/face_reidentification_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class FaceReidentificationResult @@ -53,7 +53,7 @@ class FaceReidentificationResult : public Result class FaceReidentification : public BaseInference { public: - using Result = dynamic_vino_lib::FaceReidentificationResult; + using Result = openvino_wrapper_lib::FaceReidentificationResult; explicit FaceReidentification(double); ~FaceReidentification() override; /** @@ -91,7 +91,7 @@ class FaceReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed reidentification result either through image window or ROS topic. 
@@ -108,7 +108,7 @@ class FaceReidentification : public BaseInference private: std::shared_ptr valid_model_; std::vector results_; - std::shared_ptr face_tracker_; + std::shared_ptr face_tracker_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__FACE_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp index 06990cc5..1a0ebc4f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/head_pose_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/head_pose_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,20 +16,20 @@ * @brief A header file with declaration for FaceDetection Class * @file head_pose_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ #include #include #include -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class HeadPoseResult @@ -78,7 +78,7 @@ class HeadPoseResult : public Result class HeadPoseDetection : public BaseInference { public: - using Result = dynamic_vino_lib::HeadPoseResult; + using Result = openvino_wrapper_lib::HeadPoseResult; HeadPoseDetection(); ~HeadPoseDetection() override; /** @@ -116,7 +116,7 @@ class HeadPoseDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Get the name of the Inference instance. * @return The name of the Inference instance. 
@@ -139,5 +139,5 @@ class HeadPoseDetection : public BaseInference std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__HEAD_POSE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp similarity index 77% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp index 0966a96a..1f2c9881 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/inference_manager.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/inference_manager.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief a header file with declaration of Inference Manager class * @file inference_manager.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ -#include +#include #include #include #include @@ -28,7 +28,7 @@ #include #include #include -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" /** * @class InferenceManager @@ -84,20 +84,20 @@ class InferenceManager parseInputDevice(const Params::ParamManager::PipelineRawData & params); std::map> parseOutput( const Params::ParamManager::PipelineRawData & params); - std::map> + std::map> parseInference(const Params::ParamManager::PipelineRawData & params); - std::shared_ptr createFaceDetection( + std::shared_ptr createFaceDetection( const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createAgeGenderRecognition( + std::shared_ptr createAgeGenderRecognition( const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createEmotionRecognition( + std::shared_ptr createEmotionRecognition( const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createHeadPoseEstimation( + std::shared_ptr createHeadPoseEstimation( const Params::ParamManager::InferenceParams & infer); - std::shared_ptr createObjectDetection( + std::shared_ptr createObjectDetection( const Params::ParamManager::InferenceParams & infer); std::map pipelines_; }; -#endif // DYNAMIC_VINO_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__INFERENCE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp similarity index 83% rename from 
dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp index e706dd8c..fe7a34e6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/landmarks_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/landmarks_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,19 +16,18 @@ * @brief A header file with declaration for LandmarksDetection Class * @file landmarks_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/landmarks_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class LandmarksDetectionResult @@ -54,7 +53,7 @@ class LandmarksDetectionResult : public Result class LandmarksDetection : public BaseInference { public: - using Result = dynamic_vino_lib::LandmarksDetectionResult; + using Result = openvino_wrapper_lib::LandmarksDetectionResult; LandmarksDetection(); ~LandmarksDetection() override; /** @@ -92,7 +91,7 @@ class LandmarksDetection : public BaseInference * to the frame generated by the input device. 
* @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. @@ -110,5 +109,5 @@ class LandmarksDetection : public BaseInference std::shared_ptr valid_model_; std::vector results_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__LANDMARKS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp similarity index 85% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp index 7d8b6e33..1d9ef900 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/license_plate_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/license_plate_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,19 +16,18 @@ * @brief A header file with declaration for LicensePlateDetection Class * @file license_plate_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/license_plate_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/license_plate_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class LicensePlateDetectionResult @@ -54,7 +53,7 @@ class LicensePlateDetectionResult : public Result class LicensePlateDetection : public BaseInference { public: - using Result = dynamic_vino_lib::LicensePlateDetectionResult; + using Result = openvino_wrapper_lib::LicensePlateDetectionResult; LicensePlateDetection(); ~LicensePlateDetection() override; /** @@ -96,7 +95,7 @@ class LicensePlateDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. 
@@ -129,5 +128,5 @@ class LicensePlateDetection : public BaseInference "U", "V", "W", "X", "Y", "Z" }; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__LICENSE_PLATE_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp similarity index 88% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp index ac9304c0..47b7cc68 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief A header file with declaration for ObjectDetection Class * @file object_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ #include #include #include @@ -27,14 +27,13 @@ #include #include #include -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_filter.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_filter.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class ObjectDetectionResult @@ -85,7 +84,7 @@ class ObjectDetectionResult : public Result class ObjectDetectionResultFilter : public BaseFilter { public: - using Result = dynamic_vino_lib::ObjectDetectionResult; + using Result = openvino_wrapper_lib::ObjectDetectionResult; ObjectDetectionResultFilter(); @@ -141,8 +140,8 @@ class ObjectDetectionResultFilter : public BaseFilter class ObjectDetection : public BaseInference { public: - using Result = dynamic_vino_lib::ObjectDetectionResult; - using Filter = dynamic_vino_lib::ObjectDetectionResultFilter; + using Result = openvino_wrapper_lib::ObjectDetectionResult; + using Filter = openvino_wrapper_lib::ObjectDetectionResultFilter; explicit ObjectDetection(bool, double); ~ObjectDetection() override; /** @@ -207,5 +206,5 @@ class ObjectDetection : public BaseInference double show_output_thresh_ = 0; bool enable_roi_constraint_ = false; }; -} // namespace dynamic_vino_lib -#endif // 
DYNAMIC_VINO_LIB__INFERENCES__OBJECT_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp similarity index 86% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp index 68f90a1c..a450c567 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/object_segmentation.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,8 +16,8 @@ * @brief A header file with declaration for ObjectSegmentation Class * @file object_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ #include #include #include @@ -25,13 +25,13 @@ #include #include #include -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class 
ObjectSegmentationResult @@ -71,7 +71,7 @@ class ObjectSegmentationResult : public Result class ObjectSegmentation : public BaseInference { public: - using Result = dynamic_vino_lib::ObjectSegmentationResult; + using Result = openvino_wrapper_lib::ObjectSegmentationResult; explicit ObjectSegmentation(double); ~ObjectSegmentation() override; /** @@ -113,7 +113,7 @@ class ObjectSegmentation : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. @@ -142,5 +142,5 @@ class ObjectSegmentation : public BaseInference {81, 0, 81} }; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp new file mode 100644 index 00000000..7954157b --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp @@ -0,0 +1,146 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief A header file with declaration for ObjectSegmentationMaskrcnn Class + * @file object_segmentation_maskrcnn.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_MASKRCNN_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_MASKRCNN_HPP_ +#include +#include +#include +#include +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" +#include "opencv2/opencv.hpp" +// namespace +namespace openvino_wrapper_lib +{ +/** + * @class ObjectSegmentationMaskrcnnResult + * @brief Class for storing and processing object segmentation result. + */ +class ObjectSegmentationMaskrcnnResult : public Result +{ +public: + friend class ObjectSegmentationMaskrcnn; + explicit ObjectSegmentationMaskrcnnResult(const cv::Rect & location); + std::string getLabel() const + { + return label_; + } + /** + * @brief Get the confidence that the detected area is a face. + * @return The confidence value. + */ + float getConfidence() const + { + return confidence_; + } + cv::Mat getMask() const + { + return mask_; + } + +private: + std::string label_ = ""; + float confidence_ = -1; + cv::Mat mask_; +}; +/** + * @class ObjectSegmentationMaskrcnn + * @brief Class to load object segmentation model and perform object segmentation. + */ +class ObjectSegmentationMaskrcnn : public BaseInference +{ +public: + using Result = openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult; + explicit ObjectSegmentationMaskrcnn(double); + ~ObjectSegmentationMaskrcnn() override; + /** + * @brief Load the object segmentation model. + */ + void loadNetwork(std::shared_ptr); + /** + * @brief Enqueue a frame to this class. + * The frame will be buffered but not inferred yet. 
+ * @param[in] frame The frame to be enqueued. + * @param[in] input_frame_loc The location of the enqueued frame with respect + * to the frame generated by the input device. + * @return Whether this operation is successful. + */ + bool enqueue(const cv::Mat &, const cv::Rect &) override; + + //Deprecated!! + bool enqueue_for_one_input(const cv::Mat &, const cv::Rect &); + + /** + * @brief Start inference for all buffered frames. + * @return Whether this operation is successful. + */ + bool submitRequest() override; + /** + * @brief This function will fetch the results of the previous inference and + * stores the results in a result buffer array. All buffered frames will be + * cleared. + * @return Whether the Inference object fetches a result this time + */ + bool fetchResults() override; + /** + * @brief Get the length of the buffer result array. + * @return The length of the buffer result array. + */ + int getResultsLength() const override; + /** + * @brief Get the location of result with respect + * to the frame generated by the input device. + * @param[in] idx The index of the result. + */ + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; + /** + * @brief Show the observed detection result either through image window + or ROS topic. + */ + void observeOutput(const std::shared_ptr & output); + /** + * @brief Get the name of the Inference instance. + * @return The name of the Inference instance. 
+ */ + const std::string getName() const override; + const std::vector getFilteredROIs( + const std::string filter_conditions) const override; + +private: + std::shared_ptr valid_model_; + std::vector results_; + int width_ = 0; + int height_ = 0; + double show_output_thresh_ = 0; + + std::vector colors_ = { + {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, + {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, + {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, + {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111}, + {81, 0, 81} + }; +}; +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__OBJECT_SEGMENTATION_MASKRCNN_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp index fbea1e8c..f5a3c100 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_attribs_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_attribs_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,19 +16,19 @@ * @brief A header file with declaration for PersonAttribsDetection Class * @file person_attribs_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino/openvino.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class PersonAttribsDetectionResult @@ -71,7 +71,7 @@ class PersonAttribsDetectionResult : public Result class PersonAttribsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::PersonAttribsDetectionResult; + using Result = openvino_wrapper_lib::PersonAttribsDetectionResult; explicit PersonAttribsDetection(double); ~PersonAttribsDetection() override; /** @@ -109,7 +109,7 @@ class PersonAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. 
@@ -137,5 +137,5 @@ class PersonAttribsDetection : public BaseInference "has longhair", "has coat_jacket"}; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp index 2d47dc3e..be6d55f8 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/person_reidentification.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/person_reidentification.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,20 +16,19 @@ * @brief A header file with declaration for PersonReidentification Class * @file person_reidentification.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class PersonReidentificationResult @@ -52,7 +51,7 @@ class PersonReidentificationResult : public Result class PersonReidentification : public BaseInference { public: - using Result = dynamic_vino_lib::PersonReidentificationResult; + using Result = openvino_wrapper_lib::PersonReidentificationResult; explicit PersonReidentification(double); ~PersonReidentification() override; /** @@ -90,7 +89,7 @@ class PersonReidentification : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. 
@@ -107,7 +106,7 @@ class PersonReidentification : public BaseInference private: std::shared_ptr valid_model_; std::vector results_; - std::shared_ptr person_tracker_; + std::shared_ptr person_tracker_; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__PERSON_REIDENTIFICATION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp index 03ff1427..575e99d1 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,19 +16,18 @@ * @brief A header file with declaration for VehicleAttribsDetection Class * @file vehicle_attribs_detection.hpp */ -#ifndef DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ -#define DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +#define OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ #include #include #include #include -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" #include "opencv2/opencv.hpp" // namespace -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { /** * @class VehicleAttribsDetectionResult @@ -59,7 +58,7 @@ class VehicleAttribsDetectionResult : public Result class VehicleAttribsDetection : public BaseInference { public: - using Result = dynamic_vino_lib::VehicleAttribsDetectionResult; + using Result = openvino_wrapper_lib::VehicleAttribsDetectionResult; VehicleAttribsDetection(); ~VehicleAttribsDetection() override; /** @@ -97,7 +96,7 @@ class VehicleAttribsDetection : public BaseInference * to the frame generated by the input device. * @param[in] idx The index of the result. */ - const dynamic_vino_lib::Result * getLocationResult(int idx) const override; + const openvino_wrapper_lib::Result * getLocationResult(int idx) const override; /** * @brief Show the observed detection result either through image window or ROS topic. 
@@ -119,5 +118,5 @@ class VehicleAttribsDetection : public BaseInference const std::vector colors_ = { "white", "gray", "yellow", "red", "green", "blue", "black"}; }; -} // namespace dynamic_vino_lib -#endif // DYNAMIC_VINO_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ +} // namespace openvino_wrapper_lib +#endif // OPENVINO_WRAPPER_LIB__INFERENCES__VEHICLE_ATTRIBS_DETECTION_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp similarity index 92% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp index 695e7200..79350653 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/base_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/base_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief A header file with declaration for BaseInput Class * @file base_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ #include #include @@ -25,7 +25,7 @@ #include #include #include -#include "dynamic_vino_lib/inputs/ros2_handler.hpp" +#include "openvino_wrapper_lib/inputs/ros2_handler.hpp" /** * @class BaseInputDevice @@ -121,4 +121,4 @@ class BaseInputDevice : public Ros2Handler bool is_init_ = false; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__BASE_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__BASE_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp similarity index 85% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp index 08874c49..e1a7b6c9 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,12 +16,12 @@ * @brief A header file with declaration for Image class * @file file_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -62,4 +62,4 @@ class Image : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IMAGE_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp index 196a934b..cc5274fd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/image_topic.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/image_topic.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,15 +17,15 @@ * @file image_topic.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ #include #include #include #include -#include "dynamic_vino_lib/utils/mutex_counter.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/utils/mutex_counter.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -51,4 +51,4 @@ class ImageTopic : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IMAGE_TOPIC_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IMAGE_TOPIC_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp similarity index 86% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp index 02ffb3ce..c497cf85 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ip_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ip_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,12 +17,12 @@ * @file ip_camera.hpp */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -57,4 +57,4 @@ class IpCamera : public BaseInputDevice std::string ip_uri_; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__IP_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__IP_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp similarity index 85% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp index 3d399927..8752289c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,12 +17,12 @@ * @file realsense_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -61,4 +61,4 @@ class RealSenseCamera : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp similarity index 75% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp index 2b62c643..8d9de35c 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/realsense_camera_topic.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/realsense_camera_topic.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,10 +17,10 @@ * @file realsense_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ -#include "dynamic_vino_lib/inputs/image_topic.hpp" +#include "openvino_wrapper_lib/inputs/image_topic.hpp" namespace Input { @@ -34,4 +34,4 @@ typedef ImageTopic RealSenseCameraTopic; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__REALSENSE_CAMERA_TOPIC_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp similarity index 90% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp index 6a2ac311..c2d31778 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/ros2_handler.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/ros2_handler.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,8 +17,8 @@ * @file ros_handler.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ #include #include @@ -84,4 +84,4 @@ class Ros2Handler } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__ROS2_HANDLER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__ROS2_HANDLER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp similarity index 86% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp index ef63e65b..d5924551 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/standard_camera.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/standard_camera.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,13 +17,13 @@ * @file standard_camera.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" #include #include #include @@ -64,4 +64,4 @@ class StandardCamera : public BaseInputDevice int camera_id_ = -1; }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__STANDARD_CAMERA_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__STANDARD_CAMERA_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp similarity index 85% rename from dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp index e02a5f16..c4ee9b10 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/inputs/video_input.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/inputs/video_input.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,12 +16,12 @@ * @brief A header file with declaration for Video class * @file video_input.h */ -#ifndef DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ -#define DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ #include #include -#include "dynamic_vino_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" namespace Input { @@ -57,4 +57,4 @@ class Video : public BaseInputDevice }; } // namespace Input -#endif // DYNAMIC_VINO_LIB__INPUTS__VIDEO_INPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__INPUTS__VIDEO_INPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp similarity index 81% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp index 7ef53bfd..564f93a3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/age_gender_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/age_gender_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ * @file age_gender_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -61,9 +61,8 @@ class AgeGenderDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - + bool updateLayerProperty(std::shared_ptr&) override; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__AGE_GENDER_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp similarity index 86% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp index 061a1c2b..7f36c061 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/attributes/base_attribute.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/attributes/base_attribute.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2020 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,16 +17,17 @@ * @file base_attribute.hpp */ -#ifndef DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ #include #include #include #include +#include -#include "inference_engine.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino/openvino.hpp" +#include "openvino_wrapper_lib/slog.hpp" namespace Models { @@ -86,7 +87,7 @@ class ModelAttribute } virtual bool updateLayerProperty( - const InferenceEngine::CNNNetwork&) + const std::shared_ptr&) { return false; } inline std::string getModelName() const @@ -101,7 +102,6 @@ class ModelAttribute inline std::string getInputName(std::string name = "input") const { - // std::map::iterator it; auto it = attr_.input_names.find(name); if(it == attr_.input_names.end()){ slog::warn << "No input named: " << name << slog::endl; @@ -113,7 +113,6 @@ class ModelAttribute inline std::string getOutputName(std::string name = "output") const { - //std::map::iterator it; auto it = attr_.output_names.find(name); if(it == attr_.output_names.end()){ slog::warn << "No output named: " << name << slog::endl; @@ -161,11 +160,21 @@ class ModelAttribute attr_.input_height = height; } + inline int getInputHeight() const + { + return attr_.input_height; + } + inline void setInputWidth(const int width) { attr_.input_width = width; } + inline int getInputWidth() const + { + return attr_.input_width; + } + inline void setMaxProposalCount(const int max) { attr_.max_proposal_count = max; @@ -178,7 +187,10 @@ class ModelAttribute protected: ModelAttr attr_; - + std::string input_tensor_name_; + std::string output_tensor_name_; + std::vector> inputs_info_; + std::vector> outputs_info_; }; class SSDModelAttr : public ModelAttribute @@ -187,12 +199,10 @@ class SSDModelAttr : public ModelAttribute explicit SSDModelAttr(const std::string 
model_name = "SSDNet-like"); bool updateLayerProperty( - const InferenceEngine::CNNNetwork&); + const std::shared_ptr&); }; - - } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__ATTRIBUTES_BASE_ATTRIBUTE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp similarity index 82% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp index b3e19a52..ce0a0ac0 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/base_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/base_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,8 +17,8 @@ * @file base_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ #include @@ -29,16 +29,16 @@ #include #include -#include "inference_engine.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" +#include +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" namespace Engines { class Engine; } -namespace dynamic_vino_lib +namespace openvino_wrapper_lib { class ObjectDetectionResult; } @@ -95,9 +95,9 @@ namespace Models virtual const std::string getModelCategory() const = 0; inline ModelAttr getAttribute() { return attr_; } - inline InferenceEngine::CNNNetwork getNetReader() const + inline std::shared_ptr getModel() const { - return net_reader_; + return model_; } protected: @@ -106,11 +106,9 @@ namespace Models * @brief Set the layer property (layer layout, layer precision, etc.). * @param[in] network_reader The reader of the network to be set. 
*/ - virtual bool updateLayerProperty(InferenceEngine::CNNNetwork& network_reader) = 0; - - ///InferenceEngine::CNNNetReader::Ptr net_reader_; - InferenceEngine::Core engine; - InferenceEngine::CNNNetwork net_reader_; // = engine.ReadNetwork(model->getModelFileName()); + virtual bool updateLayerProperty(std::shared_ptr& network_reader) = 0; + ov::Core engine; + std::shared_ptr model_; void setFrameSize(const int &w, const int &h) { frame_size_.width = w; @@ -134,7 +132,7 @@ namespace Models ObjectDetectionModel(const std::string& label_loc, const std::string& model_loc, int batch_size = 1); virtual bool fetchResults( const std::shared_ptr &engine, - std::vector &result, + std::vector &result, const float &confidence_thresh = 0.3, const bool &enable_roi_constraint = false) = 0; virtual bool matToBlob( @@ -144,4 +142,4 @@ namespace Models } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__BASE_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__BASE_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp index de5d4dfb..56c73665 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/emotion_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/emotion_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ * @file emotion_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ - +#ifndef OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ + #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -39,12 +39,8 @@ class EmotionDetectionModel : public BaseModel * @return Name of the model. */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - -private: - bool verifyOutputLayer(const InferenceEngine::DataPtr & ptr); - + bool updateLayerProperty(std::shared_ptr&) override; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__EMOTION_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp index 11c7efae..c4923a36 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -42,4 +42,4 @@ class FaceDetectionModel : public ObjectDetectionModel }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__FACE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp similarity index 75% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp index 1939cf05..4eda2755 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/face_reidentification_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/face_reidentification_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,11 @@ * @brief A header file with declaration for FaceReidentificationModel Class * @file person_reidentification_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" + namespace Models { /** @@ -39,10 +40,8 @@ class FaceReidentificationModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__FACE_REIDENTIFICATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp similarity index 82% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp index 5afce9b3..d815f125 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/head_pose_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/head_pose_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,11 +17,11 @@ * @file head_pose_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { @@ -63,7 +63,7 @@ class HeadPoseDetectionModel : public BaseModel * @return Name of the model. */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; + bool updateLayerProperty(std::shared_ptr&) override; private: @@ -73,4 +73,4 @@ class HeadPoseDetectionModel : public BaseModel }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__HEAD_POSE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp similarity index 75% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp index 7bbb51e5..71a7244f 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/landmarks_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/landmarks_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief A header file with declaration for LandmarksDetectionModel Class * @file landmarks_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -39,10 +39,8 @@ class LandmarksDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__LANDMARKS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp similarity index 73% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp index 9357160a..76c2deed 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/license_plate_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/license_plate_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief A header file with declaration for LicensePlateDetectionModel Class * @file vehicle_attribs_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -41,14 +41,11 @@ class LicensePlateDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - // up to 88 items per license plate, ended with "-1" + bool updateLayerProperty(std::shared_ptr&) override; const int max_sequence_size_ = 88; std::string input_; std::string seq_input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__LICENSE_PLATE_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp similarity index 76% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp index 76bb6354..818414dd 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_ssd_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_ssd_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 
Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,12 +15,12 @@ * @brief A header file with declaration for ObjectDetectionModel Class * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ #include #include #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -29,14 +29,14 @@ namespace Models */ class ObjectDetectionSSDModel : public ObjectDetectionModel { - using Result = dynamic_vino_lib::ObjectDetectionResult; + using Result = openvino_wrapper_lib::ObjectDetectionResult; public: ObjectDetectionSSDModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); bool fetchResults( const std::shared_ptr & engine, - std::vector & results, + std::vector & results, const float & confidence_thresh = 0.3, const bool & enable_roi_constraint = false) override; @@ -55,8 +55,8 @@ class ObjectDetectionSSDModel : public ObjectDetectionModel */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; + bool updateLayerProperty(std::shared_ptr&) override; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_SSD_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp similarity index 63% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp rename to 
openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp index efbe17e9..8e2f177e 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_detection_yolov2_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_detection_yolov5_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,28 +15,36 @@ * @brief A header file with declaration for ObjectDetectionModel Class * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_ #include #include #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** * @class ObjectDetectionModel * @brief This class generates the face detection model. 
*/ -class ObjectDetectionYolov2Model : public ObjectDetectionModel +#pragma pack(1) + typedef struct Resize { + cv::Mat resized_image; + int dw{}; + int dh{}; + } Resize_t; +#pragma pack() + +class ObjectDetectionYolov5Model : public ObjectDetectionModel { - using Result = dynamic_vino_lib::ObjectDetectionResult; + using Result = openvino_wrapper_lib::ObjectDetectionResult; public: - ObjectDetectionYolov2Model(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); + ObjectDetectionYolov5Model(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); bool fetchResults( const std::shared_ptr & engine, - std::vector & results, + std::vector & results, const float & confidence_thresh = 0.3, const bool & enable_roi_constraint = false) override; @@ -54,11 +62,12 @@ class ObjectDetectionYolov2Model : public ObjectDetectionModel * @return Name of the model. */ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; + bool updateLayerProperty(std::shared_ptr&) override; + static Resize_t pre_process_ov(const cv::Mat &input_image); + + cv::Mat input_image; + Resize_t resize_img; -protected: - int getEntryIndex(int side, int lcoords, int lclasses, int location, int entry); - InferenceEngine::InputInfo::Ptr input_info_ = nullptr; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_DETECTION_YOLOV2_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_DETECTION_YOLOV5_MODEL_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp new file mode 100644 index 00000000..49940225 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp @@ -0,0 +1,61 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +/** + * @brief A header file with declaration for ObjectSegmentationMaskrcnnModel Class + * @file face_detection_model.h + */ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_ +#include +#include +#include "openvino_wrapper_lib/models/base_model.hpp" +namespace Models +{ +/** + * @class ObjectSegmentationMaskrcnnModel + * @brief This class generates the object segmentation model. + */ +class ObjectSegmentationMaskrcnnModel : public BaseModel +{ +public: + ObjectSegmentationMaskrcnnModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); + inline int getMaxProposalCount() const + { + return max_proposal_count_; + } + inline int getObjectSize() const + { + return object_size_; + } + + bool enqueue(const std::shared_ptr & ,const cv::Mat &, + const cv::Rect & ) override; + + bool matToBlob( + const cv::Mat & , const cv::Rect &, float , + int , const std::shared_ptr & ); + + /** + * @brief Get the name of this segmentation model. + * @return Name of the model. 
+ */ + const std::string getModelCategory() const override; + bool updateLayerProperty(std::shared_ptr&) override; + +private: + int max_proposal_count_; + int object_size_; +}; +} // namespace Models +#endif // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MASKRCNN_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp similarity index 79% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp index af047bcc..217ec132 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/object_segmentation_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/object_segmentation_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,10 +15,11 @@ * @brief A header file with declaration for ObjectSegmentationModel Class * @file face_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -50,13 +51,11 @@ class ObjectSegmentationModel : public BaseModel * @return Name of the model. 
*/ const std::string getModelCategory() const override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; + bool updateLayerProperty(std::shared_ptr&) override; private: int max_proposal_count_; int object_size_; - - InferenceEngine::InputsDataMap input_info_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__OBJECT_SEGMENTATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp similarity index 65% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp index d05e67a6..d22e6a2d 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_attribs_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_attribs_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief A header file with declaration for PersonAttribsDetectionModel Class * @file person_attribs_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -30,8 +30,6 @@ class PersonAttribsDetectionModel : public BaseModel { public: PersonAttribsDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - //inline const std::string getInputName() {return input_;} - //inline const std::string getOutputName() {return output_;} /** * @brief Get the name of this detection model. * @return Name of the model. @@ -39,11 +37,9 @@ class PersonAttribsDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; + bool updateLayerProperty(std::shared_ptr&) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__PERSON_ATTRIBS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp similarity index 72% rename from dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp rename to 
openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp index 41ff85c7..4b2937ff 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/person_reidentification_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/person_reidentification_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ * @brief A header file with declaration for PersonReidentificationModel Class * @file person_reidentification_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -39,11 +39,9 @@ class PersonReidentificationModel : public BaseModel const std::string getModelCategory() const override; protected: - bool updateLayerProperty(InferenceEngine::CNNNetwork&) override; - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; + bool updateLayerProperty(std::shared_ptr&) override; std::string input_; std::string output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__PERSON_REIDENTIFICATION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp similarity index 69% rename from 
dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp index 9ed5acdc..8012bd68 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,10 +16,10 @@ * @brief A header file with declaration for VehicleAttribsDetectionModel Class * @file vehicle_attribs_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ -#define DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ +#define OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ #include -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" namespace Models { /** @@ -30,7 +30,7 @@ class VehicleAttribsDetectionModel : public BaseModel { public: VehicleAttribsDetectionModel(const std::string& label_loc, const std::string & model_loc, int batch_size = 1); - inline const std::string getInputName() {return input_;} + inline const std::string getInputName() {return input_tensor_name_;} inline const std::string getColorOutputName() {return color_output_;} inline const std::string getTypeOutputName() {return type_output_;} /** @@ -40,12 +40,9 @@ class VehicleAttribsDetectionModel : public BaseModel const std::string getModelCategory() const override; protected: - //void checkLayerProperty(const InferenceEngine::CNNNetReader::Ptr &) override; - //void setLayerProperty(InferenceEngine::CNNNetReader::Ptr) override; - bool 
updateLayerProperty(InferenceEngine::CNNNetwork&) override; - std::string input_; + bool updateLayerProperty(std::shared_ptr&) override; std::string color_output_; std::string type_output_; }; } // namespace Models -#endif // DYNAMIC_VINO_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ +#endif // OPENVINO_WRAPPER_LIB__MODELS__VEHICLE_ATTRIBS_DETECTION_MODEL_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp similarity index 52% rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp index 7d25944c..67971a47 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/base_output.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/base_output.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,39 +17,40 @@ * @file head_pose_detection_model.h */ -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_ #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "dynamic_vino_lib/inferences/object_segmentation.hpp" -#include "dynamic_vino_lib/inferences/person_reidentification.hpp" -#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/landmarks_detection.hpp" -#include "dynamic_vino_lib/inferences/face_reidentification.hpp" -#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/license_plate_detection.hpp" +#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inferences/emotions_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/inferences/object_segmentation.hpp" +#include "openvino_wrapper_lib/inferences/person_reidentification.hpp" +#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp" +#include 
"openvino_wrapper_lib/inferences/landmarks_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_reidentification.hpp" +#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp" +#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp" +#include "openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp" #include "opencv2/opencv.hpp" class Pipeline; @@ -69,74 +70,80 @@ class BaseOutput /** * @brief Generate output content according to the license plate detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the vehicle attributes detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the face reidentification result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the landmarks detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the person reidentification result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the person reidentification result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the object segmentation result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) + { + } + /** + * @brief Generate output content according to the object segmentation maskrcnn result. + */ + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the object detection result. 
*/ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the face detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the emotion detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the age and gender detection * result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** * @brief Generate output content according to the headpose detection result. */ - virtual void accept(const std::vector &) + virtual void accept(const std::vector &) { } /** @@ -157,13 +164,13 @@ class BaseOutput virtual void setServiceResponseForFace( std::shared_ptr response) {} virtual void setServiceResponse( - std::shared_ptr response) {} + std::shared_ptr response) {} virtual void setServiceResponse( - std::shared_ptr response) {} + std::shared_ptr response) {} virtual void setServiceResponse( - std::shared_ptr response) {} + std::shared_ptr response) {} virtual void setServiceResponse( - std::shared_ptr response) {} + std::shared_ptr response) {} Pipeline * getPipeline() const; cv::Mat getFrame() const; virtual void clearData() {} @@ -174,4 +181,4 @@ class BaseOutput std::string output_name_; }; } // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__BASE_OUTPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__OUTPUTS__BASE_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp similarity index 74% rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp index e34950af..d7c39ea1 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/image_window_output.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/image_window_output.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ * @file image_window_output.h */ -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ #include #include -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" namespace Outputs { @@ -56,78 +56,84 @@ class ImageWindowOutput : public BaseOutput * @param[in] A license plate detection result objetc. */ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the vehicle attributes detection result. * @param[in] A vehicle attributes detection result objetc. */ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the face reidentification result. * @param[in] A face reidentification result objetc. */ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the landmarks detection result. * @param[in] A landmarks detection result objetc. */ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the person attributes detection result. * @param[in] A person attributes detection result objetc. 
*/ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the person reidentification result. * @param[in] A person reidentification result objetc. */ void accept( - const std::vector &) override; + const std::vector &) override; /** * @brief Generate image window output content according to * the object segmentation result. * @param[in] An obejct segmentation result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; + /** + * @brief Generate image window output content according to + * the object segmentation maskrcnn result. + * @param[in] An obejct segmentation result objetc. + */ + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the face detection result. * @param[in] A face detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the object detection result. * @param[in] results A bundle of object detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the emotion detection result. * @param[in] A emotion detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the age and gender detection result. * @param[in] A head pose detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate image window output content according to * the headpose detection result. * @param[in] An age gender detection result objetc. 
*/ - void accept(const std::vector &) override; + void accept(const std::vector &) override; private: unsigned findOutput(const cv::Rect &); @@ -144,7 +150,8 @@ class ImageWindowOutput : public BaseOutput */ cv::Mat getRotationTransform(double yaw, double pitch, double roll); - void mergeMask(const std::vector &); + void mergeMask(const std::vector &); + void mergeMask(const std::vector &); struct OutputData { @@ -173,4 +180,4 @@ class ImageWindowOutput : public BaseOutput }; }; } // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__OUTPUTS__IMAGE_WINDOW_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp similarity index 66% rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp index 5392792d..d0558ad2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_service_output.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_service_output.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,18 +17,18 @@ * @file ros_topic_output.hpp */ -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include @@ -36,8 +36,8 @@ #include #include -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp" namespace Outputs { @@ -60,13 +60,13 @@ class RosServiceOutput : public RosTopicOutput void setServiceResponse(std::shared_ptr response); void setResponseForFace(std::shared_ptr response); - void setServiceResponse(std::shared_ptr response); - void setServiceResponse(std::shared_ptr response); - void setServiceResponse(std::shared_ptr response); - void setServiceResponse(std::shared_ptr response); + void setServiceResponse(std::shared_ptr response); + void setServiceResponse(std::shared_ptr response); + void setServiceResponse(std::shared_ptr response); + void setServiceResponse(std::shared_ptr response); private: const std::string service_name_; }; } // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_SERVICE_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp similarity index 53% rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp index c102e44e..74285c0e 100644 --- 
a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/ros_topic_output.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/ros_topic_output.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,30 +17,30 @@ * @file ros_topic_output.hpp */ -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -48,8 +48,8 @@ #include #include -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" namespace Outputs { @@ -77,101 +77,107 @@ class RosTopicOutput : public BaseOutput * the license plate detection result. * @param[in] results a bundle of license plate detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the vehicle attributes detection result. * @param[in] results a bundle of vehicle attributes detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the face reidentification result. 
* @param[in] results a bundle of face reidentification results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the landmarks detection result. * @param[in] results a bundle of landmarks detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the person attributes detection result. * @param[in] results a bundle of person attributes detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the person reidentification result. * @param[in] results a bundle of person reidentification results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the object segmentation result. * @param[in] results a bundle of object segmentation results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; + /** + * @brief Generate ros topic infomation according to + * the object segmentation result. + * @param[in] results a bundle of object segmentation maskrcnn results. + */ + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the object detection result. * @param[in] results a bundle of object detection results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the face detection result. * @param[in] An face detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the emotion detection result. * @param[in] An emotion detection result objetc. 
*/ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the age gender detection result. * @param[in] An age gender detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate ros topic infomation according to * the headpose detection result. * @param[in] An head pose detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; protected: const std::string topic_name_; std::shared_ptr node_; - rclcpp::Publisher::SharedPtr pub_license_plate_; - std::shared_ptr license_plate_topic_; - rclcpp::Publisher::SharedPtr pub_vehicle_attribs_; - std::shared_ptr vehicle_attribs_topic_; - rclcpp::Publisher::SharedPtr pub_landmarks_; - std::shared_ptr landmarks_topic_; - rclcpp::Publisher::SharedPtr pub_face_reid_; - std::shared_ptr face_reid_topic_; - rclcpp::Publisher::SharedPtr pub_person_attribs_; - std::shared_ptr person_attribs_topic_; - rclcpp::Publisher::SharedPtr pub_person_reid_; - std::shared_ptr person_reid_topic_; - rclcpp::Publisher::SharedPtr pub_segmented_object_; - std::shared_ptr segmented_objects_topic_; + rclcpp::Publisher::SharedPtr pub_license_plate_; + std::shared_ptr license_plate_topic_; + rclcpp::Publisher::SharedPtr pub_vehicle_attribs_; + std::shared_ptr vehicle_attribs_topic_; + rclcpp::Publisher::SharedPtr pub_landmarks_; + std::shared_ptr landmarks_topic_; + rclcpp::Publisher::SharedPtr pub_face_reid_; + std::shared_ptr face_reid_topic_; + rclcpp::Publisher::SharedPtr pub_person_attribs_; + std::shared_ptr person_attribs_topic_; + rclcpp::Publisher::SharedPtr pub_person_reid_; + std::shared_ptr person_reid_topic_; + rclcpp::Publisher::SharedPtr pub_segmented_object_; + std::shared_ptr segmented_objects_topic_; rclcpp::Publisher::SharedPtr pub_detected_object_; std::shared_ptr detected_objects_topic_; 
rclcpp::Publisher::SharedPtr pub_face_; std::shared_ptr faces_topic_; - rclcpp::Publisher::SharedPtr pub_emotion_; - std::shared_ptr emotions_topic_; - rclcpp::Publisher::SharedPtr pub_age_gender_; - std::shared_ptr age_gender_topic_; - rclcpp::Publisher::SharedPtr pub_headpose_; - std::shared_ptr headpose_topic_; + rclcpp::Publisher::SharedPtr pub_emotion_; + std::shared_ptr emotions_topic_; + rclcpp::Publisher::SharedPtr pub_age_gender_; + std::shared_ptr age_gender_topic_; + rclcpp::Publisher::SharedPtr pub_headpose_; + std::shared_ptr headpose_topic_; }; } // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__OUTPUTS__ROS_TOPIC_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp similarity index 68% rename from dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp index 359f8313..6711fab6 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/outputs/rviz_output.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/outputs/rviz_output.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,8 +17,8 @@ * @file rviz_output.h */ -#ifndef DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ -#define DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ +#define OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ #include #include @@ -26,8 +26,8 @@ #include #include -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/outputs/image_window_output.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/outputs/image_window_output.hpp" namespace Outputs { @@ -54,61 +54,67 @@ class RvizOutput : public BaseOutput * the face reidentification result. * @param[in] A face reidentification result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the landmarks detection result. * @param[in] A landmarks detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the person attributes detection result. * @param[in] A person attributes detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the person reidentification result. * @param[in] A person reidentification result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the face detection result. * @param[in] A face detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the object detection result. * @param[in] results A bundle of object detection results. 
*/ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the object segmentation result. * @param[in] results A bundle of object segmentation results. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; + /** + * @brief Generate rviz output content according to + * the object segmentation result. + * @param[in] results A bundle of object segmentation maskrcnn results. + */ + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the emotion detection result. * @param[in] A emotion detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the age and gender detection result. * @param[in] A head pose detection result objetc. */ - void accept(const std::vector &) override; + void accept(const std::vector &) override; /** * @brief Generate rviz output content according to * the headpose detection result. * @param[in] An age gender detection result objetc. 
*/ - void accept(const std::vector &) override; + void accept(const std::vector &) override; private: std::shared_ptr node_; @@ -117,4 +123,4 @@ class RvizOutput : public BaseOutput std::shared_ptr image_window_output_; }; } // namespace Outputs -#endif // DYNAMIC_VINO_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ +#endif // OPENVINO_WRAPPER_LIB__OUTPUTS__RVIZ_OUTPUT_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp similarity index 87% rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp index 210b4e96..a80c15b3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief a header file with declaration of Pipeline class * @file pipeline.h */ -#ifndef DYNAMIC_VINO_LIB__PIPELINE_HPP_ -#define DYNAMIC_VINO_LIB__PIPELINE_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_HPP_ +#define OPENVINO_WRAPPER_LIB__PIPELINE_HPP_ #include #include @@ -27,11 +27,10 @@ #include #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inputs/standard_camera.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/pipeline_params.hpp" -// #include "dynamic_vino_lib/pipeline_filters.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" #include "opencv2/opencv.hpp" /** @@ -60,7 +59,7 @@ class Pipeline */ bool add( const std::string & parent, const std::string & name, - std::shared_ptr inference); + std::shared_ptr inference); /** * @brief Add output device to the pipeline. * @param[in] parent name of the parent inference. @@ -78,7 +77,7 @@ class Pipeline // { // filters_.add(filters); // } - bool add(const std::string & name, std::shared_ptr inference); + bool add(const std::string & name, std::shared_ptr inference); /** * @brief Add inference network-output device edge to the pipeline. * @param[in] parent name of the parent inference. 
@@ -151,12 +150,11 @@ class Pipeline const int kCatagoryOrder_Output = 3; std::shared_ptr params_; - // PipelineFilters filters_; std::shared_ptr input_device_; std::string input_device_name_; std::multimap next_; - std::map> name_to_detection_map_; + std::map> name_to_detection_map_; std::map> name_to_output_map_; int total_inference_ = 0; std::set output_names_; @@ -172,4 +170,4 @@ class Pipeline std::chrono::time_point t_start_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_HPP_ +#endif // OPENVINO_WRAPPER_LIB__PIPELINE_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp similarity index 76% rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp index 06c583c5..e4a3d485 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_manager.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_manager.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief a header file with declaration of Pipeline Manager class * @file pipeline_manager.hpp */ -#ifndef DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ -#define DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_ +#define OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_ -#include +#include #include #include #include @@ -28,8 +28,8 @@ #include #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/engines/engine_manager.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/engines/engine_manager.hpp" /** * @class PipelineManager @@ -85,7 +85,6 @@ class PipelineManager struct ServiceData { std::shared_ptr thread; - // std::shared_ptr node; PipelineState state; }; @@ -111,35 +110,37 @@ class PipelineManager parseInputDevice(const PipelineData & params); std::map> parseOutput(const PipelineData & pdata); - std::map> + std::map> parseInference(const Params::ParamManager::PipelineRawData & params); - std::shared_ptr + std::shared_ptr createFaceDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createEmotionRecognition(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createObjectDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr + createObjectSegmentationMaskrcnn(const Params::ParamManager::InferenceRawData & infer); + std::shared_ptr createPersonReidentification(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr 
createPersonAttribsDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createLandmarksDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createFaceReidentification(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createVehicleAttribsDetection(const Params::ParamManager::InferenceRawData & infer); - std::shared_ptr + std::shared_ptr createLicensePlateDetection(const Params::ParamManager::InferenceRawData & infer); std::map pipelines_; ServiceData service_; Engines::EngineManager engine_manager_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_MANAGER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__PIPELINE_MANAGER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp index 9de08354..bcb2991a 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/pipeline_params.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/pipeline_params.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,10 +16,10 @@ * @brief a header file with declaration of Pipeline class * @file pipeline_params.hpp */ -#ifndef DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ -#define DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ +#define OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ -#include +#include #include #include #include @@ -28,9 +28,9 @@ #include #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/inputs/standard_camera.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" #include "opencv2/opencv.hpp" const char kInputType_Image[] = "Image"; @@ -53,8 +53,9 @@ const char kInferTpye_EmotionRecognition[] = "EmotionRecognition"; const char kInferTpye_HeadPoseEstimation[] = "HeadPoseEstimation"; const char kInferTpye_ObjectDetection[] = "ObjectDetection"; const char kInferTpye_ObjectSegmentation[] = "ObjectSegmentation"; +const char kInferTpye_ObjectSegmentationMaskrcnn[] = "ObjectSegmentationMaskrcnn"; const char kInferTpye_ObjectDetectionTypeSSD[] = "SSD"; -const char kInferTpye_ObjectDetectionTypeYolov2[] = "yolov2"; +const char kInferTpye_ObjectDetectionTypeYolov5[] = "yolov5"; const char kInferTpye_PersonReidentification[] = "PersonReidentification"; const char kInferTpye_PersonAttribsDetection[] = "PersonAttribsDetection"; const char kInferTpye_LandmarksDetection[] = "LandmarksDetection"; @@ -84,4 +85,4 @@ class PipelineParams Params::ParamManager::PipelineRawData params_; }; -#endif // DYNAMIC_VINO_LIB__PIPELINE_PARAMS_HPP_ +#endif // OPENVINO_WRAPPER_LIB__PIPELINE_PARAMS_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp similarity index 68% rename from 
dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp index 056cb179..9c361718 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/frame_processing_server.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/frame_processing_server.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,22 +11,22 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#ifndef DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ -#define DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#define OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include -#include -#include -#include +#include +#include +#include #include #include @@ -56,4 +56,4 @@ class FrameProcessingServer : public rclcpp::Node std::string config_path_; }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__SERVICES__FRAME_PROCESSING_SERVER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp similarity index 70% rename from dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp index 
f7f41141..ed5e1dc2 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/services/pipeline_processing_server.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/services/pipeline_processing_server.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#ifndef DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ -#define DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#define OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include @@ -37,7 +37,6 @@ class PipelineProcessingServer : public rclcpp::Node private: void initPipelineService(); - // bool cbService(ros::ServiceEvent& event); void cbService( const std::shared_ptr request, std::shared_ptr response); @@ -52,4 +51,4 @@ class PipelineProcessingServer : public rclcpp::Node std::string service_name_; }; } // namespace vino_service -#endif // DYNAMIC_VINO_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__SERVICES__PIPELINE_PROCESSING_SERVER_HPP_ diff --git a/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp new file mode 100644 index 00000000..bf350394 --- /dev/null +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/slog.hpp @@ -0,0 +1,175 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with logging facility for common samples + * @file slog.hpp + */ +#ifndef OPENVINO_WRAPPER_LIB__SLOG_HPP_ +#define OPENVINO_WRAPPER_LIB__SLOG_HPP_ + +#pragma once + +#include +#include + +namespace slog +{ +#if 1 + enum COLOR { + RESET = 0, + BLUE = 1, + GREEN = 2, + YELLOW = 3, + RED = 4, + }; + +#else +//the following are UBUNTU/LINUX ONLY terminal color codes. +#define RESET "\033[0m" +#define BLACK "\033[30m" /* Black */ +#define RED "\033[31m" /* Red */ +#define GREEN "\033[32m" /* Green */ +#define YELLOW "\033[33m" /* Yellow */ +#define BLUE "\033[34m" /* Blue */ +#define MAGENTA "\033[35m" /* Magenta */ +#define CYAN "\033[36m" /* Cyan */ +#define WHITE "\033[37m" /* White */ +#define BOLDBLACK "\033[1m\033[30m" /* Bold Black */ +#define BOLDRED "\033[1m\033[31m" /* Bold Red */ +#define BOLDGREEN "\033[1m\033[32m" /* Bold Green */ +#define BOLDYELLOW "\033[1m\033[33m" /* Bold Yellow */ +#define BOLDBLUE "\033[1m\033[34m" /* Bold Blue */ +#define BOLDMAGENTA "\033[1m\033[35m" /* Bold Magenta */ +#define BOLDCYAN "\033[1m\033[36m" /* Bold Cyan */ +#define BOLDWHITE "\033[1m\033[37m" /* Bold White */ +#endif + +/** + * @class LogStreamEndLine + * @brief The LogStreamEndLine class implements an end line marker for a log + * stream + */ +class LogStreamEndLine +{ +}; + +static constexpr LogStreamEndLine endl; + +/** + * @class LogStream + * @brief The LogStream class implements a stream for sample logging + */ +class LogStream +{ + std::string _prefix; + std::ostream * _log_stream; 
+ bool _new_line; + int _color_id; + +public: + /** + * @brief A constructor. Creates an LogStream object + * @param prefix The prefix to print + */ + LogStream(const std::string & prefix, std::ostream & log_stream, + const int color_id = -1) + : _prefix(prefix), _new_line(true), _color_id(color_id) + { + _log_stream = &log_stream; + } + + /** + * @brief A stream output operator to be used within the logger + * @param arg Object for serialization in the logger message + */ + template + LogStream & operator<<(const T & arg) + { + if (_new_line) { + setLineColor(); + (*_log_stream) << "[ " << _prefix << " ] "; + _new_line = false; + } + + (*_log_stream) << arg; + return *this; + } + + // Specializing for LogStreamEndLine to support slog::endl + LogStream & operator<<(const LogStreamEndLine & arg) + { + _new_line = true; + resetLineColor(); + (*_log_stream) << std::endl; + return *this; + } + + void setLineColor() + { + switch(_color_id){ + case BLUE: + (*_log_stream) << "\033[34m"; + break; + case GREEN: + (*_log_stream) << "\033[32m"; + break; + case YELLOW: + (*_log_stream) << "\033[33m"; + break; + case RED: + (*_log_stream) << "\033[31m"; + break; + default: + break; + } + } + + void resetLineColor() + { + if(_color_id > 0){ + (*_log_stream) << "\033[0m"; //RESET + } + } +}; + +class NullStream +{ +public: + NullStream(){} + + NullStream(const std::string & prefix, std::ostream & log_stream) + { + (void)prefix; + (void)log_stream; + } + + template + NullStream & operator<<(const T & arg) + { + return *this; + } +}; + +#ifdef LOG_LEVEL_DEBUG + static LogStream debug("DEBUG", std::cout, GREEN); +#else + static NullStream debug; +#endif +static LogStream info("INFO", std::cout, BLUE); +static LogStream warn("WARNING", std::cout, YELLOW); +static LogStream err("ERROR", std::cerr, RED); + +} // namespace slog +#endif // OPENVINO_WRAPPER_LIB__SLOG_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp 
b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp similarity index 84% rename from dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp index 47947928..f0da93a3 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/mutex_counter.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/mutex_counter.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ // @file mutex_counter.hpp // -#ifndef DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ -#define DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ +#define OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ #include @@ -54,4 +54,4 @@ class MutexCounter std::condition_variable cv_; }; -#endif // DYNAMIC_VINO_LIB__UTILS__MUTEX_COUNTER_HPP_ +#endif // OPENVINO_WRAPPER_LIB__UTILS__MUTEX_COUNTER_HPP_ diff --git a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp similarity index 80% rename from dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp rename to openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp index abeac0c5..fa3f32e1 100644 --- a/dynamic_vino_lib/include/dynamic_vino_lib/utils/version_info.hpp +++ b/openvino_wrapper_lib/include/openvino_wrapper_lib/utils/version_info.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,8 +17,8 @@ // @file version_info.hpp // -#ifndef DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ -#define DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ +#ifndef OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ +#define OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ #if(defined(USE_OLD_E_PLUGIN_API)) #include @@ -52,22 +52,15 @@ inline std::string & trim(std::string & s) return s; } -static std::ostream & operator<<(std::ostream & os, const InferenceEngine::Version * version) +static std::ostream & operator<<(std::ostream & os, const ov::Version& version) { os << "\n\tAPI version ............ "; - if (nullptr == version) { - os << "UNKNOWN"; - } else { - os << version->apiVersion.major << "." << version->apiVersion.minor; - if (nullptr != version->buildNumber) { - os << "\n\t" << - "Build .................. " << version->buildNumber; - } - if (nullptr != version->description) { - os << "\n\t" << - "Description ............ " << version->description; - } - } + os << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH; + os << "\n\t" << + "Build .................. " << version.buildNumber; + os << "\n\t" << + "Description ............ " << version.description; + return os; } @@ -129,4 +122,4 @@ inline void printPluginVersion(InferenceEngine::InferenceEnginePluginPtr ptr, st } #endif // (defined(USE_OLD_E_PLUGIN_API)) -#endif // DYNAMIC_VINO_LIB__UTILS__VERSION_INFO_HPP_ +#endif // OPENVINO_WRAPPER_LIB__UTILS__VERSION_INFO_HPP_ diff --git a/dynamic_vino_lib/package.xml b/openvino_wrapper_lib/package.xml similarity index 88% rename from dynamic_vino_lib/package.xml rename to openvino_wrapper_lib/package.xml index d432eba2..bd73ac69 100644 --- a/dynamic_vino_lib/package.xml +++ b/openvino_wrapper_lib/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_lib + openvino_wrapper_lib 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -37,9 +37,8 @@ limitations under the License. 
class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 openvino_common @@ -54,9 +53,8 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 ament_lint_auto diff --git a/dynamic_vino_lib/src/engines/engine.cpp b/openvino_wrapper_lib/src/engines/engine.cpp similarity index 75% rename from dynamic_vino_lib/src/engines/engine.cpp rename to openvino_wrapper_lib/src/engines/engine.cpp index 6f16472f..ab8704d4 100644 --- a/dynamic_vino_lib/src/engines/engine.cpp +++ b/openvino_wrapper_lib/src/engines/engine.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,20 +16,20 @@ * @brief a header file with definition of Engine class * @file engine.cpp */ -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/slog.hpp" #if(defined(USE_OLD_E_PLUGIN_API)) Engines::Engine::Engine( InferenceEngine::InferencePlugin plg, const Models::BaseModel::Ptr base_model) { - request_ = (plg.LoadNetwork(base_model->getNetReader()->getNetwork(), {})).CreateInferRequestPtr(); + request_ = (plg.LoadNetwork(base_model->getModel()->getNetwork(), {})).CreateInferRequestPtr(); } #endif Engines::Engine::Engine( - InferenceEngine::InferRequest::Ptr & request) + ov::InferRequest & request) { request_ = request; } diff --git a/dynamic_vino_lib/src/engines/engine_manager.cpp b/openvino_wrapper_lib/src/engines/engine_manager.cpp similarity index 82% rename from dynamic_vino_lib/src/engines/engine_manager.cpp rename to openvino_wrapper_lib/src/engines/engine_manager.cpp index ed0e3efb..f3906838 100644 --- 
a/dynamic_vino_lib/src/engines/engine_manager.cpp +++ b/openvino_wrapper_lib/src/engines/engine_manager.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,13 +16,13 @@ * @brief a header file with definition of Engine class * @file engine.cpp */ -#include "dynamic_vino_lib/engines/engine_manager.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/utils/version_info.hpp" -#include -#include +#include "openvino_wrapper_lib/engines/engine_manager.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/utils/version_info.hpp" +#include +#include #if(defined(USE_OLD_E_PLUGIN_API)) #include #endif @@ -33,18 +33,18 @@ std::shared_ptr Engines::EngineManager::createEngine( #if(defined(USE_OLD_E_PLUGIN_API)) return createEngine_beforeV2019R2(device, model); #else - return createEngine_V2019R2_plus(device, model); + return createEngine_V2022(device, model); #endif } -std::shared_ptr Engines::EngineManager::createEngine_V2019R2_plus( +std::shared_ptr Engines::EngineManager::createEngine_V2022( const std::string & device, const std::shared_ptr & model) { - InferenceEngine::Core core; - auto executable_network = core.LoadNetwork(model->getNetReader(), device); - auto request = executable_network.CreateInferRequestPtr(); + ov::Core core; + ov::CompiledModel executable_network = core.compile_model(model->getModel(), device); + ov::InferRequest infer_request = executable_network.create_infer_request(); - return std::make_shared(request); + return std::make_shared(infer_request); } #if(defined(USE_OLD_E_PLUGIN_API)) @@ -59,7 +59,7 @@ 
std::shared_ptr Engines::EngineManager::createEngine_beforeV201 } auto executeable_network = - plugins_for_devices_[device].LoadNetwork(model->getNetReader()->getNetwork(), {}); + plugins_for_devices_[device].LoadNetwork(model->getModel()->getNetwork(), {}); auto request = executeable_network.CreateInferRequestPtr(); return std::make_shared(request); diff --git a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp b/openvino_wrapper_lib/src/inferences/age_gender_detection.cpp similarity index 56% rename from dynamic_vino_lib/src/inferences/age_gender_detection.cpp rename to openvino_wrapper_lib/src/inferences/age_gender_detection.cpp index 6cb2bc71..59138e3a 100644 --- a/dynamic_vino_lib/src/inferences/age_gender_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/age_gender_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,38 +20,39 @@ #include #include #include -#include "dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include +#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" // AgeGenderResult -dynamic_vino_lib::AgeGenderResult::AgeGenderResult(const cv::Rect & location) +openvino_wrapper_lib::AgeGenderResult::AgeGenderResult(const cv::Rect & location) : Result(location) { } // AgeGender Detection -dynamic_vino_lib::AgeGenderDetection::AgeGenderDetection() -: dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::AgeGenderDetection::AgeGenderDetection() +: openvino_wrapper_lib::BaseInference() { } -dynamic_vino_lib::AgeGenderDetection::~AgeGenderDetection() = default; +openvino_wrapper_lib::AgeGenderDetection::~AgeGenderDetection() = default; -void dynamic_vino_lib::AgeGenderDetection::loadNetwork( +void openvino_wrapper_lib::AgeGenderDetection::loadNetwork( std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::AgeGenderDetection::enqueue( +bool openvino_wrapper_lib::AgeGenderDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( + bool succeed = openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); if (!succeed) { return false; @@ -61,45 +62,45 @@ bool dynamic_vino_lib::AgeGenderDetection::enqueue( return true; } -bool dynamic_vino_lib::AgeGenderDetection::submitRequest() +bool openvino_wrapper_lib::AgeGenderDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::AgeGenderDetection::fetchResults() +bool 
openvino_wrapper_lib::AgeGenderDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr genderBlob = request->GetBlob(valid_model_->getOutputGenderName()); - InferenceEngine::Blob::Ptr ageBlob = request->GetBlob(valid_model_->getOutputAgeName()); + ov::Tensor gender_tensor = request.get_tensor(valid_model_->getOutputGenderName()); + ov::Tensor age_tensor = request.get_tensor(valid_model_->getOutputAgeName()); for (int i = 0; i < results_.size(); ++i) { - results_[i].age_ = ageBlob->buffer().as()[i] * 100; - results_[i].male_prob_ = genderBlob->buffer().as()[i * 2 + 1]; + results_[i].age_ = age_tensor.data()[i] * 100; + results_[i].male_prob_ = gender_tensor.data()[i * 2 + 1]; } return true; } -int dynamic_vino_lib::AgeGenderDetection::getResultsLength() const +int openvino_wrapper_lib::AgeGenderDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::AgeGenderDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::AgeGenderDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::AgeGenderDetection::getName() const +const std::string openvino_wrapper_lib::AgeGenderDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::AgeGenderDetection::observeOutput( +void openvino_wrapper_lib::AgeGenderDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -107,7 +108,7 @@ void dynamic_vino_lib::AgeGenderDetection::observeOutput( } } -const std::vector dynamic_vino_lib::AgeGenderDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::AgeGenderDetection::getFilteredROIs( const std::string filter_conditions) 
const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/base_filter.cpp b/openvino_wrapper_lib/src/inferences/base_filter.cpp similarity index 82% rename from dynamic_vino_lib/src/inferences/base_filter.cpp rename to openvino_wrapper_lib/src/inferences/base_filter.cpp index 14f2a38c..5b4d381a 100644 --- a/dynamic_vino_lib/src/inferences/base_filter.cpp +++ b/openvino_wrapper_lib/src/inferences/base_filter.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,20 +17,20 @@ * @file base_filter.cpp */ -#include "dynamic_vino_lib/inferences/base_filter.hpp" +#include "openvino_wrapper_lib/inferences/base_filter.hpp" #include #include #include -dynamic_vino_lib::BaseFilter::BaseFilter() {} +openvino_wrapper_lib::BaseFilter::BaseFilter() {} -bool dynamic_vino_lib::BaseFilter::isValidFilterConditions( +bool openvino_wrapper_lib::BaseFilter::isValidFilterConditions( const std::string & filter_conditions) { return strip(filter_conditions) != ""; } -void dynamic_vino_lib::BaseFilter::acceptFilterConditions( +void openvino_wrapper_lib::BaseFilter::acceptFilterConditions( const std::string & filter_conditions) { striped_conditions_ = strip(filter_conditions); @@ -38,7 +38,7 @@ void dynamic_vino_lib::BaseFilter::acceptFilterConditions( infixToSuffix(infix_conditions); } -bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string & str) +bool openvino_wrapper_lib::BaseFilter::isRelationOperator(const std::string & str) { if (std::find(relation_operators_.begin(), relation_operators_.end(), str) != relation_operators_.end()) @@ -48,7 +48,7 @@ bool dynamic_vino_lib::BaseFilter::isRelationOperator(const std::string & str) return false; } -bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string & str) +bool 
openvino_wrapper_lib::BaseFilter::isLogicOperator(const std::string & str) { if (std::find(logic_operators_.begin(), logic_operators_.end(), str) != logic_operators_.end()) @@ -58,7 +58,7 @@ bool dynamic_vino_lib::BaseFilter::isLogicOperator(const std::string & str) return false; } -bool dynamic_vino_lib::BaseFilter::isPriorTo( +bool openvino_wrapper_lib::BaseFilter::isPriorTo( const std::string & operator1, const std::string & operator2) { if (isRelationOperator(operator1) && isLogicOperator(operator2)) { @@ -67,13 +67,13 @@ bool dynamic_vino_lib::BaseFilter::isPriorTo( return false; } -std::string dynamic_vino_lib::BaseFilter::boolToStr(bool value) +std::string openvino_wrapper_lib::BaseFilter::boolToStr(bool value) { if (value) {return "true";} return "false"; } -bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) +bool openvino_wrapper_lib::BaseFilter::strToBool(const std::string & value) { if (!value.compare("true")) {return true;} else if (!value.compare("false")) { return false; @@ -84,12 +84,12 @@ bool dynamic_vino_lib::BaseFilter::strToBool(const std::string & value) } const std::vector & -dynamic_vino_lib::BaseFilter::getSuffixConditions() const +openvino_wrapper_lib::BaseFilter::getSuffixConditions() const { return suffix_conditons_; } -bool dynamic_vino_lib::BaseFilter::logicOperation( +bool openvino_wrapper_lib::BaseFilter::logicOperation( const std::string & logic1, const std::string & op, const std::string & logic2) { if (!op.compare("&&")) { @@ -102,7 +102,7 @@ bool dynamic_vino_lib::BaseFilter::logicOperation( } } -bool dynamic_vino_lib::BaseFilter::stringCompare( +bool openvino_wrapper_lib::BaseFilter::stringCompare( const std::string & candidate, const std::string & op, const std::string & target) { if (!op.compare("==")) { @@ -115,7 +115,7 @@ bool dynamic_vino_lib::BaseFilter::stringCompare( } } -bool dynamic_vino_lib::BaseFilter::floatCompare( +bool openvino_wrapper_lib::BaseFilter::floatCompare( float candidate, const 
std::string & op, float target) { if (!op.compare("<=")) { @@ -132,7 +132,7 @@ bool dynamic_vino_lib::BaseFilter::floatCompare( } } -float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) +float openvino_wrapper_lib::BaseFilter::stringToFloat(const std::string & candidate) { float result = 0; try { @@ -143,7 +143,7 @@ float dynamic_vino_lib::BaseFilter::stringToFloat(const std::string & candidate) return result; } -std::vector dynamic_vino_lib::BaseFilter::split( +std::vector openvino_wrapper_lib::BaseFilter::split( const std::string & filter_conditions) { std::vector seperators; @@ -174,7 +174,7 @@ std::vector dynamic_vino_lib::BaseFilter::split( return infix_conditions; } -void dynamic_vino_lib::BaseFilter::infixToSuffix( +void openvino_wrapper_lib::BaseFilter::infixToSuffix( std::vector & infix_conditions) { std::stack operator_stack; @@ -206,7 +206,7 @@ void dynamic_vino_lib::BaseFilter::infixToSuffix( } } -std::string dynamic_vino_lib::BaseFilter::strip(const std::string & str) +std::string openvino_wrapper_lib::BaseFilter::strip(const std::string & str) { std::string stripped_string = ""; for (auto character : str) { diff --git a/dynamic_vino_lib/src/inferences/base_inference.cpp b/openvino_wrapper_lib/src/inferences/base_inference.cpp similarity index 61% rename from dynamic_vino_lib/src/inferences/base_inference.cpp rename to openvino_wrapper_lib/src/inferences/base_inference.cpp index b17a8c21..52138e5c 100644 --- a/dynamic_vino_lib/src/inferences/base_inference.cpp +++ b/openvino_wrapper_lib/src/inferences/base_inference.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,28 +19,28 @@ #include -#include "dynamic_vino_lib/inferences/base_inference.hpp" -#include "dynamic_vino_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/inferences/base_inference.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" // Result -dynamic_vino_lib::Result::Result(const cv::Rect & location) +openvino_wrapper_lib::Result::Result(const cv::Rect & location) { location_ = location; } // BaseInference -dynamic_vino_lib::BaseInference::BaseInference() = default; +openvino_wrapper_lib::BaseInference::BaseInference() = default; -dynamic_vino_lib::BaseInference::~BaseInference() = default; +openvino_wrapper_lib::BaseInference::~BaseInference() = default; -void dynamic_vino_lib::BaseInference::loadEngine(const std::shared_ptr engine) +void openvino_wrapper_lib::BaseInference::loadEngine(const std::shared_ptr engine) { engine_ = engine; } -bool dynamic_vino_lib::BaseInference::submitRequest() +bool openvino_wrapper_lib::BaseInference::submitRequest() { - if (engine_->getRequest() == nullptr) { + if (!engine_->getRequest()) { return false; } if (!enqueued_frames_) { @@ -48,14 +48,14 @@ bool dynamic_vino_lib::BaseInference::submitRequest() } enqueued_frames_ = 0; results_fetched_ = false; - engine_->getRequest()->StartAsync(); + engine_->getRequest().start_async(); slog::debug << "Async Inference started!" 
<< slog::endl; return true; } -bool dynamic_vino_lib::BaseInference::SynchronousRequest() +bool openvino_wrapper_lib::BaseInference::SynchronousRequest() { - if (engine_->getRequest() == nullptr) { + if (!engine_->getRequest()) { return false; } if (!enqueued_frames_) { @@ -63,11 +63,11 @@ bool dynamic_vino_lib::BaseInference::SynchronousRequest() } enqueued_frames_ = 0; results_fetched_ = false; - engine_->getRequest()->Infer(); + engine_->getRequest().infer(); return true; } -bool dynamic_vino_lib::BaseInference::fetchResults() +bool openvino_wrapper_lib::BaseInference::fetchResults() { if (results_fetched_) { return false; @@ -76,7 +76,7 @@ bool dynamic_vino_lib::BaseInference::fetchResults() return true; } -void dynamic_vino_lib::BaseInference::addCandidatedModel(std::shared_ptr model) +void openvino_wrapper_lib::BaseInference::addCandidatedModel(std::shared_ptr model) { slog::info << "TESTING in addCandidatedModel()" << slog::endl; if (model != nullptr) { diff --git a/dynamic_vino_lib/src/inferences/base_reidentification.cpp b/openvino_wrapper_lib/src/inferences/base_reidentification.cpp similarity index 87% rename from dynamic_vino_lib/src/inferences/base_reidentification.cpp rename to openvino_wrapper_lib/src/inferences/base_reidentification.cpp index d9ede0bd..559565cc 100644 --- a/dynamic_vino_lib/src/inferences/base_reidentification.cpp +++ b/openvino_wrapper_lib/src/inferences/base_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -23,17 +23,17 @@ #include #include #include -#include "dynamic_vino_lib/inferences/base_reidentification.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/base_reidentification.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Tracker -dynamic_vino_lib::Tracker::Tracker( +openvino_wrapper_lib::Tracker::Tracker( int max_record_size, double same_track_thresh, double new_track_thresh) : max_record_size_(max_record_size), same_track_thresh_(same_track_thresh), new_track_thresh_(new_track_thresh) {} -int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & feature) +int openvino_wrapper_lib::Tracker::processNewTrack(const std::vector & feature) { int most_similar_id; double similarity = findMostSimilarTrack(feature, most_similar_id); @@ -45,7 +45,7 @@ int dynamic_vino_lib::Tracker::processNewTrack(const std::vector & featur return most_similar_id; } -double dynamic_vino_lib::Tracker::findMostSimilarTrack( +double openvino_wrapper_lib::Tracker::findMostSimilarTrack( const std::vector & feature, int & most_similar_id) { double max_similarity = 0; @@ -60,7 +60,7 @@ double dynamic_vino_lib::Tracker::findMostSimilarTrack( return max_similarity; } -double dynamic_vino_lib::Tracker::calcSimilarity( +double openvino_wrapper_lib::Tracker::calcSimilarity( const std::vector & feature_a, const std::vector & feature_b) { if (feature_a.size() != feature_b.size()) { @@ -84,7 +84,7 @@ double dynamic_vino_lib::Tracker::calcSimilarity( return mul_sum / (sqrt(denom_a) * sqrt(denom_b)); } -void dynamic_vino_lib::Tracker::updateMatchTrack( +void openvino_wrapper_lib::Tracker::updateMatchTrack( int track_id, const std::vector & feature) { if (recorded_tracks_.find(track_id) != recorded_tracks_.end()) { @@ -95,7 +95,7 @@ void dynamic_vino_lib::Tracker::updateMatchTrack( } } -void dynamic_vino_lib::Tracker::removeEarlestTrack() +void openvino_wrapper_lib::Tracker::removeEarlestTrack() { std::lock_guard lk(tracks_mtx_); int64_t earlest_time = 
LONG_MAX; @@ -110,7 +110,7 @@ void dynamic_vino_lib::Tracker::removeEarlestTrack() } -int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) +int openvino_wrapper_lib::Tracker::addNewTrack(const std::vector & feature) { if (recorded_tracks_.size() >= max_record_size_) { std::thread remove_thread(std::bind(&Tracker::removeEarlestTrack, this)); @@ -125,14 +125,14 @@ int dynamic_vino_lib::Tracker::addNewTrack(const std::vector & feature) return max_track_id_; } -int64_t dynamic_vino_lib::Tracker::getCurrentTime() +int64_t openvino_wrapper_lib::Tracker::getCurrentTime() { auto tp = std::chrono::time_point_cast( std::chrono::system_clock::now()); return static_cast(tp.time_since_epoch().count()); } -bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) +bool openvino_wrapper_lib::Tracker::saveTracksToFile(std::string filepath) { std::ofstream outfile(filepath); if (!outfile.is_open()) { @@ -152,7 +152,7 @@ bool dynamic_vino_lib::Tracker::saveTracksToFile(std::string filepath) return true; } -bool dynamic_vino_lib::Tracker::loadTracksFromFile(std::string filepath) +bool openvino_wrapper_lib::Tracker::loadTracksFromFile(std::string filepath) { std::ifstream infile(filepath); if (!infile.is_open()) { diff --git a/dynamic_vino_lib/src/inferences/emotions_detection.cpp b/openvino_wrapper_lib/src/inferences/emotions_detection.cpp similarity index 67% rename from dynamic_vino_lib/src/inferences/emotions_detection.cpp rename to openvino_wrapper_lib/src/inferences/emotions_detection.cpp index ab3313fc..e239c228 100644 --- a/dynamic_vino_lib/src/inferences/emotions_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/emotions_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,42 +18,43 @@ * @file emotions_recognition.cpp */ -#include +#include #include #include -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include +#include "openvino_wrapper_lib/inferences/emotions_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // EmotionsResult -dynamic_vino_lib::EmotionsResult::EmotionsResult(const cv::Rect & location) +openvino_wrapper_lib::EmotionsResult::EmotionsResult(const cv::Rect & location) : Result(location) { } // Emotions Detection -dynamic_vino_lib::EmotionsDetection::EmotionsDetection() -: dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::EmotionsDetection::EmotionsDetection() +: openvino_wrapper_lib::BaseInference() { } -dynamic_vino_lib::EmotionsDetection::~EmotionsDetection() = default; +openvino_wrapper_lib::EmotionsDetection::~EmotionsDetection() = default; -void dynamic_vino_lib::EmotionsDetection::loadNetwork( +void openvino_wrapper_lib::EmotionsDetection::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::EmotionsDetection::enqueue( +bool openvino_wrapper_lib::EmotionsDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = dynamic_vino_lib::BaseInference::enqueue( + bool succeed = openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); if (!succeed) { slog::err << "Failed enqueue Emotion frame." 
<< slog::endl; @@ -65,24 +66,25 @@ bool dynamic_vino_lib::EmotionsDetection::enqueue( return true; } -bool dynamic_vino_lib::EmotionsDetection::submitRequest() +bool openvino_wrapper_lib::EmotionsDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::EmotionsDetection::fetchResults() +bool openvino_wrapper_lib::EmotionsDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } int label_length = static_cast(valid_model_->getLabels().size()); std::string output_name = valid_model_->getOutputName(); - InferenceEngine::Blob::Ptr emotions_blob = getEngine()->getRequest()->GetBlob(output_name); + ov::Tensor emotions_tensor = getEngine()->getRequest().get_tensor(output_name); /** emotions vector must have the same size as number of channels in model output. 
Default output format is NCHW so we check index 1 */ - int64 num_of_channels = emotions_blob->getTensorDesc().getDims().at(1); + ov::Shape shape = emotions_tensor.get_shape(); + int64 num_of_channels = shape[1]; if (num_of_channels != label_length) { slog::err << "Output size (" << num_of_channels << ") of the Emotions Recognition network is not equal " << @@ -95,7 +97,7 @@ bool dynamic_vino_lib::EmotionsDetection::fetchResults() /** we identify an index of the most probable emotion in output array for idx image to return appropriate emotion name */ - auto emotions_values = emotions_blob->buffer().as(); + auto emotions_values = emotions_tensor.data(); for (int idx = 0; idx < results_.size(); ++idx) { auto output_idx_pos = emotions_values + label_length * idx; int64 max_prob_emotion_idx = @@ -106,23 +108,23 @@ bool dynamic_vino_lib::EmotionsDetection::fetchResults() return true; } -int dynamic_vino_lib::EmotionsDetection::getResultsLength() const +int openvino_wrapper_lib::EmotionsDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::EmotionsDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::EmotionsDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::EmotionsDetection::getName() const +const std::string openvino_wrapper_lib::EmotionsDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::EmotionsDetection::observeOutput( +void openvino_wrapper_lib::EmotionsDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -130,7 +132,7 @@ void dynamic_vino_lib::EmotionsDetection::observeOutput( } } -const std::vector dynamic_vino_lib::EmotionsDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::EmotionsDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) 
{ diff --git a/dynamic_vino_lib/src/inferences/face_detection.cpp b/openvino_wrapper_lib/src/inferences/face_detection.cpp similarity index 73% rename from dynamic_vino_lib/src/inferences/face_detection.cpp rename to openvino_wrapper_lib/src/inferences/face_detection.cpp index 0e6bb1d7..0037bea3 100644 --- a/dynamic_vino_lib/src/inferences/face_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/face_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,18 +22,18 @@ #include #include -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // FaceDetectionResult -dynamic_vino_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) +openvino_wrapper_lib::FaceDetectionResult::FaceDetectionResult(const cv::Rect & location) : ObjectDetectionResult(location) { } // FaceDetection -dynamic_vino_lib::FaceDetection::FaceDetection( +openvino_wrapper_lib::FaceDetection::FaceDetection( bool enable_roi_constraint, double show_output_thresh) : ObjectDetection(enable_roi_constraint, show_output_thresh) diff --git a/dynamic_vino_lib/src/inferences/face_reidentification.cpp b/openvino_wrapper_lib/src/inferences/face_reidentification.cpp similarity index 65% rename from dynamic_vino_lib/src/inferences/face_reidentification.cpp rename to openvino_wrapper_lib/src/inferences/face_reidentification.cpp index d50d317f..392d4d62 100644 --- a/dynamic_vino_lib/src/inferences/face_reidentification.cpp +++ b/openvino_wrapper_lib/src/inferences/face_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel 
Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,37 +20,37 @@ #include #include #include -#include "dynamic_vino_lib/inferences/face_reidentification.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/face_reidentification.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // FaceReidentificationResult -dynamic_vino_lib::FaceReidentificationResult::FaceReidentificationResult( +openvino_wrapper_lib::FaceReidentificationResult::FaceReidentificationResult( const cv::Rect & location) : Result(location) {} // FaceReidentification -dynamic_vino_lib::FaceReidentification::FaceReidentification(double match_thresh) -: dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::FaceReidentification::FaceReidentification(double match_thresh) +: openvino_wrapper_lib::BaseInference() { - face_tracker_ = std::make_shared(1000, match_thresh, 0.3); + face_tracker_ = std::make_shared(1000, match_thresh, 0.3); } -dynamic_vino_lib::FaceReidentification::~FaceReidentification() = default; -void dynamic_vino_lib::FaceReidentification::loadNetwork( +openvino_wrapper_lib::FaceReidentification::~FaceReidentification() = default; +void openvino_wrapper_lib::FaceReidentification::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::FaceReidentification::enqueue( +bool openvino_wrapper_lib::FaceReidentification::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -60,14 +60,14 
@@ bool dynamic_vino_lib::FaceReidentification::enqueue( return true; } -bool dynamic_vino_lib::FaceReidentification::submitRequest() +bool openvino_wrapper_lib::FaceReidentification::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::FaceReidentification::fetchResults() +bool openvino_wrapper_lib::FaceReidentification::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); @@ -85,23 +85,23 @@ bool dynamic_vino_lib::FaceReidentification::fetchResults() return true; } -int dynamic_vino_lib::FaceReidentification::getResultsLength() const +int openvino_wrapper_lib::FaceReidentification::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::FaceReidentification::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::FaceReidentification::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::FaceReidentification::getName() const +const std::string openvino_wrapper_lib::FaceReidentification::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::FaceReidentification::observeOutput( +void openvino_wrapper_lib::FaceReidentification::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -109,7 +109,7 @@ void dynamic_vino_lib::FaceReidentification::observeOutput( } } -const std::vector dynamic_vino_lib::FaceReidentification::getFilteredROIs( +const std::vector openvino_wrapper_lib::FaceReidentification::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git 
a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp b/openvino_wrapper_lib/src/inferences/head_pose_detection.cpp similarity index 55% rename from dynamic_vino_lib/src/inferences/head_pose_detection.cpp rename to openvino_wrapper_lib/src/inferences/head_pose_detection.cpp index 0c5b3170..591c6fdd 100644 --- a/dynamic_vino_lib/src/inferences/head_pose_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/head_pose_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,38 +21,38 @@ #include #include #include -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" // HeadPoseResult -dynamic_vino_lib::HeadPoseResult::HeadPoseResult(const cv::Rect & location) +openvino_wrapper_lib::HeadPoseResult::HeadPoseResult(const cv::Rect & location) : Result(location) { } // Head Pose Detection -dynamic_vino_lib::HeadPoseDetection::HeadPoseDetection() -: dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::HeadPoseDetection::HeadPoseDetection() +: openvino_wrapper_lib::BaseInference() { } -dynamic_vino_lib::HeadPoseDetection::~HeadPoseDetection() = default; +openvino_wrapper_lib::HeadPoseDetection::~HeadPoseDetection() = default; -void dynamic_vino_lib::HeadPoseDetection::loadNetwork( +void openvino_wrapper_lib::HeadPoseDetection::loadNetwork( std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::HeadPoseDetection::enqueue( +bool openvino_wrapper_lib::HeadPoseDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - bool succeed = 
dynamic_vino_lib::BaseInference::enqueue( + bool succeed = openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, getResultsLength(), valid_model_->getInputName()); if (!succeed) { return false; @@ -62,47 +62,47 @@ bool dynamic_vino_lib::HeadPoseDetection::enqueue( return true; } -bool dynamic_vino_lib::HeadPoseDetection::submitRequest() +bool openvino_wrapper_lib::HeadPoseDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::HeadPoseDetection::fetchResults() +bool openvino_wrapper_lib::HeadPoseDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } auto request = getEngine()->getRequest(); - InferenceEngine::Blob::Ptr angle_r = request->GetBlob(valid_model_->getOutputOutputAngleR()); - InferenceEngine::Blob::Ptr angle_p = request->GetBlob(valid_model_->getOutputOutputAngleP()); - InferenceEngine::Blob::Ptr angle_y = request->GetBlob(valid_model_->getOutputOutputAngleY()); + ov::Tensor angle_r = request.get_tensor(valid_model_->getOutputOutputAngleR()); + ov::Tensor angle_p = request.get_tensor(valid_model_->getOutputOutputAngleP()); + ov::Tensor angle_y = request.get_tensor(valid_model_->getOutputOutputAngleY()); for (int i = 0; i < getResultsLength(); ++i) { - results_[i].angle_r_ = angle_r->buffer().as()[i]; - results_[i].angle_p_ = angle_p->buffer().as()[i]; - results_[i].angle_y_ = angle_y->buffer().as()[i]; + results_[i].angle_r_ = angle_r.data()[i]; + results_[i].angle_p_ = angle_p.data()[i]; + results_[i].angle_y_ = angle_y.data()[i]; } return true; } -int dynamic_vino_lib::HeadPoseDetection::getResultsLength() const +int openvino_wrapper_lib::HeadPoseDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * 
-dynamic_vino_lib::HeadPoseDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::HeadPoseDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::HeadPoseDetection::getName() const +const std::string openvino_wrapper_lib::HeadPoseDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::HeadPoseDetection::observeOutput( +void openvino_wrapper_lib::HeadPoseDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -110,7 +110,7 @@ void dynamic_vino_lib::HeadPoseDetection::observeOutput( } } -const std::vector dynamic_vino_lib::HeadPoseDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::HeadPoseDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp b/openvino_wrapper_lib/src/inferences/landmarks_detection.cpp similarity index 69% rename from dynamic_vino_lib/src/inferences/landmarks_detection.cpp rename to openvino_wrapper_lib/src/inferences/landmarks_detection.cpp index 5ab122f9..f3e468c8 100644 --- a/dynamic_vino_lib/src/inferences/landmarks_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/landmarks_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,34 +20,34 @@ #include #include #include -#include "dynamic_vino_lib/inferences/landmarks_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/landmarks_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // LandmarksDetectionResult -dynamic_vino_lib::LandmarksDetectionResult::LandmarksDetectionResult( +openvino_wrapper_lib::LandmarksDetectionResult::LandmarksDetectionResult( const cv::Rect & location) : Result(location) {} // LandmarksDetection -dynamic_vino_lib::LandmarksDetection::LandmarksDetection() -: dynamic_vino_lib::BaseInference() {} +openvino_wrapper_lib::LandmarksDetection::LandmarksDetection() +: openvino_wrapper_lib::BaseInference() {} -dynamic_vino_lib::LandmarksDetection::~LandmarksDetection() = default; -void dynamic_vino_lib::LandmarksDetection::loadNetwork( +openvino_wrapper_lib::LandmarksDetection::~LandmarksDetection() = default; +void openvino_wrapper_lib::LandmarksDetection::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::LandmarksDetection::enqueue( +bool openvino_wrapper_lib::LandmarksDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -57,14 +57,14 @@ bool dynamic_vino_lib::LandmarksDetection::enqueue( return true; } -bool dynamic_vino_lib::LandmarksDetection::submitRequest() +bool openvino_wrapper_lib::LandmarksDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::LandmarksDetection::fetchResults() +bool 
openvino_wrapper_lib::LandmarksDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); @@ -87,23 +87,23 @@ bool dynamic_vino_lib::LandmarksDetection::fetchResults() return true; } -int dynamic_vino_lib::LandmarksDetection::getResultsLength() const +int openvino_wrapper_lib::LandmarksDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::LandmarksDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::LandmarksDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::LandmarksDetection::getName() const +const std::string openvino_wrapper_lib::LandmarksDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::LandmarksDetection::observeOutput( +void openvino_wrapper_lib::LandmarksDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -111,7 +111,7 @@ void dynamic_vino_lib::LandmarksDetection::observeOutput( } } -const std::vector dynamic_vino_lib::LandmarksDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::LandmarksDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp b/openvino_wrapper_lib/src/inferences/license_plate_detection.cpp similarity index 60% rename from dynamic_vino_lib/src/inferences/license_plate_detection.cpp rename to openvino_wrapper_lib/src/inferences/license_plate_detection.cpp index bb42f285..aa0f8c1d 100644 --- a/dynamic_vino_lib/src/inferences/license_plate_detection.cpp +++ 
b/openvino_wrapper_lib/src/inferences/license_plate_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,46 +20,45 @@ #include #include #include -#include "dynamic_vino_lib/inferences/license_plate_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // LicensePlateDetectionResult -dynamic_vino_lib::LicensePlateDetectionResult::LicensePlateDetectionResult( +openvino_wrapper_lib::LicensePlateDetectionResult::LicensePlateDetectionResult( const cv::Rect & location) : Result(location) {} // LicensePlateDetection -dynamic_vino_lib::LicensePlateDetection::LicensePlateDetection() -: dynamic_vino_lib::BaseInference() {} +openvino_wrapper_lib::LicensePlateDetection::LicensePlateDetection() +: openvino_wrapper_lib::BaseInference() {} -dynamic_vino_lib::LicensePlateDetection::~LicensePlateDetection() = default; -void dynamic_vino_lib::LicensePlateDetection::loadNetwork( +openvino_wrapper_lib::LicensePlateDetection::~LicensePlateDetection() = default; +void openvino_wrapper_lib::LicensePlateDetection::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -void dynamic_vino_lib::LicensePlateDetection::fillSeqBlob() +void openvino_wrapper_lib::LicensePlateDetection::fillSeqBlob() { - InferenceEngine::Blob::Ptr seq_blob = getEngine()->getRequest()->GetBlob( + ov::Tensor seq_tensor = getEngine()->getRequest().get_tensor( valid_model_->getSeqInputName()); - int max_sequence_size = seq_blob->getTensorDesc().getDims()[0]; + int max_sequence_size = seq_tensor.get_shape()[0]; // second input is 
sequence, which is some relic from the training // it should have the leading 0.0f and rest 1.0f - float * blob_data = seq_blob->buffer().as(); - blob_data[0] = 0.0f; - std::fill(blob_data + 1, blob_data + max_sequence_size, 1.0f); + float * tensor_data = seq_tensor.data(); + std::fill(tensor_data, tensor_data + max_sequence_size, 1.0f); } -bool dynamic_vino_lib::LicensePlateDetection::enqueue( +bool openvino_wrapper_lib::LicensePlateDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -70,19 +69,19 @@ bool dynamic_vino_lib::LicensePlateDetection::enqueue( return true; } -bool dynamic_vino_lib::LicensePlateDetection::submitRequest() +bool openvino_wrapper_lib::LicensePlateDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::LicensePlateDetection::fetchResults() +bool openvino_wrapper_lib::LicensePlateDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + ov::InferRequest request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); + const float * output_values = request.get_tensor(output).data(); for (int i = 0; i < getResultsLength(); i++) { std::string license = ""; int max_size = valid_model_->getMaxSequenceSize(); @@ -99,23 +98,23 @@ bool dynamic_vino_lib::LicensePlateDetection::fetchResults() return true; } -int 
dynamic_vino_lib::LicensePlateDetection::getResultsLength() const +int openvino_wrapper_lib::LicensePlateDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::LicensePlateDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::LicensePlateDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::LicensePlateDetection::getName() const +const std::string openvino_wrapper_lib::LicensePlateDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::LicensePlateDetection::observeOutput( +void openvino_wrapper_lib::LicensePlateDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -123,7 +122,7 @@ void dynamic_vino_lib::LicensePlateDetection::observeOutput( } } -const std::vector dynamic_vino_lib::LicensePlateDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::LicensePlateDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/object_detection.cpp b/openvino_wrapper_lib/src/inferences/object_detection.cpp similarity index 67% rename from dynamic_vino_lib/src/inferences/object_detection.cpp rename to openvino_wrapper_lib/src/inferences/object_detection.cpp index 36f30f89..8470a103 100644 --- a/dynamic_vino_lib/src/inferences/object_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/object_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -22,37 +22,37 @@ #include #include #include -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // ObjectDetectionResult -dynamic_vino_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) +openvino_wrapper_lib::ObjectDetectionResult::ObjectDetectionResult(const cv::Rect & location) : Result(location) { } // ObjectDetection -dynamic_vino_lib::ObjectDetection::ObjectDetection( +openvino_wrapper_lib::ObjectDetection::ObjectDetection( bool enable_roi_constraint, double show_output_thresh) : show_output_thresh_(show_output_thresh), - enable_roi_constraint_(enable_roi_constraint), dynamic_vino_lib::BaseInference() + enable_roi_constraint_(enable_roi_constraint), openvino_wrapper_lib::BaseInference() { result_filter_ = std::make_shared(); result_filter_->init(); } -dynamic_vino_lib::ObjectDetection::~ObjectDetection() = default; +openvino_wrapper_lib::ObjectDetection::~ObjectDetection() = default; -void dynamic_vino_lib::ObjectDetection::loadNetwork( +void openvino_wrapper_lib::ObjectDetection::loadNetwork( std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::ObjectDetection::enqueue( +bool openvino_wrapper_lib::ObjectDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { @@ -70,17 +70,13 @@ bool dynamic_vino_lib::ObjectDetection::enqueue( return false; } - // nonsense!! 
- // Result r(input_frame_loc); - // results_.clear(); - // results_.emplace_back(r); enqueued_frames_ += 1; return true; } -bool dynamic_vino_lib::ObjectDetection::fetchResults() +bool openvino_wrapper_lib::ObjectDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } @@ -91,23 +87,23 @@ bool dynamic_vino_lib::ObjectDetection::fetchResults() getEngine(), results_, show_output_thresh_, enable_roi_constraint_); } -int dynamic_vino_lib::ObjectDetection::getResultsLength() const +int openvino_wrapper_lib::ObjectDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::ObjectDetection::Result * -dynamic_vino_lib::ObjectDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::ObjectDetection::Result * +openvino_wrapper_lib::ObjectDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::ObjectDetection::getName() const +const std::string openvino_wrapper_lib::ObjectDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::ObjectDetection::observeOutput( +void openvino_wrapper_lib::ObjectDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -115,7 +111,7 @@ void dynamic_vino_lib::ObjectDetection::observeOutput( } } -const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::ObjectDetection::getFilteredROIs( const std::string filter_conditions) const { if (!result_filter_->isValidFilterConditions(filter_conditions)) { @@ -132,22 +128,22 @@ const std::vector dynamic_vino_lib::ObjectDetection::getFilteredROIs( // ObjectDetectionResultFilter -dynamic_vino_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} 
+openvino_wrapper_lib::ObjectDetectionResultFilter::ObjectDetectionResultFilter() {} -void dynamic_vino_lib::ObjectDetectionResultFilter::init() +void openvino_wrapper_lib::ObjectDetectionResultFilter::init() { key_to_function_.insert(std::make_pair("label", isValidLabel)); key_to_function_.insert(std::make_pair("confidence", isValidConfidence)); } -void dynamic_vino_lib::ObjectDetectionResultFilter::acceptResults( +void openvino_wrapper_lib::ObjectDetectionResultFilter::acceptResults( const std::vector & results) { results_ = results; } std::vector -dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() +openvino_wrapper_lib::ObjectDetectionResultFilter::getFilteredLocations() { std::vector locations; for (auto result : results_) { @@ -158,25 +154,25 @@ dynamic_vino_lib::ObjectDetectionResultFilter::getFilteredLocations() return locations; } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidLabel( +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidLabel( const Result & result, const std::string & op, const std::string & target) { return stringCompare(result.getLabel(), op, target); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidConfidence( +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidConfidence( const Result & result, const std::string & op, const std::string & target) { return floatCompare(result.getConfidence(), op, stringToFloat(target)); } -bool dynamic_vino_lib::ObjectDetectionResultFilter::isValidResult( +bool openvino_wrapper_lib::ObjectDetectionResultFilter::isValidResult( const Result & result) { ISVALIDRESULT(key_to_function_, result); } -double dynamic_vino_lib::ObjectDetection::calcIoU( +double openvino_wrapper_lib::ObjectDetection::calcIoU( const cv::Rect & box_1, const cv::Rect & box_2) { diff --git a/dynamic_vino_lib/src/inferences/object_segmentation.cpp b/openvino_wrapper_lib/src/inferences/object_segmentation.cpp similarity index 66% rename from 
dynamic_vino_lib/src/inferences/object_segmentation.cpp rename to openvino_wrapper_lib/src/inferences/object_segmentation.cpp index e4772c06..699731e3 100644 --- a/dynamic_vino_lib/src/inferences/object_segmentation.cpp +++ b/openvino_wrapper_lib/src/inferences/object_segmentation.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,25 +23,26 @@ #include #include -#include "dynamic_vino_lib/inferences/object_segmentation.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include +#include "openvino_wrapper_lib/inferences/object_segmentation.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // ObjectSegmentationResult -dynamic_vino_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect &location) +openvino_wrapper_lib::ObjectSegmentationResult::ObjectSegmentationResult(const cv::Rect &location) : Result(location) { } // ObjectSegmentation -dynamic_vino_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) - : show_output_thresh_(show_output_thresh), dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::ObjectSegmentation::ObjectSegmentation(double show_output_thresh) + : show_output_thresh_(show_output_thresh), openvino_wrapper_lib::BaseInference() { } -dynamic_vino_lib::ObjectSegmentation::~ObjectSegmentation() = default; +openvino_wrapper_lib::ObjectSegmentation::~ObjectSegmentation() = default; -void dynamic_vino_lib::ObjectSegmentation::loadNetwork( +void openvino_wrapper_lib::ObjectSegmentation::loadNetwork( const std::shared_ptr network) { slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; @@ -53,7 +54,7 @@ void dynamic_vino_lib::ObjectSegmentation::loadNetwork( * Deprecated! 
* This function only support OpenVINO version <=2018R5 */ -bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( +bool openvino_wrapper_lib::ObjectSegmentation::enqueue_for_one_input( const cv::Mat &frame, const cv::Rect &input_frame_loc) { @@ -62,7 +63,7 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( width_ = frame.cols; height_ = frame.rows; } - if (!dynamic_vino_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -73,7 +74,7 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue_for_one_input( return true; } -bool dynamic_vino_lib::ObjectSegmentation::enqueue( +bool openvino_wrapper_lib::ObjectSegmentation::enqueue( const cv::Mat &frame, const cv::Rect &input_frame_loc) { @@ -105,40 +106,48 @@ bool dynamic_vino_lib::ObjectSegmentation::enqueue( return true; } -bool dynamic_vino_lib::ObjectSegmentation::submitRequest() +bool openvino_wrapper_lib::ObjectSegmentation::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::ObjectSegmentation::fetchResults() +bool openvino_wrapper_lib::ObjectSegmentation::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) { return false; } bool found_result = false; results_.clear(); - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + ov::InferRequest infer_request = getEngine()->getRequest(); slog::debug << "Analyzing Detection results..." 
<< slog::endl; std::string detection_output = valid_model_->getOutputName("detection"); - std::string mask_output = valid_model_->getOutputName("masks"); - const InferenceEngine::Blob::Ptr do_blob = request->GetBlob(detection_output.c_str()); - const auto do_data = do_blob->buffer().as(); - const auto masks_blob = request->GetBlob(mask_output.c_str()); - const auto masks_data = masks_blob->buffer().as(); - const size_t output_w = masks_blob->getTensorDesc().getDims().at(3); - const size_t output_h = masks_blob->getTensorDesc().getDims().at(2); - const size_t output_des = masks_blob-> getTensorDesc().getDims().at(1); - const size_t output_extra = masks_blob-> getTensorDesc().getDims().at(0); + ov::Tensor output_tensor = infer_request.get_tensor(detection_output); + const auto out_data = output_tensor.data(); + ov::Shape out_shape = output_tensor.get_shape(); + size_t output_w, output_h, output_des, output_extra = 0; + if (out_shape.size() == 3) { + output_w = out_shape[2]; + output_h = out_shape[1]; + output_des = out_shape[0]; + } else if (out_shape.size() == 4) { + output_w = out_shape[3]; + output_h = out_shape[2]; + output_des = out_shape[1]; + output_extra = out_shape[0]; + } else { + slog::warn << "unexpected output shape: " <GetBlob(detection_output)->buffer().as(); + const float *detections = output_tensor.data(); std::vector &labels = valid_model_->getLabels(); slog::debug << "label size " <(rowId, colId)[ch] = colors_[classId][ch]; } - //classId = static_cast(predictions[rowId * output_w + colId]); } else { for (int chId = 0; chId < output_des; ++chId) { float prob = detections[chId * output_h * output_w + rowId * output_w+ colId]; - //float prob = predictions[chId * output_h * output_w + rowId * output_w+ colId]; if (prob > maxProb) { classId = chId; @@ -192,23 +199,23 @@ bool dynamic_vino_lib::ObjectSegmentation::fetchResults() return true; } -int dynamic_vino_lib::ObjectSegmentation::getResultsLength() const +int 
openvino_wrapper_lib::ObjectSegmentation::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::ObjectSegmentation::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::ObjectSegmentation::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::ObjectSegmentation::getName() const +const std::string openvino_wrapper_lib::ObjectSegmentation::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::ObjectSegmentation::observeOutput( +void openvino_wrapper_lib::ObjectSegmentation::observeOutput( const std::shared_ptr &output) { if (output != nullptr) @@ -217,7 +224,7 @@ void dynamic_vino_lib::ObjectSegmentation::observeOutput( } } -const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROIs( +const std::vector openvino_wrapper_lib::ObjectSegmentation::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) @@ -226,9 +233,5 @@ const std::vector dynamic_vino_lib::ObjectSegmentation::getFilteredROI << "Filter conditions: " << filter_conditions << slog::endl; } std::vector filtered_rois; - for (auto res : results_) - { - filtered_rois.push_back(res.getLocation()); - } return filtered_rois; } diff --git a/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp b/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp new file mode 100644 index 00000000..7c35fc55 --- /dev/null +++ b/openvino_wrapper_lib/src/inferences/object_segmentation_maskrcnn.cpp @@ -0,0 +1,238 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of ObjectSegmentation class and + * ObjectSegmentationResult class + * @file object_segmentation.cpp + */ +#include +#include +#include +#include +#include + +#include +#include "openvino_wrapper_lib/inferences/object_segmentation_maskrcnn.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" + +// ObjectSegmentationResult +openvino_wrapper_lib::ObjectSegmentationMaskrcnnResult::ObjectSegmentationMaskrcnnResult(const cv::Rect &location) + : Result(location) +{ +} + +// ObjectSegmentation +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::ObjectSegmentationMaskrcnn(double show_output_thresh) + : show_output_thresh_(show_output_thresh), openvino_wrapper_lib::BaseInference() +{ +} + +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::~ObjectSegmentationMaskrcnn() = default; + +void openvino_wrapper_lib::ObjectSegmentationMaskrcnn::loadNetwork( + const std::shared_ptr network) +{ + slog::info << "Loading Network: " << network->getModelCategory() << slog::endl; + valid_model_ = network; + setMaxBatchSize(network->getMaxBatchSize()); +} + +/** + * Deprecated! 
+ * This function only support OpenVINO version <=2018R5 + */ +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::enqueue_for_one_input( + const cv::Mat &frame, + const cv::Rect &input_frame_loc) +{ + if (width_ == 0 && height_ == 0) + { + width_ = frame.cols; + height_ = frame.rows; + } + if (!openvino_wrapper_lib::BaseInference::enqueue(frame, input_frame_loc, 1, 0, + valid_model_->getInputName())) + { + return false; + } + Result r(input_frame_loc); + results_.clear(); + results_.emplace_back(r); + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::enqueue( + const cv::Mat &frame, + const cv::Rect &input_frame_loc) +{ + if (width_ == 0 && height_ == 0) + { + width_ = frame.cols; + height_ = frame.rows; + } + + if (valid_model_ == nullptr || getEngine() == nullptr) + { + throw std::logic_error("Model or Engine is not set correctly!"); + return false; + } + + if (enqueued_frames_ >= valid_model_->getMaxBatchSize()) + { + slog::warn << "Number of " << getName() << "input more than maximum(" << + max_batch_size_ << ") processed by inference" << slog::endl; + return false; + } + + if (!valid_model_->enqueue(getEngine(), frame, input_frame_loc)) + { + return false; + } + + enqueued_frames_ += 1; + return true; +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::submitRequest() +{ + return openvino_wrapper_lib::BaseInference::submitRequest(); +} + +bool openvino_wrapper_lib::ObjectSegmentationMaskrcnn::fetchResults() +{ + + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); + if (!can_fetch) + { + return false; + } + bool found_result = false; + results_.clear(); + ov::InferRequest infer_request = getEngine()->getRequest(); + slog::debug << "Analyzing Detection results..." 
<< slog::endl; + std::string detection_output = valid_model_->getOutputName("detection"); + std::string mask_output = valid_model_->getOutputName("masks"); + slog::debug << "Detection_output=" << detection_output << ", Mask_output=" << mask_output << slog::endl; + + //get detection data + ov::Tensor do_tensor = infer_request.get_tensor(detection_output.c_str()); + const auto do_data = do_tensor.data(); + ov::Shape do_shape = do_tensor.get_shape(); + slog::debug << "Detection Blob getDims = " <(); + ov::Shape mask_shape = mask_tensor.get_shape(); + + // determine models + size_t box_description_size = do_shape.back(); + OPENVINO_ASSERT(mask_shape.size() == 4); + size_t box_num = mask_shape[0]; + size_t C = mask_shape[1]; + size_t H = mask_shape[2]; + size_t W = mask_shape[3]; + size_t box_stride = W * H * C; + slog::debug << "box_description is:" << box_description_size << slog::endl; + slog::debug << "box_num is:" << box_num<< slog::endl; + slog::debug << "C is:" << C << slog::endl; + slog::debug << "H is:" << H << slog::endl; + slog::debug << "W is:" << W << slog::endl; + + for (size_t box = 0; box < box_num; ++box) { + // box description: batch, label, prob, x1, y1, x2, y2 + float * box_info = do_data + box * box_description_size; + auto batch = static_cast(box_info[0]); + slog::debug << "batch =" << batch << slog::endl; + if (batch < 0) { + slog::warn << "Batch size should be greater than 0. [batch=" << batch <<"]." 
<< slog::endl; + break; + } + float prob = box_info[2]; + if (prob > show_output_thresh_) { + float x1 = std::min(std::max(0.0f, box_info[3] * width_), static_cast(width_)); + float y1 = std::min(std::max(0.0f, box_info[4] * height_), static_cast(height_)); + float x2 = std::min(std::max(0.0f, box_info[5] * width_), static_cast(width_)); + float y2 = std::min(std::max(0.0f, box_info[6] * height_), static_cast(height_)); + int box_width = static_cast(x2 - x1); + int box_height = static_cast(y2 - y1); + slog::debug << "Box[" << box_width << "x" << box_height << "]" << slog::endl; + if (box_width <= 0 || box_height <=0) break; + int class_id = static_cast(box_info[1] + 1e-6f); + float * mask_arr = mask_data + box_stride * box + H * W * (class_id - 1); + slog::info << "Detected class " << class_id << " with probability " << prob << " from batch " << batch + << ": [" << x1 << ", " << y1 << "], [" << x2 << ", " << y2 << "]" << slog::endl; + cv::Mat mask_mat(H, W, CV_32FC1, mask_arr); + cv::Rect roi = cv::Rect(static_cast(x1), static_cast(y1), box_width, box_height); + cv::Mat resized_mask_mat(box_height, box_width, CV_32FC1); + cv::resize(mask_mat, resized_mask_mat, cv::Size(box_width, box_height)); + Result result(roi); + result.confidence_ = prob; + std::vector & labels = valid_model_->getLabels(); + result.label_ = class_id < labels.size() ? labels[class_id] : + std::string("label #") + std::to_string(class_id); + result.mask_ = resized_mask_mat; + found_result = true; + slog::debug << "adding one segmentation Box ..." << slog::endl; + results_.emplace_back(result); + } + } + if (!found_result) { + slog::debug << "No Segmentation Result Found!" 
<< slog::endl; + results_.clear(); + } + return true; +} + +int openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getResultsLength() const +{ + return static_cast(results_.size()); +} + +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getLocationResult(int idx) const +{ + return &(results_[idx]); +} + +const std::string openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getName() const +{ + return valid_model_->getModelCategory(); +} + +void openvino_wrapper_lib::ObjectSegmentationMaskrcnn::observeOutput( + const std::shared_ptr &output) +{ + if (output != nullptr) + { + output->accept(results_); + } +} + +const std::vector openvino_wrapper_lib::ObjectSegmentationMaskrcnn::getFilteredROIs( + const std::string filter_conditions) const +{ + if (!filter_conditions.empty()) + { + slog::err << "Object segmentation does not support filtering now! " + << "Filter conditions: " << filter_conditions << slog::endl; + } + std::vector filtered_rois; + for (auto res : results_) + { + filtered_rois.push_back(res.getLocation()); + } + return filtered_rois; +} diff --git a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp b/openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp similarity index 59% rename from dynamic_vino_lib/src/inferences/person_attribs_detection.cpp rename to openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp index 3831c70a..79b5c446 100644 --- a/dynamic_vino_lib/src/inferences/person_attribs_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/person_attribs_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,34 +20,35 @@ #include #include #include -#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include +#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // PersonAttribsDetectionResult -dynamic_vino_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult( +openvino_wrapper_lib::PersonAttribsDetectionResult::PersonAttribsDetectionResult( const cv::Rect & location) : Result(location) {} // PersonAttribsDetection -dynamic_vino_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) -: attribs_confidence_(attribs_confidence), dynamic_vino_lib::BaseInference() {} +openvino_wrapper_lib::PersonAttribsDetection::PersonAttribsDetection(double attribs_confidence) +: attribs_confidence_(attribs_confidence), openvino_wrapper_lib::BaseInference() {} -dynamic_vino_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; -void dynamic_vino_lib::PersonAttribsDetection::loadNetwork( +openvino_wrapper_lib::PersonAttribsDetection::~PersonAttribsDetection() = default; +void openvino_wrapper_lib::PersonAttribsDetection::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::PersonAttribsDetection::enqueue( +bool openvino_wrapper_lib::PersonAttribsDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -57,32 +58,29 @@ bool dynamic_vino_lib::PersonAttribsDetection::enqueue( return true; } -bool dynamic_vino_lib::PersonAttribsDetection::submitRequest() +bool 
openvino_wrapper_lib::PersonAttribsDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::PersonAttribsDetection::fetchResults() +bool openvino_wrapper_lib::PersonAttribsDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + ov::InferRequest request = getEngine()->getRequest(); slog::debug << "Analyzing Attributes Detection results..." << slog::endl; std::string attribute_output = valid_model_->getOutputName("attributes_output_"); std::string top_output = valid_model_->getOutputName("top_output_"); std::string bottom_output = valid_model_->getOutputName("bottom_output_"); - /*auto attri_values = request->GetBlob(attribute_output)->buffer().as(); - auto top_values = request->GetBlob(top_output)->buffer().as(); - auto bottom_values = request->GetBlob(bottom_output)->buffer().as();*/ - InferenceEngine::Blob::Ptr attribBlob = request->GetBlob(attribute_output); - InferenceEngine::Blob::Ptr topBlob = request->GetBlob(top_output); - InferenceEngine::Blob::Ptr bottomBlob = request->GetBlob(bottom_output); + ov::Tensor attrib_tensor = request.get_tensor(attribute_output); + ov::Tensor top_tensor = request.get_tensor(top_output); + ov::Tensor bottom_tensor = request.get_tensor(bottom_output); - auto attri_values = attribBlob->buffer().as(); - auto top_values = topBlob->buffer().as(); - auto bottom_values = bottomBlob->buffer().as(); + auto attri_values = attrib_tensor.data(); + auto top_values = top_tensor.data(); + auto bottom_values = bottom_tensor.data(); int net_attrib_length = net_attributes_.size(); for (int i = 0; i < getResultsLength(); i++) { @@ -104,23 +102,23 @@ bool 
dynamic_vino_lib::PersonAttribsDetection::fetchResults() return true; } -int dynamic_vino_lib::PersonAttribsDetection::getResultsLength() const +int openvino_wrapper_lib::PersonAttribsDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::PersonAttribsDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::PersonAttribsDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::PersonAttribsDetection::getName() const +const std::string openvino_wrapper_lib::PersonAttribsDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::PersonAttribsDetection::observeOutput( +void openvino_wrapper_lib::PersonAttribsDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -128,7 +126,7 @@ void dynamic_vino_lib::PersonAttribsDetection::observeOutput( } } -const std::vector dynamic_vino_lib::PersonAttribsDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::PersonAttribsDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/person_reidentification.cpp b/openvino_wrapper_lib/src/inferences/person_reidentification.cpp similarity index 60% rename from dynamic_vino_lib/src/inferences/person_reidentification.cpp rename to openvino_wrapper_lib/src/inferences/person_reidentification.cpp index 34280bd1..9d4d891a 100644 --- a/dynamic_vino_lib/src/inferences/person_reidentification.cpp +++ b/openvino_wrapper_lib/src/inferences/person_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,37 +20,37 @@ #include #include #include -#include "dynamic_vino_lib/inferences/person_reidentification.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/person_reidentification.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // PersonReidentificationResult -dynamic_vino_lib::PersonReidentificationResult::PersonReidentificationResult( +openvino_wrapper_lib::PersonReidentificationResult::PersonReidentificationResult( const cv::Rect & location) : Result(location) {} // PersonReidentification -dynamic_vino_lib::PersonReidentification::PersonReidentification(double match_thresh) -: dynamic_vino_lib::BaseInference() +openvino_wrapper_lib::PersonReidentification::PersonReidentification(double match_thresh) +: openvino_wrapper_lib::BaseInference() { - person_tracker_ = std::make_shared(1000, match_thresh, 0.3); + person_tracker_ = std::make_shared(1000, match_thresh, 0.3); } -dynamic_vino_lib::PersonReidentification::~PersonReidentification() = default; -void dynamic_vino_lib::PersonReidentification::loadNetwork( +openvino_wrapper_lib::PersonReidentification::~PersonReidentification() = default; +void openvino_wrapper_lib::PersonReidentification::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::PersonReidentification::enqueue( +bool openvino_wrapper_lib::PersonReidentification::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -60,19 +60,19 @@ bool dynamic_vino_lib::PersonReidentification::enqueue( return true; } -bool dynamic_vino_lib::PersonReidentification::submitRequest() +bool 
openvino_wrapper_lib::PersonReidentification::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::PersonReidentification::fetchResults() +bool openvino_wrapper_lib::PersonReidentification::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); + ov::InferRequest request = getEngine()->getRequest(); std::string output = valid_model_->getOutputName(); - const float * output_values = request->GetBlob(output)->buffer().as(); + const float * output_values = request.get_tensor(output).data(); for (int i = 0; i < getResultsLength(); i++) { std::vector new_person = std::vector( output_values + 256 * i, output_values + 256 * i + 256); @@ -85,23 +85,23 @@ bool dynamic_vino_lib::PersonReidentification::fetchResults() return true; } -int dynamic_vino_lib::PersonReidentification::getResultsLength() const +int openvino_wrapper_lib::PersonReidentification::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::PersonReidentification::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::PersonReidentification::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::PersonReidentification::getName() const +const std::string openvino_wrapper_lib::PersonReidentification::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::PersonReidentification::observeOutput( +void openvino_wrapper_lib::PersonReidentification::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -109,7 +109,7 @@ void dynamic_vino_lib::PersonReidentification::observeOutput( 
} } -const std::vector dynamic_vino_lib::PersonReidentification::getFilteredROIs( +const std::vector openvino_wrapper_lib::PersonReidentification::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp b/openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp similarity index 60% rename from dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp rename to openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp index 14cf4e3e..232d73ea 100644 --- a/dynamic_vino_lib/src/inferences/vehicle_attribs_detection.cpp +++ b/openvino_wrapper_lib/src/inferences/vehicle_attribs_detection.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,34 +20,34 @@ #include #include #include -#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp" -#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/slog.hpp" // VehicleAttribsDetectionResult -dynamic_vino_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult( +openvino_wrapper_lib::VehicleAttribsDetectionResult::VehicleAttribsDetectionResult( const cv::Rect & location) : Result(location) {} // VehicleAttribsDetection -dynamic_vino_lib::VehicleAttribsDetection::VehicleAttribsDetection() -: dynamic_vino_lib::BaseInference() {} +openvino_wrapper_lib::VehicleAttribsDetection::VehicleAttribsDetection() +: openvino_wrapper_lib::BaseInference() {} -dynamic_vino_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; -void dynamic_vino_lib::VehicleAttribsDetection::loadNetwork( 
+openvino_wrapper_lib::VehicleAttribsDetection::~VehicleAttribsDetection() = default; +void openvino_wrapper_lib::VehicleAttribsDetection::loadNetwork( const std::shared_ptr network) { valid_model_ = network; setMaxBatchSize(network->getMaxBatchSize()); } -bool dynamic_vino_lib::VehicleAttribsDetection::enqueue( +bool openvino_wrapper_lib::VehicleAttribsDetection::enqueue( const cv::Mat & frame, const cv::Rect & input_frame_loc) { if (getEnqueuedNum() == 0) { results_.clear(); } - if (!dynamic_vino_lib::BaseInference::enqueue( + if (!openvino_wrapper_lib::BaseInference::enqueue( frame, input_frame_loc, 1, 0, valid_model_->getInputName())) { return false; @@ -57,23 +57,23 @@ bool dynamic_vino_lib::VehicleAttribsDetection::enqueue( return true; } -bool dynamic_vino_lib::VehicleAttribsDetection::submitRequest() +bool openvino_wrapper_lib::VehicleAttribsDetection::submitRequest() { - return dynamic_vino_lib::BaseInference::submitRequest(); + return openvino_wrapper_lib::BaseInference::submitRequest(); } -bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() +bool openvino_wrapper_lib::VehicleAttribsDetection::fetchResults() { - bool can_fetch = dynamic_vino_lib::BaseInference::fetchResults(); + bool can_fetch = openvino_wrapper_lib::BaseInference::fetchResults(); if (!can_fetch) {return false;} bool found_result = false; - InferenceEngine::InferRequest::Ptr request = getEngine()->getRequest(); - //std::string color_name = valid_model_->getColorOutputName(); - //std::string type_name = valid_model_->getTypeOutputName(); + + ov::InferRequest infer_request = getEngine()->getRequest(); std::string color_name = valid_model_->getOutputName("color_output_"); std::string type_name = valid_model_->getOutputName("type_output_"); - const float * color_values = request->GetBlob(color_name)->buffer().as(); - const float * type_values = request->GetBlob(type_name)->buffer().as(); + const float * color_values = infer_request.get_tensor(color_name).data(); + const float * 
type_values = infer_request.get_tensor(type_name).data(); + for (int i = 0; i < getResultsLength(); i++) { auto color_id = std::max_element(color_values, color_values + 7) - color_values; auto type_id = std::max_element(type_values, type_values + 4) - type_values; @@ -87,23 +87,23 @@ bool dynamic_vino_lib::VehicleAttribsDetection::fetchResults() return true; } -int dynamic_vino_lib::VehicleAttribsDetection::getResultsLength() const +int openvino_wrapper_lib::VehicleAttribsDetection::getResultsLength() const { return static_cast(results_.size()); } -const dynamic_vino_lib::Result * -dynamic_vino_lib::VehicleAttribsDetection::getLocationResult(int idx) const +const openvino_wrapper_lib::Result * +openvino_wrapper_lib::VehicleAttribsDetection::getLocationResult(int idx) const { return &(results_[idx]); } -const std::string dynamic_vino_lib::VehicleAttribsDetection::getName() const +const std::string openvino_wrapper_lib::VehicleAttribsDetection::getName() const { return valid_model_->getModelCategory(); } -void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( +void openvino_wrapper_lib::VehicleAttribsDetection::observeOutput( const std::shared_ptr & output) { if (output != nullptr) { @@ -111,7 +111,7 @@ void dynamic_vino_lib::VehicleAttribsDetection::observeOutput( } } -const std::vector dynamic_vino_lib::VehicleAttribsDetection::getFilteredROIs( +const std::vector openvino_wrapper_lib::VehicleAttribsDetection::getFilteredROIs( const std::string filter_conditions) const { if (!filter_conditions.empty()) { diff --git a/dynamic_vino_lib/src/inputs/image_input.cpp b/openvino_wrapper_lib/src/inputs/image_input.cpp similarity index 90% rename from dynamic_vino_lib/src/inputs/image_input.cpp rename to openvino_wrapper_lib/src/inputs/image_input.cpp index 3a30d226..4c6bdf6f 100644 --- a/dynamic_vino_lib/src/inputs/image_input.cpp +++ b/openvino_wrapper_lib/src/inputs/image_input.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 
Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,8 +18,8 @@ */ #include -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/slog.hpp" Input::Image::Image(const std::string & file) { diff --git a/dynamic_vino_lib/src/inputs/image_topic.cpp b/openvino_wrapper_lib/src/inputs/image_topic.cpp similarity index 86% rename from dynamic_vino_lib/src/inputs/image_topic.cpp rename to openvino_wrapper_lib/src/inputs/image_topic.cpp index 5d356289..a716e2fe 100644 --- a/dynamic_vino_lib/src/inputs/image_topic.cpp +++ b/openvino_wrapper_lib/src/inputs/image_topic.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ #include #include -#include "dynamic_vino_lib/inputs/image_topic.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/image_topic.hpp" +#include "openvino_wrapper_lib/slog.hpp" #define INPUT_TOPIC "/openvino_toolkit/image_raw" @@ -59,10 +59,6 @@ void Input::ImageTopic::cb(const sensor_msgs::msg::Image::SharedPtr image_msg) setHeader(image_msg->header); image_ = cv_bridge::toCvCopy(image_msg, "bgr8")->image; - //Suppose Image Topic is sent within BGR order, so the below line would work. 
- //image_ = cv::Mat(image_msg->height, image_msg->width, CV_8UC3, - // const_cast(&image_msg->data[0]), image_msg->step); - image_count_.increaseCounter(); } diff --git a/dynamic_vino_lib/src/inputs/ip_camera.cpp b/openvino_wrapper_lib/src/inputs/ip_camera.cpp similarity index 92% rename from dynamic_vino_lib/src/inputs/ip_camera.cpp rename to openvino_wrapper_lib/src/inputs/ip_camera.cpp index f4975648..e579ba54 100644 --- a/dynamic_vino_lib/src/inputs/ip_camera.cpp +++ b/openvino_wrapper_lib/src/inputs/ip_camera.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ * @brief a header file with declaration of IpCamera class * @file ip_camera.cpp */ -#include "dynamic_vino_lib/inputs/ip_camera.hpp" +#include "openvino_wrapper_lib/inputs/ip_camera.hpp" bool Input::IpCamera::initialize() diff --git a/dynamic_vino_lib/src/inputs/realsense_camera.cpp b/openvino_wrapper_lib/src/inputs/realsense_camera.cpp similarity index 95% rename from dynamic_vino_lib/src/inputs/realsense_camera.cpp rename to openvino_wrapper_lib/src/inputs/realsense_camera.cpp index 62847f23..f9aa759d 100644 --- a/dynamic_vino_lib/src/inputs/realsense_camera.cpp +++ b/openvino_wrapper_lib/src/inputs/realsense_camera.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,8 +16,8 @@ * @brief a header file with declaration of RealSenseCamera class * @file realsense_camera.cpp */ -#include "dynamic_vino_lib/inputs/realsense_camera.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/realsense_camera.hpp" +#include "openvino_wrapper_lib/slog.hpp" // RealSenseCamera bool Input::RealSenseCamera::initialize() diff --git a/dynamic_vino_lib/src/inputs/standard_camera.cpp b/openvino_wrapper_lib/src/inputs/standard_camera.cpp similarity index 94% rename from dynamic_vino_lib/src/inputs/standard_camera.cpp rename to openvino_wrapper_lib/src/inputs/standard_camera.cpp index fd7e209b..8b25f717 100644 --- a/dynamic_vino_lib/src/inputs/standard_camera.cpp +++ b/openvino_wrapper_lib/src/inputs/standard_camera.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ * @brief a header file with declaration of StandardCamera class * @file standard_camera.cpp */ -#include "dynamic_vino_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" bool Input::StandardCamera::initialize() { diff --git a/dynamic_vino_lib/src/inputs/video_input.cpp b/openvino_wrapper_lib/src/inputs/video_input.cpp similarity index 93% rename from dynamic_vino_lib/src/inputs/video_input.cpp rename to openvino_wrapper_lib/src/inputs/video_input.cpp index d3279025..4c4e2a80 100644 --- a/dynamic_vino_lib/src/inputs/video_input.cpp +++ b/openvino_wrapper_lib/src/inputs/video_input.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ #include -#include "dynamic_vino_lib/inputs/video_input.hpp" +#include "openvino_wrapper_lib/inputs/video_input.hpp" // Video Input::Video::Video(const std::string & video) diff --git a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp b/openvino_wrapper_lib/src/models/age_gender_detection_model.cpp similarity index 60% rename from dynamic_vino_lib/src/models/age_gender_detection_model.cpp rename to openvino_wrapper_lib/src/models/age_gender_detection_model.cpp index fa7e6f2d..ac3eea86 100644 --- a/dynamic_vino_lib/src/models/age_gender_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/age_gender_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,9 +18,9 @@ */ #include #include - -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include +#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Age Gender Classification Network Models::AgeGenderDetectionModel::AgeGenderDetectionModel( @@ -31,33 +31,37 @@ Models::AgeGenderDetectionModel::AgeGenderDetectionModel( { } bool Models::AgeGenderDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); - if (input_info_map.size() != 1) { - slog::warn << "This model seems not Age-Gender-like, which should have only one input," - <<" but we got " << std::to_string(input_info_map.size()) << "inputs" + inputs_info_ = model->inputs(); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = 
model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + + ov::Shape input_tensor_shape = model->input().get_shape(); + if (inputs_info_.size() != 1) { + slog::warn << "This model seems not Age-Gender-like, which should have only one input, but we got" + << std::to_string(input_tensor_shape.size()) << "inputs" << slog::endl; return false; } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::FP32); - input_info->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); + + addInputInfo("input", input_tensor_name_); + const ov::Layout tensor_layout{"NCHW"}; + input_info.tensor(). + set_element_type(ov::element::f32). + set_layout(tensor_layout); + // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 2) { - // throw std::logic_error("Age/Gender Recognition network should have two output layers"); - slog::warn << "This model seems not Age-gender like, which should have and only have 2" - " outputs, but we got " << std::to_string(output_info_map.size()) << "outputs" + outputs_info_ = model->outputs(); + if (outputs_info_.size() != 2) { + slog::warn << "This model seems not Age-Gender-like, which should have and only have 2 outputs, but we got" + << std::to_string(outputs_info_.size()) << "outputs" << slog::endl; return false; } - auto it = output_info_map.begin(); - InferenceEngine::DataPtr age_output_ptr = (it++)->second; - InferenceEngine::DataPtr gender_output_ptr = (it++)->second; #if(0) /// //Check More Configuration: @@ -83,16 +87,22 @@ bool Models::AgeGenderDetectionModel::updateLayerProperty( slog::info << "Gender layer: " << gender_output_ptr->getCreatorLayer().lock()->name << slog::endl; #endif - age_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - 
age_output_ptr->setLayout(InferenceEngine::Layout::NCHW); - gender_output_ptr->setPrecision(InferenceEngine::Precision::FP32); - gender_output_ptr->setLayout(InferenceEngine::Layout::NCHW); + auto age_output_info = outputs_info_[1]; + ppp.output(age_output_info.get_any_name()). + tensor(). + set_element_type(ov::element::f32). + set_layout(tensor_layout); + auto gender_output_info = outputs_info_[0]; + ppp.output(gender_output_info.get_any_name()). + tensor(). + set_element_type(ov::element::f32). + set_layout(tensor_layout); - //output_age_ = age_output_ptr->name; - addOutputInfo("age", age_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; - addOutputInfo("gender", gender_output_ptr->getName()); + model = ppp.build(); + ov::set_batch(model, getMaxBatchSize()); + addOutputInfo("age", age_output_info.get_any_name()); + addOutputInfo("gender", gender_output_info.get_any_name()); printAttribute(); return true; } diff --git a/dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp b/openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp similarity index 74% rename from dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp rename to openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp index c2924c62..1340d490 100644 --- a/dynamic_vino_lib/src/models/attributes/ssd_model_attr.cpp +++ b/openvino_wrapper_lib/src/models/attributes/ssd_model_attr.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Intel Corporation +// Copyright (c) 2020-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,9 +18,9 @@ */ #include - -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" -#include "dynamic_vino_lib/slog.hpp" + +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Face Detection Network Models::SSDModelAttr::SSDModelAttr( @@ -30,43 +30,46 @@ Models::SSDModelAttr::SSDModelAttr( } bool Models::SSDModelAttr::updateLayerProperty( - const InferenceEngine::CNNNetwork & net_reader) + const std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got " << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - addInputInfo("input", input_info_map.begin()->first); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + input_info.tensor().set_element_type(ov::element::u8); + addInputInfo("input", input_tensor_name_); - const InferenceEngine::SizeVector input_dims = input_info->getTensorDesc().getDims(); + ov::Shape input_dims = input_info_map[0].get_shape(); setInputHeight(input_dims[2]); setInputWidth(input_dims[3]); slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { + auto outputs_info = model->outputs(); + if (outputs_info.size() != 1) { slog::warn << "This model seems not SSDNet-like! 
We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." + << std::to_string(outputs_info.size()) << "outputs, but SSDnet has only one." << slog::endl; return false; } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first + + ov::preprocess::OutputInfo& output_info = ppp.output(); + addOutputInfo("output", model->output().get_any_name()); + slog::info << "Checking Object Detection output ... Name=" << model->output().get_any_name() << slog::endl; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); + + output_info.tensor().set_element_type(ov::element::f32); ///TODO: double check this part: BEGIN #if(0) /// const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + model->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; if (output_layer->params.find("num_classes") == output_layer->params.end()) { @@ -91,7 +94,9 @@ bool Models::SSDModelAttr::updateLayerProperty( ///TODO: double check this part: END // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); + auto outputsDataMap = model->outputs(); + auto & data = outputsDataMap[0]; + ov::Shape output_dims = data.get_shape(); setMaxProposalCount(static_cast(output_dims[2])); slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; @@ -114,4 +119,3 @@ bool Models::SSDModelAttr::updateLayerProperty( slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; return true; } - diff --git a/dynamic_vino_lib/src/models/base_model.cpp b/openvino_wrapper_lib/src/models/base_model.cpp similarity index 75% rename from dynamic_vino_lib/src/models/base_model.cpp rename to openvino_wrapper_lib/src/models/base_model.cpp index f9ddeaa6..70647b5a 100644 --- a/dynamic_vino_lib/src/models/base_model.cpp +++ b/openvino_wrapper_lib/src/models/base_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ #include #include #include -#include "dynamic_vino_lib/models/base_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" +#include "openvino_wrapper_lib/models/base_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" // Validated Base Network Models::BaseModel::BaseModel( @@ -39,41 +39,36 @@ Models::BaseModel::BaseModel( throw std::logic_error("model file name is empty!"); } - ///net_reader_ = std::make_shared(); } void Models::BaseModel::modelInit() { slog::info << "Loading network files" << model_loc_ << slog::endl; slog::info << label_loc_ << slog::endl; + // Read network model - ///net_reader_->ReadNetwork(model_loc_); - net_reader_ = engine.ReadNetwork(model_loc_); + model_ = engine.read_model(model_loc_); + // Extract model name and load it's weights // remove extension size_t last_index = model_loc_.find_last_of("."); std::string raw_name = model_loc_.substr(0, last_index); - ///std::string bin_file_name = raw_name + ".bin"; - ///net_reader_->ReadWeights(bin_file_name); + // Read labels (if any) std::string label_file_name = label_loc_.substr(0, last_index); - //std::string label_file_name = raw_name + ".labels"; loadLabelsFromFile(label_loc_); // Set batch size to given 
max_batch_size_ slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; - ///net_reader_->getNetwork().setBatchSize(max_batch_size_); - net_reader_.setBatchSize(max_batch_size_); - - updateLayerProperty(net_reader_); + updateLayerProperty(model_); } #if 0 bool Models::BaseModel::updateLayerProperty( - InferenceEngine::CNNNetReader::Ptr net_reader) + InferenceEngine::CNNNetReader::Ptr model) { #if 0 - if (!updateLayerProperty(net_reader)){ + if (!updateLayerProperty(model)){ slog::warn << "The model(name: " << getModelName() << ") failed to update Layer Property!" << slog::endl; return false; diff --git a/openvino_wrapper_lib/src/models/emotion_detection_model.cpp b/openvino_wrapper_lib/src/models/emotion_detection_model.cpp new file mode 100644 index 00000000..9d7579bb --- /dev/null +++ b/openvino_wrapper_lib/src/models/emotion_detection_model.cpp @@ -0,0 +1,78 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of EmotionDetectionModel class + * @file emotion_detection_model.cpp + */ +#include +#include +#include "openvino_wrapper_lib/models/emotion_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" + + +// Validated Emotions Detection Network +Models::EmotionDetectionModel::EmotionDetectionModel( + const std::string & label_loc, const std::string & model_loc, int max_batch_size) +: BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::EmotionDetectionModel::updateLayerProperty +(std::shared_ptr& model) +{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + // set input property + inputs_info_ = model->inputs(); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + + ov::Shape input_tensor_shape = model->input().get_shape(); + if (inputs_info_.size() != 1) { + slog::warn << "This model seems not Emotion-detection-model-like, which should have only one input, but we got" + << std::to_string(input_tensor_shape.size()) << "inputs" + << slog::endl; + return false; + } + + addInputInfo("input", input_tensor_name_); + const ov::Layout tensor_layout{"NHWC"}; + input_info.tensor(). + set_element_type(ov::element::f32). 
+ set_layout(tensor_layout); + + // set output property + outputs_info_ = model->outputs(); + output_tensor_name_ = model->output().get_any_name(); + ov::preprocess::OutputInfo& output_info = ppp.output(output_tensor_name_); + if (outputs_info_.size() != 1) { + slog::warn << "This model should have and only have 1 output, but we got " + << std::to_string(outputs_info_.size()) << "outputs" + << slog::endl; + return false; + } + + model = ppp.build(); + ov::set_batch(model, getMaxBatchSize()); + addOutputInfo("output", output_tensor_name_); + + printAttribute(); + return true; +} + +const std::string Models::EmotionDetectionModel::getModelCategory() const +{ + return "Emotions Detection"; +} diff --git a/dynamic_vino_lib/src/models/face_detection_model.cpp b/openvino_wrapper_lib/src/models/face_detection_model.cpp similarity index 87% rename from dynamic_vino_lib/src/models/face_detection_model.cpp rename to openvino_wrapper_lib/src/models/face_detection_model.cpp index c673b6d7..ced9982e 100644 --- a/dynamic_vino_lib/src/models/face_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/face_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ #include -#include "dynamic_vino_lib/models/face_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/face_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Face Detection Network Models::FaceDetectionModel::FaceDetectionModel( diff --git a/dynamic_vino_lib/src/models/face_reidentification_model.cpp b/openvino_wrapper_lib/src/models/face_reidentification_model.cpp similarity index 85% rename from dynamic_vino_lib/src/models/face_reidentification_model.cpp rename to openvino_wrapper_lib/src/models/face_reidentification_model.cpp index a5d4572c..d8deb8f5 100644 --- a/dynamic_vino_lib/src/models/face_reidentification_model.cpp +++ b/openvino_wrapper_lib/src/models/face_reidentification_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,25 +17,25 @@ * @file face_reidentification_model.cpp */ #include -#include "dynamic_vino_lib/models/face_reidentification_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/face_reidentification_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Face Reidentification Network Models::FaceReidentificationModel::FaceReidentificationModel( const std::string & label_loc, const std::string & model_loc, int max_batch_size) : BaseModel(label_loc, model_loc, max_batch_size) {} void Models::FaceReidentificationModel::setLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + InferenceEngine::CNNNetwork& model) { // set input property InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + model.getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + model.getOutputsInfo()); InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); @@ -45,7 +45,7 @@ void Models::FaceReidentificationModel::setLayerProperty( } void Models::FaceReidentificationModel::checkLayerProperty( - const InferenceEngine::CNNNetwork & net_reader) {} + const InferenceEngine::CNNNetwork & model) {} const std::string Models::FaceReidentificationModel::getModelCategory() const { diff --git a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp b/openvino_wrapper_lib/src/models/head_pose_detection_model.cpp similarity index 58% rename from dynamic_vino_lib/src/models/head_pose_detection_model.cpp rename to openvino_wrapper_lib/src/models/head_pose_detection_model.cpp index faaa6dcf..2f4ec293 100644 --- 
a/dynamic_vino_lib/src/models/head_pose_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/head_pose_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,8 +20,8 @@ #include #include -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Head Pose Network Models::HeadPoseDetectionModel::HeadPoseDetectionModel( @@ -31,31 +31,45 @@ Models::HeadPoseDetectionModel::HeadPoseDetectionModel( } bool Models::HeadPoseDetectionModel::updateLayerProperty -(InferenceEngine::CNNNetwork& net_reader) +(std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; // set input property - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { slog::warn << "This model should have only one input, but we got" << std::to_string(input_info_map.size()) << "inputs" - << slog::endl; + << slog::endl; return false; } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - addInputInfo("input", input_info_map.begin()->first); + + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout input_tensor_layout{"NCHW"}; + input_info.tensor(). + set_element_type(ov::element::u8). 
+ set_layout(input_tensor_layout); + addInputInfo("input", input_tensor_name_); // set output property - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - for (auto & output : output_info_map) { - output.second->setPrecision(InferenceEngine::Precision::FP32); - output.second->setLayout(InferenceEngine::Layout::NC); + auto output_info_map = model->outputs(); + std::vector outputs_name; + for (auto & output_item : output_info_map) { + std::string output_tensor_name_ = output_item.get_any_name(); + const ov::Layout output_tensor_layout{"NC"}; + ppp.output(output_tensor_name_). + tensor(). + set_element_type(ov::element::f32). + set_layout(output_tensor_layout); + outputs_name.push_back(output_tensor_name_); } + model = ppp.build(); + ov::set_batch(model, getMaxBatchSize()); + for (const std::string& outName : {output_angle_r_, output_angle_p_, output_angle_y_}) { - if (output_info_map.find(outName) == output_info_map.end()) { + if (find(outputs_name.begin(), outputs_name.end(), outName) == outputs_name.end()) { throw std::logic_error("There is no " + outName + " output in Head Pose Estimation network"); } else { addOutputInfo(outName, outName); diff --git a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp b/openvino_wrapper_lib/src/models/landmarks_detection_model.cpp similarity index 84% rename from dynamic_vino_lib/src/models/landmarks_detection_model.cpp rename to openvino_wrapper_lib/src/models/landmarks_detection_model.cpp index 42aa5319..3c261c5b 100644 --- a/dynamic_vino_lib/src/models/landmarks_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/landmarks_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,25 +17,25 @@ * @file landmarks_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/landmarks_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Landmarks Detection Network Models::LandmarksDetectionModel::LandmarksDetectionModel( const std::string & label_loc, const std::string & model_loc, int max_batch_size) : BaseModel(label_loc, model_loc, max_batch_size) {} void Models::LandmarksDetectionModel::setLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + InferenceEngine::CNNNetwork& model) { // set input property InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + model.getInputsInfo()); InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; input_info->setPrecision(InferenceEngine::Precision::U8); input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); // set output property InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + model.getOutputsInfo()); InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); output_data_ptr->setLayout(InferenceEngine::Layout::NCHW); @@ -45,15 +45,15 @@ void Models::LandmarksDetectionModel::setLayerProperty( } void Models::LandmarksDetectionModel::checkLayerProperty( - const InferenceEngine::CNNNetReader::Ptr & net_reader) + const InferenceEngine::CNNNetReader::Ptr & model) { InferenceEngine::InputsDataMap input_info_map( - net_reader->getNetwork().getInputsInfo()); + model->getNetwork().getInputsInfo()); if (input_info_map.size() != 1) { throw std::logic_error("Landmarks Detection topology should have only one input"); } InferenceEngine::OutputsDataMap output_info_map( - net_reader->getNetwork().getOutputsInfo()); + model->getNetwork().getOutputsInfo()); if (output_info_map.size() != 1) { throw 
std::logic_error("Landmarks Detection Network expects networks having one output"); } diff --git a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp b/openvino_wrapper_lib/src/models/license_plate_detection_model.cpp similarity index 64% rename from dynamic_vino_lib/src/models/license_plate_detection_model.cpp rename to openvino_wrapper_lib/src/models/license_plate_detection_model.cpp index 171764f5..bfdf4ce5 100644 --- a/dynamic_vino_lib/src/models/license_plate_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/license_plate_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,40 +17,44 @@ * @file license_plate_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/license_plate_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/license_plate_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Vehicle Attributes Detection Network Models::LicensePlateDetectionModel::LicensePlateDetectionModel( const std::string & label_loc, const std::string & model_loc, int max_batch_size) : BaseModel(label_loc, model_loc, max_batch_size) {} bool Models::LicensePlateDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 2) { throw std::logic_error("Vehicle Attribs topology should have only two inputs"); } - auto sequence_input = (++input_info_map.begin()); - if (sequence_input->second->getTensorDesc().getDims()[0] != getMaxSequenceSize()) { + + auto sequence_input = input_info_map[1]; + if 
(sequence_input.get_shape()[0] != getMaxSequenceSize()) { throw std::logic_error("License plate detection max sequence size dismatch"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + + auto output_info_map = model->outputs(); if (output_info_map.size() != 1) { throw std::logic_error("Vehicle Attribs Network expects networks having one output"); } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = input_info_map[0].get_any_name(); + const ov::Layout tensor_layout{"NCHW"}; + ppp.input(input_tensor_name_). + tensor(). + set_element_type(ov::element::u8). + set_layout(tensor_layout); + model = ppp.build(); - // set input and output layer name - input_ = input_info_map.begin()->first; - seq_input_ = (++input_info_map.begin())->first; - output_ = output_info_map.begin()->first; + input_ = input_tensor_name_; + seq_input_ = sequence_input.get_any_name(); + output_ = model->output().get_any_name(); return true; } diff --git a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp b/openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp similarity index 73% rename from dynamic_vino_lib/src/models/object_detection_ssd_model.cpp rename to openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp index d0996fdc..2c938ca8 100644 --- a/dynamic_vino_lib/src/models/object_detection_ssd_model.cpp +++ b/openvino_wrapper_lib/src/models/object_detection_ssd_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,11 +19,11 @@ #include #include #include -#include "dynamic_vino_lib/inferences/object_detection.hpp" -#include "dynamic_vino_lib/models/object_detection_ssd_model.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/engines/engine.hpp" -#include "dynamic_vino_lib/models/attributes/base_attribute.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/models/object_detection_ssd_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/models/attributes/base_attribute.hpp" // Validated Object Detection Network Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel( @@ -31,7 +31,6 @@ Models::ObjectDetectionSSDModel::ObjectDetectionSSDModel( : ObjectDetectionModel(label_loc, model_loc, max_batch_size) { slog::debug << "TESTING: in ObjectDetectionSSDModel" << slog::endl; - //addCandidatedAttr(std::make_shared()); } const std::string Models::ObjectDetectionSSDModel::getModelCategory() const @@ -63,14 +62,15 @@ bool Models::ObjectDetectionSSDModel::matToBlob( std::string input_name = getInputName(); slog::debug << "add input image to blob: " << input_name << slog::endl; - InferenceEngine::Blob::Ptr input_blob = - engine->getRequest()->GetBlob(input_name); - InferenceEngine::SizeVector blob_size = input_blob->getTensorDesc().getDims(); - const int width = blob_size[3]; - const int height = blob_size[2]; - const int channels = blob_size[1]; - u_int8_t * blob_data = input_blob->buffer().as(); + ov::Tensor in_tensor = engine->getRequest().get_tensor(input_name); + + ov::Shape in_shape = in_tensor.get_shape(); + const int width = in_shape[3]; + const int height = in_shape[2]; + const int channels = in_shape[1]; + + u_int8_t * blob_data = (u_int8_t *)in_tensor.data(); cv::Mat resized_image(orig_image); if (width != orig_image.size().width || height != orig_image.size().height) { @@ -93,7 +93,7 @@ bool 
Models::ObjectDetectionSSDModel::matToBlob( bool Models::ObjectDetectionSSDModel::fetchResults( const std::shared_ptr & engine, - std::vector & results, + std::vector & results, const float & confidence_thresh, const bool & enable_roi_constraint) { @@ -104,9 +104,9 @@ bool Models::ObjectDetectionSSDModel::fetchResults( } slog::debug << "Fetching Detection Results ..." << slog::endl; - InferenceEngine::InferRequest::Ptr request = engine->getRequest(); + ov::InferRequest request = engine->getRequest(); std::string output = getOutputName(); - const float * detections = request->GetBlob(output)->buffer().as(); + const float * detections = (float * )request.get_tensor(output).data(); slog::debug << "Analyzing Detection results..." << slog::endl; auto max_proposal_count = getMaxProposalCount(); @@ -116,7 +116,6 @@ bool Models::ObjectDetectionSSDModel::fetchResults( for (int i = 0; i < max_proposal_count; i++) { float image_id = detections[i * object_size + 0]; if (image_id < 0) { - //slog::info << "Found objects: " << i << "|" << results.size() << slog::endl; break; } @@ -131,12 +130,12 @@ bool Models::ObjectDetectionSSDModel::fetchResults( if (enable_roi_constraint) {r &= cv::Rect(0, 0, frame_size.width, frame_size.height);} - dynamic_vino_lib::ObjectDetectionResult result(r); + openvino_wrapper_lib::ObjectDetectionResult result(r); std::string label = label_num < labels.size() ? labels[label_num] : std::string("label #") + std::to_string(label_num); result.setLabel(label); float confidence = detections[i * object_size + 2]; - if (confidence <= confidence_thresh /* || r.x == 0 */) { // why r.x needs to be checked? 
+ if (confidence <= confidence_thresh ) { continue; } result.setConfidence(confidence); @@ -148,43 +147,63 @@ bool Models::ObjectDetectionSSDModel::fetchResults( } bool Models::ObjectDetectionSSDModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map(net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { slog::warn << "This model seems not SSDNet-like, SSDnet has only one input, but we got " << std::to_string(input_info_map.size()) << "inputs" << slog::endl; return false; } + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - addInputInfo("input", input_info_map.begin()->first); + input_info.tensor().set_element_type(ov::element::u8); + addInputInfo("input", input_tensor_name_); - const InferenceEngine::SizeVector input_dims = input_info->getTensorDesc().getDims(); + ov::Shape input_dims = input_info_map[0].get_shape(); + + ov::Layout tensor_layout = ov::Layout("NCHW"); + ov::Layout expect_layout = ov::Layout("NHWC"); setInputHeight(input_dims[2]); setInputWidth(input_dims[3]); + if (input_dims[1] == 3) + expect_layout = ov::Layout("NCHW"); + else if (input_dims[3] == 3) + expect_layout = ov::Layout("NHWC"); + else + slog::warn << "unexpect input shape " << input_dims << slog::endl; + + input_info.tensor(). + set_element_type(ov::element::u8). + set_layout(tensor_layout); + input_info.preprocess(). + convert_layout(expect_layout). 
+ resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; - InferenceEngine::OutputsDataMap output_info_map(net_reader.getOutputsInfo()); - if (output_info_map.size() != 1) { + auto outputs_info = model->outputs(); + if (outputs_info.size() != 1) { slog::warn << "This model seems not SSDNet-like! We got " - << std::to_string(output_info_map.size()) << "outputs, but SSDnet has only one." + << std::to_string(outputs_info.size()) << "outputs, but SSDnet has only one." << slog::endl; return false; } - InferenceEngine::DataPtr & output_data_ptr = output_info_map.begin()->second; - addOutputInfo("output", output_info_map.begin()->first); - slog::info << "Checking Object Detection output ... Name=" << output_info_map.begin()->first + ov::preprocess::OutputInfo& output_info = ppp.output(); + addOutputInfo("output", model->output().get_any_name()); + slog::info << "Checking Object Detection output ... Name=" << model->output().get_any_name() << slog::endl; - output_data_ptr->setPrecision(InferenceEngine::Precision::FP32); + + output_info.tensor().set_element_type(ov::element::f32); + model = ppp.build(); ///TODO: double check this part: BEGIN #if(0) const InferenceEngine::CNNLayerPtr output_layer = - net_reader->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); + model->getNetwork().getLayerByName(output_info_map.begin()->first.c_str()); // output layer should have attribute called num_classes slog::info << "Checking Object Detection num_classes" << slog::endl; if (output_layer->params.find("num_classes") == output_layer->params.end()) { @@ -209,7 +228,9 @@ bool Models::ObjectDetectionSSDModel::updateLayerProperty( ///TODO: double check this part: END // last dimension of output layer should be 7 - const InferenceEngine::SizeVector output_dims = output_data_ptr->getTensorDesc().getDims(); + auto outputsDataMap = model->outputs(); + auto & data = outputsDataMap[0]; + ov::Shape 
output_dims = data.get_shape(); setMaxProposalCount(static_cast(output_dims[2])); slog::info << "max proposal count is: " << getMaxProposalCount() << slog::endl; diff --git a/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp b/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp new file mode 100644 index 00000000..8dd7176d --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_detection_yolov5_model.cpp @@ -0,0 +1,224 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of ObjectDetectionModel class + * @file object_detection_yolov5_model.cpp + */ +#include +#include +#include +#include +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +#include "openvino_wrapper_lib/inferences/object_detection.hpp" +#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp" + + +// Validated Object Detection Network +Models::ObjectDetectionYolov5Model::ObjectDetectionYolov5Model( + const std::string & label_loc, const std::string & model_loc, int max_batch_size) +: ObjectDetectionModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::ObjectDetectionYolov5Model::updateLayerProperty( + std::shared_ptr& model) +{ + slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; + auto input_info_map = model->inputs(); + if (input_info_map.size() != 1) { + slog::warn << "This model seems not Yolo-like, which has only one input, but we got " + << std::to_string(input_info_map.size()) << "inputs" << slog::endl; + return false; + } + // set input property + ov::Shape input_dims = input_info_map[0].get_shape(); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout input_tensor_layout{"NHWC"}; + setInputHeight(input_dims[2]); + setInputWidth(input_dims[3]); + input_info.tensor(). + set_element_type(ov::element::u8). + set_layout(input_tensor_layout). + set_color_format(ov::preprocess::ColorFormat::BGR); + input_info.preprocess(). + convert_element_type(ov::element::f32). 
+ convert_color(ov::preprocess::ColorFormat::RGB).scale({255., 255., 255.}); + ppp.input().model().set_layout("NCHW"); + addInputInfo("input", input_tensor_name_); + + // set output property + auto output_info_map = model->outputs(); + if (output_info_map.size() != 1) { + slog::warn << "This model seems not Yolo-like! We got " + << std::to_string(output_info_map.size()) << "outputs, but Yolov5 has only one." + << slog::endl; + return false; + } + output_tensor_name_ = model->output().get_any_name(); + ov::preprocess::OutputInfo& output_info = ppp.output(); + addOutputInfo("output", output_tensor_name_); + output_info.tensor().set_element_type(ov::element::f32); + slog::info << "Checking Object Detection output ... Name=" << output_tensor_name_ + << slog::endl; + + model = ppp.build(); + + ov::Shape output_dims = output_info_map[0].get_shape(); + setMaxProposalCount(static_cast(output_dims[1])); + + auto object_size = static_cast(output_dims[2]); + setObjectSize(object_size); + + printAttribute(); + slog::info << "This model is Yolo-like, Layer Property updated!" 
<< slog::endl; + return true; +} + +const std::string Models::ObjectDetectionYolov5Model::getModelCategory() const +{ + return "Object Detection Yolo v5"; +} + +bool Models::ObjectDetectionYolov5Model::enqueue( + const std::shared_ptr & engine, + const cv::Mat & frame, + const cv::Rect & input_frame_loc) +{ + setFrameSize(frame.cols, frame.rows); + + if (!matToBlob(frame, input_frame_loc, 1, 0, engine)) { + return false; + } + return true; +} + + +bool Models::ObjectDetectionYolov5Model::matToBlob( + const cv::Mat & orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr & engine) +{ + resize_img = pre_process_ov(orig_image); + input_image = orig_image; + + size_t height = resize_img.resized_image.rows; + size_t width = resize_img.resized_image.cols; + size_t channels = orig_image.channels(); + auto *input_data = (float *) resize_img.resized_image.data; + + ov::Tensor input_tensor; + input_tensor = ov::Tensor(ov::element::u8, {1, height, width, channels}, input_data); + engine->getRequest().set_input_tensor(input_tensor); + + return true; +} + +bool Models::ObjectDetectionYolov5Model::fetchResults( + const std::shared_ptr & engine, + std::vector & results, + const float & confidence_thresh, + const bool & enable_roi_constraint) +{ + const float NMS_THRESHOLD = 0.45; // remove overlapping bounding boxes + + ov::InferRequest request = engine->getRequest(); + std::string output = getOutputName(); + const ov::Tensor &output_tensor = request.get_output_tensor(); + ov::Shape output_shape = output_tensor.get_shape(); + auto *detections = output_tensor.data(); + std::vector boxes; + std::vector class_ids; + std::vector confidences; + std::vector & labels = getLabels(); + + for (size_t i = 0; i < output_shape.at(1); i++) { + float *detection = &detections[i * output_shape.at(2)]; + float confidence = detection[4]; + if (confidence < confidence_thresh) + continue; + + float *classes_scores = &detection[5]; + int col = 
static_cast(output_shape.at(2) - 5); + + cv::Mat scores(1, col, CV_32FC1, classes_scores); + cv::Point class_id; + double max_class_score; + cv::minMaxLoc(scores, nullptr, &max_class_score, nullptr, &class_id); + + if (max_class_score > confidence_thresh) { + confidences.emplace_back(confidence); + class_ids.emplace_back(class_id.x); + + float x = detection[0]; + float y = detection[1]; + float w = detection[2]; + float h = detection[3]; + auto x_min = x - (w / 2); + auto y_min = y - (h / 2); + + boxes.emplace_back(x_min, y_min, w, h); + } + } + + std::vector nms_result; + cv::dnn::NMSBoxes(boxes, confidences, confidence_thresh, NMS_THRESHOLD, nms_result); + for (int idx: nms_result) { + double rx = (double) input_image.cols / (resize_img.resized_image.cols - resize_img.dw); + double ry = (double) input_image.rows / (resize_img.resized_image.rows - resize_img.dh); + double vx = rx * boxes[idx].x; + double vy = ry * boxes[idx].y; + double vw = rx * boxes[idx].width; + double vh = ry * boxes[idx].height; + cv::Rect rec(vx, vy, vw, vh); + Result result(rec); + result.setConfidence(confidences[idx]); + std::string label = class_ids[idx] < labels.size() ? 
+ labels[class_ids[idx]] : std::string("label #") + std::to_string(class_ids[idx]); + result.setLabel(label); + results.push_back(result); + } + + return true; +} + +Models::Resize_t Models::ObjectDetectionYolov5Model::pre_process_ov(const cv::Mat &input_image) { + const float INPUT_WIDTH = 640.f; + const float INPUT_HEIGHT = 640.f; + auto width = (float) input_image.cols; + auto height = (float) input_image.rows; + auto r = float(INPUT_WIDTH / std::max(width, height)); + int new_unpadW = int(round(width * r)); + int new_unpadH = int(round(height * r)); + Resize_t resize_img{}; + + cv::resize(input_image, resize_img.resized_image, {new_unpadW, new_unpadH}, + 0, 0, cv::INTER_AREA); + + resize_img.dw = (int) INPUT_WIDTH - new_unpadW; + resize_img.dh = (int) INPUT_HEIGHT - new_unpadH; + cv::Scalar color = cv::Scalar(100, 100, 100); + cv::copyMakeBorder(resize_img.resized_image, + resize_img.resized_image, + 0, + resize_img.dh, + 0, + resize_img.dw, + cv::BORDER_CONSTANT, + color); + + return resize_img; +} diff --git a/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp b/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp new file mode 100644 index 00000000..d08b5a57 --- /dev/null +++ b/openvino_wrapper_lib/src/models/object_segmentation_maskrcnn_model.cpp @@ -0,0 +1,238 @@ +// Copyright (c) 2022 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * @brief a header file with declaration of ObjectSegmentationModel class + * @file object_segmentation_model.cpp + */ +#include +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_maskrcnn_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" + +// Validated Object Segmentation Network +Models::ObjectSegmentationMaskrcnnModel::ObjectSegmentationMaskrcnnModel( + const std::string & label_loc, + const std::string & model_loc, + int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::ObjectSegmentationMaskrcnnModel::enqueue( + const std::shared_ptr &engine, + const cv::Mat &frame, + const cv::Rect &input_frame_loc) +{ + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + for (const auto &inputInfoItem : inputs_info_) + { + // Fill first input tensor with images. First b channel, then g and r channels + auto dims = inputInfoItem.get_shape(); + slog::debug << "input tensor shape is:"<< dims.size() <getRequest().get_tensor(inputInfoItem); + auto data = in_tensor.data(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } + } + + return true; +} + +bool Models::ObjectSegmentationMaskrcnnModel::matToBlob( + const cv::Mat &orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr &engine) +{ + (void)scale_factor; + (void)batch_index; + + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." 
<< slog::endl; + return false; + } + + ov::InferRequest infer_request = engine->getRequest(); + ov::Tensor input_tensor = infer_request.get_tensor(getInputName("input")); + ov::Shape input_shape = input_tensor.get_shape(); + + OPENVINO_ASSERT(input_shape.size() == 4); + // For frozen graph model: layout= "NHWC" + const size_t width = input_shape[2]; + const size_t height = input_shape[1]; + const size_t channels = input_shape[3]; + + slog::debug <<"width is:"<< width << slog::endl; + slog::debug <<"height is:"<< height << slog::endl; + slog::debug <<"channels is:"<< channels << slog::endl; + slog::debug <<"origin channels is:"<< orig_image.channels() << slog::endl; + slog::debug <<"input shape is:"<< input_shape << slog::endl; + + if (static_cast(orig_image.channels()) != channels) { + throw std::runtime_error("The number of channels for net input and image must match"); + } + +#if 1 + //input_tensor = ov::Tensor(ov::element::u8, {1, height, width, channels}, resized_image.data); + //engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + unsigned char* data = input_tensor.data(); + cv::Size size = {(int)width, (int)height}; + cv::Mat resized_image(size, CV_8UC3, data); + cv::resize(orig_image, resized_image, size); +#else + const auto input_data = input_tensor.data(); + cv::Mat resized_image(orig_image); + if (static_cast(width) != orig_image.size().width || + static_cast(height) != orig_image.size().height) { + cv::resize(orig_image, resized_image, cv::Size(width, height)); + } + + int batchOffset = batch_index * width * height * channels; + if (channels == 1) { + for (size_t h = 0; h < height; h++) { + for (size_t w = 0; w < width; w++) { + input_data[batchOffset + h * width + w] = resized_image.at(h, w); + } + } + } else if (channels == 3) { + for (size_t c = 0; c < channels; c++) { + for (size_t h = 0; h < height; h++) { + for (size_t w = 0; w < width; w++) { + input_data[batchOffset + c * width * height + h * width + w] = + resized_image.at(h, 
w)[c]; + } + } + } + } else { + throw std::runtime_error("Unsupported number of channels"); + } +#endif + + return true; +} + +const std::string Models::ObjectSegmentationMaskrcnnModel::getModelCategory() const +{ + return "Object Segmentation"; +} + +bool Models::ObjectSegmentationMaskrcnnModel::updateLayerProperty( + std::shared_ptr& model) +{ + slog::info<< "Checking INPUTS for Model" <inputs(); + slog::debug<<"input size="<input("image_tensor").get_shape(); + slog::debug<<"image_tensor shape is:"<< input_shape.size() <input("image_info").get_shape(); + slog::debug<<"image_info shape is:"<< info_shape.size() <output("masks").get_shape(); + slog::debug<<"masks shape is:"<< mask_shape.size() <output("reshape_do_2d").get_shape(); + slog::debug<< "detection shape is:" << detection_shape.size() < +#include +#include +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/engines/engine.hpp" +// Validated Object Segmentation Network +Models::ObjectSegmentationModel::ObjectSegmentationModel( + const std::string & label_loc, + const std::string & model_loc, + int max_batch_size) + : BaseModel(label_loc, model_loc, max_batch_size) +{ +} + +bool Models::ObjectSegmentationModel::enqueue( + const std::shared_ptr &engine, + const cv::Mat &frame, + const cv::Rect &input_frame_loc) +{ + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } + + for (const auto &inputInfoItem : inputs_info_) + { + // Fill first input tensor with images. 
First b channel, then g and r channels + auto dims = inputInfoItem.get_shape(); + if (dims.size()==4) + { + matToBlob(frame, input_frame_loc, 1.0, 0, engine); + } + + // Fill second input tensor with image info + if (dims.size() == 2) + { + ov::Tensor in_tensor = engine->getRequest().get_tensor(inputInfoItem); + auto data = in_tensor.data(); + data[0] = static_cast(frame.rows); // height + data[1] = static_cast(frame.cols); // width + data[2] = 1; + } + } + + return true; + +} + +bool Models::ObjectSegmentationModel::matToBlob( + const cv::Mat &orig_image, const cv::Rect &, float scale_factor, + int batch_index, const std::shared_ptr &engine) +{ + (void)scale_factor; + (void)batch_index; + + if (engine == nullptr) + { + slog::err << "A frame is trying to be enqueued in a NULL Engine." << slog::endl; + return false; + } +#if 1 + const size_t width = getInputWidth(); + const size_t height = getInputHeight(); + const size_t channels = 3; + slog::debug <<"width is:"<< width << slog::endl; + slog::debug <<"height is:"<< height << slog::endl; + + if (orig_image.cols != width || orig_image.rows != height){ + cv::Size size = {(int)width, (int)height}; + cv::Mat resized_image(size, CV_8UC3); + cv::resize(orig_image, resized_image, size); + ov::Tensor input_tensor = ov::Tensor(ov::element::u8, {1, height, width, channels}, resized_image.data); + engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + } else { + ov::Tensor input_tensor = ov::Tensor(ov::element::u8, {1, height, width, channels}, orig_image.data); + engine->getRequest().set_tensor(input_tensor_name_, input_tensor); + } +#else + ov::InferRequest infer_request = engine->getRequest(); + ov::Tensor input_tensor = infer_request.get_tensor(getInputName("input")); + ov::Shape input_shape = input_tensor.get_shape(); + + OPENVINO_ASSERT(input_shape.size() == 4); + // For frozen graph model: + const size_t width = input_shape[2]; + const size_t height = input_shape[1]; + const size_t channels = 
input_shape[3]; + + slog::debug <<"width is:"<< width << slog::endl; + slog::debug <<"height is:"<< height << slog::endl; + slog::debug <<"channels is:"<< channels << slog::endl; + slog::debug <<"origin channels is:"<< orig_image.channels() << slog::endl; + slog::debug <<"input shape is:"<< input_shape << slog::endl; + + if (static_cast(orig_image.channels()) != channels) { + throw std::runtime_error("The number of channels for net input and image must match"); + } + + unsigned char* data = input_tensor.data(); + cv::Size size = {(int)width, (int)height}; + cv::Mat resized_image(size, CV_8UC3, data); + cv::resize(orig_image, resized_image, size); +#endif + return true; +} + +const std::string Models::ObjectSegmentationModel::getModelCategory() const +{ + return "Object Segmentation"; +} + +bool Models::ObjectSegmentationModel::updateLayerProperty( + std::shared_ptr& model) +{ + slog::info<< "Checking INPUTS for Model" <inputs(); + slog::debug<<"input size"<input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + + ov::Layout tensor_layout = ov::Layout("NHWC"); + ov::Layout expect_layout = ov::Layout("NCHW"); + ov::Shape input_shape = model->input().get_shape(); + if (input_shape[1] == 3){ + expect_layout = ov::Layout("NCHW"); + setInputWidth(input_shape[3]); + setInputHeight(input_shape[2]); + } else if (input_shape[3] == 3){ + expect_layout = ov::Layout("NHWC"); + setInputWidth(input_shape[2]); + setInputHeight(input_shape[1]); + } else + slog::warn << "unexpect input shape " << input_shape << slog::endl; + + input_info.tensor(). + set_element_type(ov::element::u8). + set_layout(tensor_layout). + set_spatial_dynamic_shape(); + input_info.preprocess(). + convert_layout(expect_layout). 
+ resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR); + addInputInfo("input", input_tensor_name_); + + auto outputs_info = model->outputs(); + if (outputs_info.size() != 1) { + slog::warn << "This inference sample should have only one output, but we got" + << std::to_string(outputs_info.size()) << "outputs" + << slog::endl; + return false; + } + + output_tensor_name_ = model->output().get_any_name(); + auto data = model->output(); + + ov::preprocess::OutputInfo& output_info = ppp.output(output_tensor_name_); + output_info.tensor().set_element_type(ov::element::f32); + model = ppp.build(); + std::vector &in_size_vector = input_shape; + slog::debug<<"dimensional"<(outSizeVector[ov::layout::height_idx(outputLayout)]); + outWidth = static_cast(outSizeVector[ov::layout::width_idx(outputLayout)]); + break; + case 4: + //outChannels = outSizeVector[1]; + //outHeight = outSizeVector[2]; + //outWidth = outSizeVector[3]; + outputLayout = "NCHW"; + outChannels = static_cast(outSizeVector[ov::layout::channels_idx(outputLayout)]); + outHeight = static_cast(outSizeVector[ov::layout::height_idx(outputLayout)]); + outWidth = static_cast(outSizeVector[ov::layout::width_idx(outputLayout)]); + break; + default: + throw std::runtime_error("Unexpected output blob shape. Only 4D and 3D output blobs are" + "supported."); + + } + if(outHeight == 0 || outWidth == 0){ + slog::err << "output_height or output_width is not set, please check the MaskOutput Info " + << "is set correctly." << slog::endl; + return false; + } + + slog::debug << "output WIDTH " << outWidth<< slog::endl; + slog::debug << "output HEIGHT " << outHeight<< slog::endl; + slog::debug << "output CHANNELS " << outChannels<< slog::endl; + slog::debug << "output NAME " << output_tensor_name_<< slog::endl; + addOutputInfo("detection", output_tensor_name_); + + printAttribute(); + slog::info << "This model is SSDNet-like, Layer Property updated!" 
<< slog::endl; + return true; + +} diff --git a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp b/openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp similarity index 56% rename from dynamic_vino_lib/src/models/person_attribs_detection_model.cpp rename to openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp index c12e4071..405740d6 100644 --- a/dynamic_vino_lib/src/models/person_attribs_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/person_attribs_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,45 +17,42 @@ * @file person_attribs_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Person Attributes Detection Network Models::PersonAttribsDetectionModel::PersonAttribsDetectionModel( const std::string & label_loc, const std::string & model_loc, int max_batch_size) : BaseModel(label_loc, model_loc, max_batch_size) {} bool Models::PersonAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) -{ + std::shared_ptr& model) +{ slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { throw std::logic_error("Person Attribs topology should have only one input"); } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); - 
addInputInfo("input", input_info_map.begin()->first); - + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout tensor_layout{"NCHW"}; + input_info.tensor(). + set_element_type(ov::element::u8). + set_layout(tensor_layout); + slog::info << "Checking OUTPUTs for model " << getModelName() << slog::endl; - InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + auto output_info_map = model->outputs(); if (output_info_map.size() != 3) { throw std::logic_error("Person Attribs Network expects networks having 3 output"); } - input_ = input_info_map.begin()->first; - output_ = output_info_map.begin()->first; - auto output_iter = output_info_map.begin(); - InferenceEngine::DataPtr attribute_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr top_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr bottom_output_ptr = (output_iter++)->second; - - addOutputInfo("attributes_output_", attribute_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; - addOutputInfo("top_output_", top_output_ptr->getName()); - addOutputInfo("bottom_output_", bottom_output_ptr->getName()); + model = ppp.build(); + addInputInfo("input", input_tensor_name_); + addOutputInfo("attributes_output_",output_info_map[0].get_any_name()); + addOutputInfo("top_output_", output_info_map[1].get_any_name()); + addOutputInfo("bottom_output_", output_info_map[2].get_any_name()); + printAttribute(); return true; } diff --git a/openvino_wrapper_lib/src/models/person_reidentification_model.cpp b/openvino_wrapper_lib/src/models/person_reidentification_model.cpp new file mode 100644 index 00000000..911a3609 --- /dev/null +++ b/openvino_wrapper_lib/src/models/person_reidentification_model.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2018-2022 Intel Corporation +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @brief a header file with declaration of PersonReidentificationModel class + * @file person_reidentification_model.cpp + */ +#include +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" +// Validated Person Reidentification Network +Models::PersonReidentificationModel::PersonReidentificationModel( + const std::string & label_loc, const std::string & model_loc, int max_batch_size) +: BaseModel(label_loc, model_loc, max_batch_size) {} + +bool Models::PersonReidentificationModel::updateLayerProperty( + std::shared_ptr& model) +{ + slog::info << "Checking Inputs for Model" << getModelName() << slog::endl; + auto input_info_map = model->inputs(); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_ = input_info_map[0].get_any_name(); + const ov::Layout input_tensor_layout{"NCHW"}; + ppp.input(input_). + tensor(). + set_element_type(ov::element::u8). 
+ set_layout(input_tensor_layout); + + // set output property + auto output_info_map = model->outputs(); + output_ = output_info_map[0].get_any_name(); + + model = ppp.build(); + ov::set_batch(model, getMaxBatchSize()); + + return true; +} + +const std::string Models::PersonReidentificationModel::getModelCategory() const +{ + return "Person Reidentification"; +} diff --git a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp b/openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp similarity index 58% rename from dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp rename to openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp index 0637f3f6..e5a65947 100644 --- a/dynamic_vino_lib/src/models/vehicle_attribs_detection_model.cpp +++ b/openvino_wrapper_lib/src/models/vehicle_attribs_detection_model.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,44 +17,41 @@ * @file vehicle_attribs_detection_model.cpp */ #include -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/slog.hpp" // Validated Vehicle Attributes Detection Network Models::VehicleAttribsDetectionModel::VehicleAttribsDetectionModel( const std::string & label_loc, const std::string & model_loc, int max_batch_size) : BaseModel(label_loc, model_loc, max_batch_size) {} bool Models::VehicleAttribsDetectionModel::updateLayerProperty( - InferenceEngine::CNNNetwork& net_reader) + std::shared_ptr& model) { slog::info << "Checking INPUTs for model " << getModelName() << slog::endl; - // set input property - InferenceEngine::InputsDataMap input_info_map( - net_reader.getInputsInfo()); + auto input_info_map = model->inputs(); if (input_info_map.size() != 1) { throw std::logic_error("Vehicle Attribs topology should have only one input"); } - InferenceEngine::OutputsDataMap output_info_map( - net_reader.getOutputsInfo()); + + auto output_info_map = model->outputs(); if (output_info_map.size() != 2) { throw std::logic_error("Vehicle Attribs Network expects networks having two outputs"); } - InferenceEngine::InputInfo::Ptr input_info = input_info_map.begin()->second; - input_info->setPrecision(InferenceEngine::Precision::U8); - input_info->getInputData()->setLayout(InferenceEngine::Layout::NCHW); + ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model); + input_tensor_name_ = model->input().get_any_name(); + ov::preprocess::InputInfo& input_info = ppp.input(input_tensor_name_); + const ov::Layout tensor_layout{"NCHW"}; + input_info.tensor(). + set_element_type(ov::element::u8). 
+ set_layout(tensor_layout); + model = ppp.build(); + + addInputInfo("input", input_tensor_name_); // set input and output layer name - input_ = input_info_map.begin()->first; - auto output_iter = output_info_map.begin(); - // color_output_ = (output_iter++)->second->name; - // type_output_ = (output_iter++)->second->name; - InferenceEngine::DataPtr color_output_ptr = (output_iter++)->second; - InferenceEngine::DataPtr type_output_ptr = (output_iter++)->second; - - addOutputInfo("color_output_", color_output_ptr->getName()); - //output_gender_ = gender_output_ptr->name; - addOutputInfo("type_output_", type_output_ptr->getName()); + addOutputInfo("color_output_", output_info_map[1].get_any_name()); + addOutputInfo("type_output_", output_info_map[0].get_any_name()); printAttribute(); return true; diff --git a/dynamic_vino_lib/src/outputs/base_output.cpp b/openvino_wrapper_lib/src/outputs/base_output.cpp similarity index 84% rename from dynamic_vino_lib/src/outputs/base_output.cpp rename to openvino_wrapper_lib/src/outputs/base_output.cpp index 84081496..e401ce44 100644 --- a/dynamic_vino_lib/src/outputs/base_output.cpp +++ b/openvino_wrapper_lib/src/outputs/base_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "dynamic_vino_lib/outputs/base_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/base_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" void Outputs::BaseOutput::setPipeline(Pipeline * const pipeline) { diff --git a/dynamic_vino_lib/src/outputs/image_window_output.cpp b/openvino_wrapper_lib/src/outputs/image_window_output.cpp similarity index 80% rename from dynamic_vino_lib/src/outputs/image_window_output.cpp rename to openvino_wrapper_lib/src/outputs/image_window_output.cpp index 1653b6f6..0f2b93e2 100644 --- a/dynamic_vino_lib/src/outputs/image_window_output.cpp +++ b/openvino_wrapper_lib/src/outputs/image_window_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,8 +23,8 @@ #include #include -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/image_window_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" Outputs::ImageWindowOutput::ImageWindowOutput(const std::string & output_name, int focal_length) : BaseOutput(output_name), focal_length_(focal_length) @@ -64,7 +64,7 @@ unsigned Outputs::ImageWindowOutput::findOutput( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -75,7 +75,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -87,7 +87,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & 
results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -98,7 +98,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -111,7 +111,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -133,7 +133,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -143,46 +143,67 @@ void Outputs::ImageWindowOutput::accept( } } + void Outputs::ImageWindowOutput::mergeMask( - const std::vector & results) + const std::vector & results) +{ + const float alpha = 0.7f; + //const float MASK_THRESHOLD = 0.5; + //only for merged mask mat got from modles::fetchResults() + for (unsigned i=0; i & results) +{ + for (unsigned i = 0; i < results.size(); i++) { + cv::Rect result_rect = results[i].getLocation(); + unsigned target_index = findOutput(result_rect); + + auto fd_conf = results[i].getConfidence(); + if (fd_conf > 0) { + outputs_[target_index].rect = result_rect; + std::ostringstream ostream; + ostream << "[" << std::fixed << std::setprecision(3) << fd_conf << "]"; + outputs_[target_index].desc += ostream.str(); + auto label = results[i].getLabel(); + outputs_[target_index].desc += "[" + label + "]"; + } + } + mergeMask(results); +} + +void Outputs::ImageWindowOutput::mergeMask( + const std::vector & results) { - /* std::map class_color; for (unsigned i = 0; i < results.size(); i++) { std::string class_label = 
results[i].getLabel(); if (class_color.find(class_label) == class_color.end()) { class_color[class_label] = class_color.size(); } - auto & color = colors_[class_color[class_label]]; + auto & color = colors_[class_color[class_label] % colors_.size() ]; const float alpha = 0.7f; const float MASK_THRESHOLD = 0.5; cv::Rect location = results[i].getLocation(); cv::Mat roi_img = frame_(location); cv::Mat mask = results[i].getMask(); - cv::Mat colored_mask(location.height, location.width, frame_.type()); - - for (int h = 0; h < mask.size().height; ++h) { - for (int w = 0; w < mask.size().width; ++w) { - for (int ch = 0; ch < colored_mask.channels(); ++ch) { - colored_mask.at(h, w)[ch] = mask.at(h, w) > MASK_THRESHOLD ? - 255 * color[ch] : - roi_img.at(h, w)[ch]; - } - } - } + cv::Mat colored_mask(location.height, location.width, frame_.type(), + cv::Scalar(color[2], color[1], color[0]) ); + roi_img.copyTo(colored_mask, mask <= MASK_THRESHOLD); cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } - */ - const float alpha = 0.5f; - cv::Mat roi_img = frame_; - cv::Mat colored_mask = results[0].getMask(); - cv::resize(colored_mask,colored_mask,cv::Size(frame_.size().width,frame_.size().height)); - cv::addWeighted(colored_mask, alpha, roi_img, 1.0f - alpha, 0.0f, roi_img); } - void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -200,8 +221,11 @@ void Outputs::ImageWindowOutput::accept( mergeMask(results); } + + + void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -217,7 +241,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < 
results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -235,7 +259,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -248,7 +272,7 @@ void Outputs::ImageWindowOutput::accept( } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { cv::Rect result_rect = results[i].getLocation(); @@ -297,7 +321,7 @@ cv::Mat Outputs::ImageWindowOutput::getRotationTransform(double yaw, double pitc } void Outputs::ImageWindowOutput::accept( - const std::vector & results) + const std::vector & results) { for (unsigned i = 0; i < results.size(); i++) { auto result = results[i]; diff --git a/dynamic_vino_lib/src/outputs/ros_service_output.cpp b/openvino_wrapper_lib/src/outputs/ros_service_output.cpp similarity index 89% rename from dynamic_vino_lib/src/outputs/ros_service_output.cpp rename to openvino_wrapper_lib/src/outputs/ros_service_output.cpp index bb20c00a..257e8a35 100644 --- a/dynamic_vino_lib/src/outputs/ros_service_output.cpp +++ b/openvino_wrapper_lib/src/outputs/ros_service_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ #include #include #include -#include "dynamic_vino_lib/outputs/ros_service_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_service_output.hpp" #include "cv_bridge/cv_bridge.h" void Outputs::RosServiceOutput::setServiceResponse( @@ -42,7 +42,7 @@ void Outputs::RosServiceOutput::setResponseForFace( } void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) + std::shared_ptr response) { if (age_gender_topic_ != nullptr) { response->age_gender.objects = age_gender_topic_->objects; @@ -50,7 +50,7 @@ void Outputs::RosServiceOutput::setServiceResponse( } void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) + std::shared_ptr response) { if (emotions_topic_ != nullptr) { response->emotion.emotions = emotions_topic_->emotions; @@ -58,7 +58,7 @@ void Outputs::RosServiceOutput::setServiceResponse( } void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) + std::shared_ptr response) { if (headpose_topic_ != nullptr) { response->headpose.headposes = headpose_topic_->headposes; @@ -66,7 +66,7 @@ void Outputs::RosServiceOutput::setServiceResponse( } void Outputs::RosServiceOutput::setServiceResponse( - std::shared_ptr response) + std::shared_ptr response) { slog::info << "in People::Response ..."; if (faces_topic_ != nullptr) { diff --git a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp b/openvino_wrapper_lib/src/outputs/ros_topic_output.cpp similarity index 71% rename from dynamic_vino_lib/src/outputs/ros_topic_output.cpp rename to openvino_wrapper_lib/src/outputs/ros_topic_output.cpp index 1d24fbac..a0f8a531 100644 --- a/dynamic_vino_lib/src/outputs/ros_topic_output.cpp +++ b/openvino_wrapper_lib/src/outputs/ros_topic_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,37 +20,33 @@ #include #include #include -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/pipeline_params.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" #include "cv_bridge/cv_bridge.h" Outputs::RosTopicOutput::RosTopicOutput(std::string output_name, const rclcpp::Node::SharedPtr node) : BaseOutput(output_name) { - // rmw_qos_profile_t qos = rmw_qos_profile_default; - // qos.depth = 10; - // qos.reliability = RMW_QOS_POLICY_RELIABILITY_RELIABLE; - // qos.history = RMW_QOS_POLICY_HISTORY_KEEP_ALL; if(node != nullptr){ node_ = node; } else { node_ = rclcpp::Node::make_shared(output_name + "_topic_publisher"); } - pub_license_plate_ = node_->create_publisher( + pub_license_plate_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/detected_license_plates", 16); - pub_vehicle_attribs_ = node_->create_publisher( + pub_vehicle_attribs_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/detected_vehicles_attribs", 16); - pub_landmarks_ = node_->create_publisher( + pub_landmarks_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/detected_landmarks", 16); - pub_face_reid_ = node_->create_publisher( + pub_face_reid_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/reidentified_faces", 16); - pub_person_attribs_ = node_->create_publisher( + pub_person_attribs_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/person_attributes", 16); - pub_person_reid_ = node_->create_publisher( + pub_person_reid_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/reidentified_persons", 16); - pub_segmented_object_ = node_->create_publisher( + pub_segmented_object_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/segmented_obejcts", 16); pub_detected_object_ = 
node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/detected_objects", 16); @@ -58,12 +54,12 @@ Outputs::RosTopicOutput::RosTopicOutput(std::string output_name, node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/faces", 16); pub_emotion_ = - node_->create_publisher( + node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/emotions", 16); - pub_age_gender_ = node_->create_publisher( + pub_age_gender_ = node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/age_genders", 16); pub_headpose_ = - node_->create_publisher( + node_->create_publisher( "/openvino_toolkit/" + output_name_ + "/headposes", 16); emotions_topic_ = nullptr; detected_objects_topic_ = nullptr; @@ -85,12 +81,11 @@ void Outputs::RosTopicOutput::feedFrame(const cv::Mat & frame) } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - vehicle_attribs_topic_ = std::make_shared(); - people_msgs::msg::VehicleAttribs attribs; + vehicle_attribs_topic_ = std::make_shared(); + object_msgs::msg::VehicleAttribs attribs; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); attribs.roi.x_offset = loc.x; attribs.roi.y_offset = loc.y; @@ -103,12 +98,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - license_plate_topic_ = std::make_shared(); - people_msgs::msg::LicensePlate plate; + license_plate_topic_ = std::make_shared(); + object_msgs::msg::LicensePlate plate; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); plate.roi.x_offset = loc.x; plate.roi.y_offset = loc.y; @@ -120,12 +114,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - face_reid_topic_ = std::make_shared(); - people_msgs::msg::Reidentification face; + face_reid_topic_ = std::make_shared(); + 
object_msgs::msg::Reidentification face; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); face.roi.x_offset = loc.x; face.roi.y_offset = loc.y; @@ -137,12 +130,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - landmarks_topic_ = std::make_shared(); - people_msgs::msg::Landmark landmark; + landmarks_topic_ = std::make_shared(); + object_msgs::msg::Landmark landmark; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); landmark.roi.x_offset = loc.x; landmark.roi.y_offset = loc.y; @@ -160,12 +152,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - person_attribs_topic_ = std::make_shared(); - people_msgs::msg::PersonAttribute person_attrib; + person_attribs_topic_ = std::make_shared(); + object_msgs::msg::PersonAttribute person_attrib; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); person_attrib.roi.x_offset = loc.x; person_attrib.roi.y_offset = loc.y; @@ -177,12 +168,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - person_reid_topic_ = std::make_shared(); - people_msgs::msg::Reidentification person; + person_reid_topic_ = std::make_shared(); + object_msgs::msg::Reidentification person; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); person.roi.x_offset = loc.x; person.roi.y_offset = loc.y; @@ -194,12 +184,11 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { - segmented_objects_topic_ = std::make_shared(); - people_msgs::msg::ObjectInMask object; + segmented_objects_topic_ = std::make_shared(); + object_msgs::msg::ObjectInMask object; for (auto & r : results) { - 
// slog::info << ">"; auto loc = r.getLocation(); object.roi.x_offset = loc.x; object.roi.y_offset = loc.y; @@ -218,12 +207,34 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) +{ + segmented_objects_topic_ = std::make_shared(); + object_msgs::msg::ObjectInMask object; + for (auto & r : results) { + auto loc = r.getLocation(); + object.roi.x_offset = loc.x; + object.roi.y_offset = loc.y; + object.roi.width = loc.width; + object.roi.height = loc.height; + object.object_name = r.getLabel(); + object.probability = r.getConfidence(); + cv::Mat mask = r.getMask(); + for (int h = 0; h < mask.size().height; ++h) { + for (int w = 0; w < mask.size().width; ++w) { + object.mask_array.push_back(mask.at(h, w)); + } + } + segmented_objects_topic_->objects_vector.push_back(object); + } +} + +void Outputs::RosTopicOutput::accept( + const std::vector & results) { detected_objects_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox object; for (auto & r : results) { - // slog::info << ">"; auto loc = r.getLocation(); object.roi.x_offset = loc.x; object.roi.y_offset = loc.y; @@ -236,13 +247,12 @@ void Outputs::RosTopicOutput::accept( } void Outputs::RosTopicOutput::accept( - const std::vector & results) + const std::vector & results) { faces_topic_ = std::make_shared(); object_msgs::msg::ObjectInBox face; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); face.roi.x_offset = loc.x; face.roi.y_offset = loc.y; @@ -255,13 +265,12 @@ void Outputs::RosTopicOutput::accept( } } -void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector & results) { - emotions_topic_ = std::make_shared(); + emotions_topic_ = std::make_shared(); - people_msgs::msg::Emotion emotion; + object_msgs::msg::Emotion emotion; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); emotion.roi.x_offset = 
loc.x; emotion.roi.y_offset = loc.y; @@ -272,13 +281,12 @@ void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector & results) { - age_gender_topic_ = std::make_shared(); + age_gender_topic_ = std::make_shared(); - people_msgs::msg::AgeGender ag; + object_msgs::msg::AgeGender ag; for (auto r : results) { - // slog::info << ">"; auto loc = r.getLocation(); ag.roi.x_offset = loc.x; ag.roi.y_offset = loc.y; @@ -297,11 +305,11 @@ void Outputs::RosTopicOutput::accept(const std::vector & results) +void Outputs::RosTopicOutput::accept(const std::vector & results) { - headpose_topic_ = std::make_shared(); + headpose_topic_ = std::make_shared(); - people_msgs::msg::HeadPose hp; + object_msgs::msg::HeadPose hp; for (auto r : results) { auto loc = r.getLocation(); hp.roi.x_offset = loc.x; @@ -319,67 +327,56 @@ void Outputs::RosTopicOutput::handleOutput() { auto header = getPipeline()->getInputDevice()->getLockedHeader(); if (vehicle_attribs_topic_ != nullptr) { - // slog::info << "publishing landmarks detection outputs." << slog::endl; vehicle_attribs_topic_->header = header; pub_vehicle_attribs_->publish(*vehicle_attribs_topic_); vehicle_attribs_topic_ = nullptr; } if (license_plate_topic_ != nullptr) { - // slog::info << "publishing face reidentification outputs." << slog::endl; license_plate_topic_->header = header; pub_license_plate_->publish(*license_plate_topic_); license_plate_topic_ = nullptr; } if (landmarks_topic_ != nullptr) { - // slog::info << "publishing landmarks detection outputs." << slog::endl; landmarks_topic_->header = header; pub_landmarks_->publish(*landmarks_topic_); landmarks_topic_ = nullptr; } if (face_reid_topic_ != nullptr) { - // slog::info << "publishing face reidentification outputs." 
<< slog::endl; face_reid_topic_->header = header; pub_face_reid_->publish(*face_reid_topic_); face_reid_topic_ = nullptr; } if (person_attribs_topic_ != nullptr) { - // slog::info << "publishing person attributes outputs." << slog::endl; person_attribs_topic_->header = header; pub_person_attribs_->publish(*person_attribs_topic_); person_attribs_topic_ = nullptr; } if (person_reid_topic_ != nullptr) { - // slog::info << "publishing preson reidentification outputs." << slog::endl; person_reid_topic_->header = header; pub_person_reid_->publish(*person_reid_topic_); person_reid_topic_ = nullptr; } if (segmented_objects_topic_ != nullptr) { - // slog::info << "publishing segmented objects outputs." << slog::endl; segmented_objects_topic_->header = header; pub_segmented_object_->publish(*segmented_objects_topic_); segmented_objects_topic_ = nullptr; } if (detected_objects_topic_ != nullptr) { - // slog::info << "publishing detected objects outputs." << slog::endl; detected_objects_topic_->header = header; pub_detected_object_->publish(*detected_objects_topic_); detected_objects_topic_ = nullptr; } if (faces_topic_ != nullptr) { - // slog::info << "publishing faces outputs." << slog::endl; faces_topic_->header = header; pub_face_->publish(*faces_topic_); faces_topic_ = nullptr; } if (emotions_topic_ != nullptr) { - // slog::info << "publishing emotions outputs." << slog::endl; emotions_topic_->header = header; pub_emotion_->publish(*emotions_topic_); emotions_topic_ = nullptr; } if (age_gender_topic_ != nullptr) { - // slog::info << "publishing age gender outputs." 
<< slog::endl; age_gender_topic_->header = header; pub_age_gender_->publish(*age_gender_topic_); age_gender_topic_ = nullptr; diff --git a/dynamic_vino_lib/src/outputs/rviz_output.cpp b/openvino_wrapper_lib/src/outputs/rviz_output.cpp similarity index 68% rename from dynamic_vino_lib/src/outputs/rviz_output.cpp rename to openvino_wrapper_lib/src/outputs/rviz_output.cpp index a9778ccf..f35e6414 100644 --- a/dynamic_vino_lib/src/outputs/rviz_output.cpp +++ b/openvino_wrapper_lib/src/outputs/rviz_output.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,8 +22,8 @@ #include #include #include "cv_bridge/cv_bridge.h" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/outputs/rviz_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/outputs/rviz_output.hpp" Outputs::RvizOutput::RvizOutput(std::string output_name, const rclcpp::Node::SharedPtr node) : BaseOutput(output_name) @@ -45,57 +45,63 @@ void Outputs::RvizOutput::feedFrame(const cv::Mat & frame) } void Outputs::RvizOutput::accept( - const std::vector & results) + const std::vector & results) { image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector & results) + const std::vector & results) { image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector & results) + const std::vector & results) { image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector & results) + const std::vector & results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector & results) { image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector & 
results) + const std::vector & results) { image_window_output_->accept(results); } void Outputs::RvizOutput::accept( - const std::vector & results) + const std::vector & results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept( + const std::vector & results) +{ + image_window_output_->accept(results); +} + +void Outputs::RvizOutput::accept(const std::vector & results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector & results) { image_window_output_->accept(results); } -void Outputs::RvizOutput::accept(const std::vector & results) +void Outputs::RvizOutput::accept(const std::vector & results) { image_window_output_->accept(results); } diff --git a/dynamic_vino_lib/src/pipeline.cpp b/openvino_wrapper_lib/src/pipeline.cpp similarity index 88% rename from dynamic_vino_lib/src/pipeline.cpp rename to openvino_wrapper_lib/src/pipeline.cpp index 25ba0f55..cd454103 100644 --- a/dynamic_vino_lib/src/pipeline.cpp +++ b/openvino_wrapper_lib/src/pipeline.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,16 +17,17 @@ * @file pipeline.cpp */ -#include +#include #include #include #include #include #include +#include -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/pipeline.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" Pipeline::Pipeline(const std::string & name) { @@ -120,7 +121,7 @@ void Pipeline::addConnect(const std::string & parent, const std::string & name) bool Pipeline::add( const std::string & parent, const std::string & name, - std::shared_ptr inference) + std::shared_ptr inference) { if (parent.empty() || name.empty() || !isLegalConnect(parent, name)) { slog::err << "ARGuments ERROR when adding inference instance!" << slog::endl; @@ -137,14 +138,14 @@ bool Pipeline::add( bool Pipeline::add( const std::string & name, - std::shared_ptr inference) + std::shared_ptr inference) { if (name.empty()) { slog::err << "Item name can't be empty!" << slog::endl; return false; } - std::map>::iterator it = + std::map>::iterator it = name_to_detection_map_.find(name); if (it != name_to_detection_map_.end()) { slog::warn << "inferance instance for [" << name << @@ -187,14 +188,12 @@ void Pipeline::runOnce() initInferenceCounter(); if (!input_device_->read(&frame_)) { - // throw std::logic_error("Failed to get frame from cv::VideoCapture"); - // slog::warn << "Failed to get frame from input_device." << slog::endl; return; //do nothing if now frame read out } width_ = frame_.cols; height_ = frame_.rows; slog::debug << "DEBUG: in Pipeline run process..." 
<< slog::endl; - // auto t0 = std::chrono::high_resolution_clock::now(); + for (auto pos = next_.equal_range(input_device_name_); pos.first != pos.second; ++pos.first) { std::string detection_name = pos.first->second; slog::debug << "DEBUG: Enqueue for detection: " << detection_name << slog::endl; @@ -215,12 +214,8 @@ void Pipeline::runOnce() std::unique_lock lock(counter_mutex_); cv_.wait(lock, [self = this]() {return self->counter_ == 0;}); - //auto t1 = std::chrono::high_resolution_clock::now(); - //typedef std::chrono::duration> ms; - slog::debug << "DEBUG: in Pipeline run process...handleOutput" << slog::endl; for (auto & pair : name_to_output_map_) { - // slog::info << "Handling Output ..." << pair.first << slog::endl; pair.second->handleOutput(); } } @@ -236,14 +231,17 @@ void Pipeline::setCallback() { for (auto & pair : name_to_detection_map_) { std::string detection_name = pair.first; - std::function callb; - callb = [detection_name, self = this]() + std::function callb; + callb = [detection_name, self = this](std::exception_ptr ex) { + if (ex) + throw ex; + self->callback(detection_name); return; }; - pair.second->getEngine()->getRequest()->SetCompletionCallback(callb); - } + pair.second->getEngine()->getRequest().set_callback(callb); + } } void Pipeline::callback(const std::string & detection_name) @@ -274,7 +272,7 @@ void Pipeline::callback(const std::string & detection_name) increaseInferenceCounter(); next_detection_ptr->submitRequest(); auto request = next_detection_ptr->getEngine()->getRequest(); - request->Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + request.wait(); } } } @@ -296,14 +294,12 @@ void Pipeline::increaseInferenceCounter() { std::lock_guard lk(counter_mutex_); ++counter_; - // slog::info << "counter = " << counter_ << slog::endl; } void Pipeline::decreaseInferenceCounter() { std::lock_guard lk(counter_mutex_); --counter_; - // slog::info << "counter = " << counter_ << slog::endl; } void Pipeline::countFPS() diff --git 
a/dynamic_vino_lib/src/pipeline_manager.cpp b/openvino_wrapper_lib/src/pipeline_manager.cpp similarity index 74% rename from dynamic_vino_lib/src/pipeline_manager.cpp rename to openvino_wrapper_lib/src/pipeline_manager.cpp index 5d348255..184ac922 100644 --- a/dynamic_vino_lib/src/pipeline_manager.cpp +++ b/openvino_wrapper_lib/src/pipeline_manager.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,55 +17,55 @@ * @file pipeline_manager.cpp */ -#include +#include #include #include #include #include #if 0 -#include "dynamic_vino_lib/inferences/landmarks_detection.hpp" -#include "dynamic_vino_lib/inferences/face_reidentification.hpp" -#include "dynamic_vino_lib/models/face_reidentification_model.hpp" -#include "dynamic_vino_lib/models/landmarks_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/landmarks_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_reidentification.hpp" +#include "openvino_wrapper_lib/models/face_reidentification_model.hpp" +#include "openvino_wrapper_lib/models/landmarks_detection_model.hpp" #endif -#include "dynamic_vino_lib/models/vehicle_attribs_detection_model.hpp" -#include "dynamic_vino_lib/models/license_plate_detection_model.hpp" -#include "dynamic_vino_lib/models/person_reidentification_model.hpp" -#include "dynamic_vino_lib/models/person_attribs_detection_model.hpp" -#include "dynamic_vino_lib/inferences/vehicle_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/license_plate_detection.hpp" -#include "dynamic_vino_lib/inferences/person_reidentification.hpp" -#include "dynamic_vino_lib/inferences/person_attribs_detection.hpp" -#include "dynamic_vino_lib/inferences/face_detection.hpp" -#include "dynamic_vino_lib/models/face_detection_model.hpp" -#include 
"dynamic_vino_lib/inferences/age_gender_detection.hpp" -#include "dynamic_vino_lib/models/age_gender_detection_model.hpp" -#include "dynamic_vino_lib/inferences/emotions_detection.hpp" -#include "dynamic_vino_lib/models/emotion_detection_model.hpp" -#include "dynamic_vino_lib/inferences/head_pose_detection.hpp" -#include "dynamic_vino_lib/models/head_pose_detection_model.hpp" -#include "dynamic_vino_lib/models/object_detection_yolov2_model.hpp" -#include "dynamic_vino_lib/models/object_detection_ssd_model.hpp" -#include "dynamic_vino_lib/inferences/object_segmentation.hpp" -#include "dynamic_vino_lib/models/object_segmentation_model.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera.hpp" -#include "dynamic_vino_lib/inputs/realsense_camera_topic.hpp" -#include "dynamic_vino_lib/inputs/standard_camera.hpp" -#include "dynamic_vino_lib/inputs/ip_camera.hpp" -#include "dynamic_vino_lib/inputs/video_input.hpp" -#include "dynamic_vino_lib/outputs/image_window_output.hpp" -#include "dynamic_vino_lib/outputs/ros_topic_output.hpp" -#include "dynamic_vino_lib/outputs/rviz_output.hpp" -#include "dynamic_vino_lib/outputs/ros_service_output.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/pipeline_params.hpp" -#include "dynamic_vino_lib/services/pipeline_processing_server.hpp" -#include "dynamic_vino_lib/engines/engine_manager.hpp" +#include "openvino_wrapper_lib/models/vehicle_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/models/license_plate_detection_model.hpp" +#include "openvino_wrapper_lib/models/person_reidentification_model.hpp" +#include "openvino_wrapper_lib/models/person_attribs_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/vehicle_attribs_detection.hpp" +#include "openvino_wrapper_lib/inferences/license_plate_detection.hpp" +#include 
"openvino_wrapper_lib/inferences/person_reidentification.hpp" +#include "openvino_wrapper_lib/inferences/person_attribs_detection.hpp" +#include "openvino_wrapper_lib/inferences/face_detection.hpp" +#include "openvino_wrapper_lib/models/face_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/age_gender_detection.hpp" +#include "openvino_wrapper_lib/models/age_gender_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/emotions_detection.hpp" +#include "openvino_wrapper_lib/models/emotion_detection_model.hpp" +#include "openvino_wrapper_lib/inferences/head_pose_detection.hpp" +#include "openvino_wrapper_lib/models/head_pose_detection_model.hpp" +#include "openvino_wrapper_lib/models/object_detection_yolov5_model.hpp" +#include "openvino_wrapper_lib/models/object_detection_ssd_model.hpp" +#include "openvino_wrapper_lib/inferences/object_segmentation.hpp" +#include "openvino_wrapper_lib/models/object_segmentation_model.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/inputs/realsense_camera.hpp" +#include "openvino_wrapper_lib/inputs/realsense_camera_topic.hpp" +#include "openvino_wrapper_lib/inputs/standard_camera.hpp" +#include "openvino_wrapper_lib/inputs/ip_camera.hpp" +#include "openvino_wrapper_lib/inputs/video_input.hpp" +#include "openvino_wrapper_lib/outputs/image_window_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_topic_output.hpp" +#include "openvino_wrapper_lib/outputs/rviz_output.hpp" +#include "openvino_wrapper_lib/outputs/ros_service_output.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp" +#include "openvino_wrapper_lib/engines/engine_manager.hpp" std::shared_ptr PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData & 
params, rclcpp::Node::SharedPtr node) @@ -111,9 +111,6 @@ PipelineManager::createPipeline(const Params::ParamManager::PipelineRawData & pa pipeline->add(it->first, it->second); } - // slog::info << "Updateing filters ..." << slog::endl; - // pipeline->addFilters(params.filters); - pipelines_.insert({params.name, data}); pipeline->setCallback(); @@ -189,16 +186,16 @@ PipelineManager::parseOutput(const PipelineData & pdata) return outputs; } -std::map> +std::map> PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & params) { - std::map> inferences; + std::map> inferences; for (auto & infer : params.infers) { if (infer.name.empty() || infer.model.empty()) { continue; } slog::info << "Parsing Inference: " << infer.name << slog::endl; - std::shared_ptr object = nullptr; + std::shared_ptr object = nullptr; if (infer.name == kInferTpye_FaceDetection) { object = createFaceDetection(infer); @@ -212,6 +209,8 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa object = createObjectDetection(infer); } else if (infer.name == kInferTpye_ObjectSegmentation) { object = createObjectSegmentation(infer); + } else if (infer.name == kInferTpye_ObjectSegmentationMaskrcnn) { + object = createObjectSegmentationMaskrcnn(infer); } else if (infer.name == kInferTpye_PersonReidentification) { object = createPersonReidentification(infer); } else if (infer.name == kInferTpye_PersonAttribsDetection) { @@ -238,46 +237,46 @@ PipelineManager::parseInference(const Params::ParamManager::PipelineRawData & pa } -std::shared_ptr +std::shared_ptr PipelineManager::createFaceDetection( const Params::ParamManager::InferenceRawData & infer) { return createObjectDetection(infer); } -std::shared_ptr +std::shared_ptr PipelineManager::createAgeGenderRecognition(const Params::ParamManager::InferenceRawData & param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = 
engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); return infer; } -std::shared_ptr +std::shared_ptr PipelineManager::createEmotionRecognition(const Params::ParamManager::InferenceRawData & param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); return infer; } -std::shared_ptr +std::shared_ptr PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceRawData & param) { auto model = std::make_shared(param.label, param.model, param.batch); model->modelInit(); auto engine = engine_manager_.createEngine(param.engine, model); - auto infer = std::make_shared(); + auto infer = std::make_shared(); infer->loadNetwork(model); infer->loadEngine(engine); @@ -285,24 +284,24 @@ PipelineManager::createHeadPoseEstimation(const Params::ParamManager::InferenceR } -std::shared_ptr +std::shared_ptr PipelineManager::createObjectDetection( const Params::ParamManager::InferenceRawData & infer) { std::shared_ptr object_detection_model; - std::shared_ptr object_inference_ptr; + std::shared_ptr object_inference_ptr; slog::debug << "for test in createObjectDetection()" << slog::endl; if (infer.model_type == kInferTpye_ObjectDetectionTypeSSD) { object_detection_model = std::make_shared(infer.label, infer.model, infer.batch); } - if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov2) { + if (infer.model_type == kInferTpye_ObjectDetectionTypeYolov5) { object_detection_model = - std::make_shared(infer.label, infer.model, infer.batch); + std::make_shared(infer.label, infer.model, infer.batch); } slog::debug << "for test in createObjectDetection(), Created SSDModel" << slog::endl; - object_inference_ptr = 
std::make_shared( + object_inference_ptr = std::make_shared( infer.enable_roi_constraint, infer.confidence_threshold); // To-do theshold configuration slog::debug << "for test in createObjectDetection(), before modelInit()" << slog::endl; object_detection_model->modelInit(); @@ -315,7 +314,7 @@ PipelineManager::createObjectDetection( return object_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceRawData & infer) { auto model = @@ -324,7 +323,25 @@ PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceR slog::info << "Segmentation model initialized." << slog::endl; auto engine = engine_manager_.createEngine(infer.engine, model); slog::info << "Segmentation Engine initialized." << slog::endl; - auto segmentation_inference_ptr = std::make_shared( + auto segmentation_inference_ptr = std::make_shared( + infer.confidence_threshold); + slog::info << "Segmentation Inference instanced." << slog::endl; + segmentation_inference_ptr->loadNetwork(model); + segmentation_inference_ptr->loadEngine(engine); + + return segmentation_inference_ptr; +} + +std::shared_ptr +PipelineManager::createObjectSegmentationMaskrcnn(const Params::ParamManager::InferenceRawData & infer) +{ + auto model = + std::make_shared(infer.label, infer.model, infer.batch); + model->modelInit(); + slog::info << "Segmentation model initialized." << slog::endl; + auto engine = engine_manager_.createEngine(infer.engine, model); + slog::info << "Segmentation Engine initialized." << slog::endl; + auto segmentation_inference_ptr = std::make_shared( infer.confidence_threshold); slog::info << "Segmentation Inference instanced." 
<< slog::endl; segmentation_inference_ptr->loadNetwork(model); @@ -333,12 +350,12 @@ PipelineManager::createObjectSegmentation(const Params::ParamManager::InferenceR return segmentation_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createPersonReidentification( const Params::ParamManager::InferenceRawData & infer) { std::shared_ptr person_reidentification_model; - std::shared_ptr reidentification_inference_ptr; + std::shared_ptr reidentification_inference_ptr; slog::debug << "for test in createPersonReidentification()"<(infer.label, infer.model, infer.batch); @@ -346,7 +363,7 @@ PipelineManager::createPersonReidentification( slog::info << "Reidentification model initialized" << slog::endl; auto person_reidentification_engine = engine_manager_.createEngine(infer.engine, person_reidentification_model); reidentification_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); slog::debug<< "for test in createPersonReidentification(), before loadNetwork"<loadNetwork(person_reidentification_model); reidentification_inference_ptr->loadEngine(person_reidentification_engine); @@ -355,7 +372,7 @@ PipelineManager::createPersonReidentification( return reidentification_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createVehicleAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -364,14 +381,14 @@ PipelineManager::createVehicleAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto vehicle_attribs_ptr = - std::make_shared(); + std::make_shared(); vehicle_attribs_ptr->loadNetwork(model); vehicle_attribs_ptr->loadEngine(engine); return vehicle_attribs_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createLicensePlateDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -380,14 +397,14 @@ PipelineManager::createLicensePlateDetection( model->modelInit(); auto engine = 
engine_manager_.createEngine(infer.engine, model); auto license_plate_ptr = - std::make_shared(); + std::make_shared(); license_plate_ptr->loadNetwork(model); license_plate_ptr->loadEngine(engine); return license_plate_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createPersonAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -397,7 +414,7 @@ PipelineManager::createPersonAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto attribs_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); attribs_inference_ptr->loadNetwork(model); attribs_inference_ptr->loadEngine(engine); @@ -405,7 +422,7 @@ PipelineManager::createPersonAttribsDetection( } #if 0 -std::shared_ptr +std::shared_ptr PipelineManager::createPersonReidentification( const Params::ParamManager::InferenceRawData & infer) { @@ -414,14 +431,14 @@ PipelineManager::createPersonReidentification( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto reidentification_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); reidentification_inference_ptr->loadNetwork(model); reidentification_inference_ptr->loadEngine(engine); return reidentification_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createPersonAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -431,14 +448,14 @@ PipelineManager::createPersonAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto attribs_inference_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); attribs_inference_ptr->loadNetwork(model); attribs_inference_ptr->loadEngine(engine); return attribs_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createLandmarksDetection( const 
Params::ParamManager::InferenceRawData & infer) { @@ -447,14 +464,14 @@ PipelineManager::createLandmarksDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto landmarks_inference_ptr = - std::make_shared(); + std::make_shared(); landmarks_inference_ptr->loadNetwork(model); landmarks_inference_ptr->loadEngine(engine); return landmarks_inference_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createFaceReidentification( const Params::ParamManager::InferenceRawData & infer) { @@ -463,14 +480,14 @@ PipelineManager::createFaceReidentification( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto face_reid_ptr = - std::make_shared(infer.confidence_threshold); + std::make_shared(infer.confidence_threshold); face_reid_ptr->loadNetwork(model); face_reid_ptr->loadEngine(engine); return face_reid_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createVehicleAttribsDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -479,14 +496,14 @@ PipelineManager::createVehicleAttribsDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto vehicle_attribs_ptr = - std::make_shared(); + std::make_shared(); vehicle_attribs_ptr->loadNetwork(model); vehicle_attribs_ptr->loadEngine(engine); return vehicle_attribs_ptr; } -std::shared_ptr +std::shared_ptr PipelineManager::createLicensePlateDetection( const Params::ParamManager::InferenceRawData & infer) { @@ -495,7 +512,7 @@ PipelineManager::createLicensePlateDetection( model->modelInit(); auto engine = engine_manager_.createEngine(infer.engine, model); auto license_plate_ptr = - std::make_shared(); + std::make_shared(); license_plate_ptr->loadNetwork(model); license_plate_ptr->loadEngine(engine); @@ -556,7 +573,7 @@ void PipelineManager::runAll() void PipelineManager::runService() { auto node = std::make_shared>("pipeline_service"); + >("pipeline_service"); while (service_.state != 
PipelineState_ThreadStopped && service_.thread != nullptr) { rclcpp::spin_some(node); std::this_thread::sleep_for(std::chrono::milliseconds(1)); diff --git a/dynamic_vino_lib/src/pipeline_params.cpp b/openvino_wrapper_lib/src/pipeline_params.cpp similarity index 94% rename from dynamic_vino_lib/src/pipeline_params.cpp rename to openvino_wrapper_lib/src/pipeline_params.cpp index 9649e684..9b9b3c41 100644 --- a/dynamic_vino_lib/src/pipeline_params.cpp +++ b/openvino_wrapper_lib/src/pipeline_params.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,11 +17,11 @@ * @file pipeline.cpp */ -#include +#include #include #include #include -#include "dynamic_vino_lib/pipeline_params.hpp" +#include "openvino_wrapper_lib/pipeline_params.hpp" PipelineParams::PipelineParams(const std::string & name) { diff --git a/dynamic_vino_lib/src/services/frame_processing_server.cpp b/openvino_wrapper_lib/src/services/frame_processing_server.cpp similarity index 84% rename from dynamic_vino_lib/src/services/frame_processing_server.cpp rename to openvino_wrapper_lib/src/services/frame_processing_server.cpp index 5a227798..06e8387c 100644 --- a/dynamic_vino_lib/src/services/frame_processing_server.cpp +++ b/openvino_wrapper_lib/src/services/frame_processing_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "dynamic_vino_lib/services/frame_processing_server.hpp" -#include +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" +#include #include #include -#include +#include #include #include #include @@ -25,11 +25,11 @@ #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino_wrapper_lib/slog.hpp" namespace vino_service { @@ -93,5 +93,5 @@ void FrameProcessingServer::cbService( } template class FrameProcessingServer; -template class FrameProcessingServer; +template class FrameProcessingServer; } // namespace vino_service diff --git a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp b/openvino_wrapper_lib/src/services/pipeline_processing_server.cpp similarity index 87% rename from dynamic_vino_lib/src/services/pipeline_processing_server.cpp rename to openvino_wrapper_lib/src/services/pipeline_processing_server.cpp index c5beeb97..b24fb2eb 100644 --- a/dynamic_vino_lib/src/services/pipeline_processing_server.cpp +++ b/openvino_wrapper_lib/src/services/pipeline_processing_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "dynamic_vino_lib/services/pipeline_processing_server.hpp" +#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp" #include -#include +#include #include #include #include @@ -23,9 +23,9 @@ #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/slog.hpp" namespace vino_service { @@ -52,13 +52,13 @@ void PipelineProcessingServer::setResponse( std::shared_ptr response) { for (auto it = pipelines_->begin(); it != pipelines_->end(); ++it) { - pipeline_srv_msgs::msg::Pipeline pipeline_msg; + openvino_msgs::msg::Pipeline pipeline_msg; pipeline_msg.name = it->first; pipeline_msg.running_status = std::to_string(it->second.state); auto connection_map = it->second.pipeline->getPipelineDetail(); for (auto & current_pipe : connection_map) { - pipeline_srv_msgs::msg::Connection connection; + openvino_msgs::msg::Connection connection; connection.input = current_pipe.first.c_str(); connection.output = current_pipe.second.c_str(); pipeline_msg.connections.push_back(connection); @@ -100,5 +100,5 @@ void PipelineProcessingServer::cbService( } setResponse(response); } -template class PipelineProcessingServer; +template class PipelineProcessingServer; } // namespace vino_service diff --git a/sample/CMakeLists.txt b/sample/CMakeLists.txt index 656f0a19..25f50b09 100644 --- a/sample/CMakeLists.txt +++ b/sample/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -9,14 +9,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+set(OpenVINO_LIBRARIES openvino::runtime) cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_sample) +project(openvino_node) -# Default to C++14 +# Default to C++17 if(NOT CMAKE_CXX_STANDARD) - set(CMAKE_CXX_STANDARD 14) + set(CMAKE_CXX_STANDARD 17) endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) @@ -30,19 +31,17 @@ else() endif() set(CMAKE_CXX_FLAGS "-fPIE -fPIC -D_FORTIFY_SOURCE=2 -fstack-protector -Wformat -Wformat-security -Wall ${CMAKE_CXX_FLAGS}") - find_package(ament_cmake REQUIRED) find_package(ament_index_cpp REQUIRED) find_package(rclcpp REQUIRED) find_package(rcutils) find_package(OpenCV REQUIRED) find_package(cv_bridge REQUIRED) -find_package(InferenceEngine REQUIRED) -find_package(dynamic_vino_lib REQUIRED) +find_package(OpenVINO REQUIRED) +find_package(openvino_wrapper_lib REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_msgs REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) find_package(realsense2 REQUIRED) find_package(rclcpp_components) @@ -97,21 +96,12 @@ source_group("include" FILES ${MAIN_HEADERS}) include_directories(${OpenCV_INCLUDE_DIRS}) include_directories(${PROJECT_SOURCE_DIR}/include) -include_directories(${dynamic_vino_lib_INCLUDE_DIRS}) -include_directories(${vino_param_lib_INCLUDE_DIRS}) -include_directories(${InferenceEngine_INCLUDE_DIRS}) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/extension) -include_directories(${InferenceEngine_INCLUDE_DIRS}/../src) -#include_directories(${InferenceEngine_INCLUDE_DIRS}/../samples/build/thirdparty/gflags/include) 
-#include_directories(${InferenceEngine_INCLUDE_DIRS}/../build/samples/thirdparty/gflags/include) - +include_directories(${openvino_wrapper_lib_INCLUDE_DIRS}) +include_directories(${openvino_param_lib_INCLUDE_DIRS}) +include_directories(${OpenVINO_DIRS}) include_directories(${realsense2_INCLUDE_DIRS}) -#include_directories (/opt/ros2_openvino/include) # Create library file from sources. -#add_executable(${PROJECT_NAME} ${MAIN_SRC} ${MAIN_HEADERS}) - if(UNIX) set(LIB_DL dl) endif() @@ -124,8 +114,8 @@ target_link_libraries(vino_param_sample ) ament_target_dependencies(vino_param_sample - "vino_param_lib" - "dynamic_vino_lib" + "openvino_param_lib" + "openvino_wrapper_lib" "yaml_cpp_vendor" ) @@ -142,11 +132,10 @@ ament_target_dependencies(pipeline_with_params "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" "yaml_cpp_vendor" "realsense2" @@ -166,19 +155,15 @@ ament_target_dependencies(composable_pipeline "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" "yaml_cpp_vendor" "realsense2" ) rclcpp_components_register_nodes(composable_pipeline "ComposablePipeline") -#set(node_plugins "") -#set(node_plugins "${node_plugins}ComposablePipeline;$\n") - add_executable(image_object_server src/image_object_server.cpp @@ -193,11 +178,10 @@ ament_target_dependencies(image_object_server "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -214,11 +198,10 @@ ament_target_dependencies(image_people_server "object_msgs" "ament_index_cpp" 
"class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -235,11 +218,10 @@ ament_target_dependencies(image_object_client "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) @@ -256,11 +238,10 @@ ament_target_dependencies(image_people_client "object_msgs" "ament_index_cpp" "class_loader" - "dynamic_vino_lib" - "InferenceEngine" - "people_msgs" - "pipeline_srv_msgs" - "vino_param_lib" + "openvino_wrapper_lib" + "OpenVINO" + "openvino_msgs" + "openvino_param_lib" "OpenCV" ) diff --git a/sample/include/utility.hpp b/sample/include/utility.hpp index f0a302de..8cba421f 100644 --- a/sample/include/utility.hpp +++ b/sample/include/utility.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/sample/launch/image_object_server.launch.py b/sample/launch/image_object_server.launch.py index 2d3bbbba..cdf978cd 100644 --- a/sample/launch/image_object_server.launch.py +++ b/sample/launch/image_object_server.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 'image_object_server.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_object_server', + package='openvino_node', node_executable='image_object_server', arguments=['-config', default_yaml], output='screen'), ]) diff --git a/sample/launch/image_people_server.launch.py b/sample/launch/image_people_server.launch.py index c0a4ee57..0e2873d1 100644 --- a/sample/launch/image_people_server.launch.py +++ b/sample/launch/image_people_server.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 'image_people_server.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_people_server', + package='openvino_node', node_executable='image_people_server', arguments=['-config', default_yaml], output='screen'), ]) diff --git a/sample/launch/multi_pipeline_service.launch.py b/sample/launch/multi_pipeline_service.launch.py index 2bcdd2f6..aacc6973 100644 --- a/sample/launch/multi_pipeline_service.launch.py +++ b/sample/launch/multi_pipeline_service.launch.py @@ -22,14 +22,14 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 
'param', 'multi_pipleine_service.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default2.rviz') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object1/detected_objects', diff --git a/sample/launch/pipeline_composite_object_topic.launch.py b/sample/launch/pipeline_composite_object_topic.launch.py index 5184448f..767a28ed 100644 --- a/sample/launch/pipeline_composite_object_topic.launch.py +++ b/sample/launch/pipeline_composite_object_topic.launch.py @@ -5,7 +5,7 @@ import os def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 'pipeline_composite_object_topic.yaml') container = ComposableNodeContainer( node_name='vision_pipeline', @@ -20,7 +20,7 @@ def generate_launch_description(): parameters=[get_package_share_directory('realsense_examples')+'/config/d435i.yaml'], extra_arguments=[{'use_intra_process_comms':'true'}]), ComposableNode( - package='dynamic_vino_sample', + package='openvino_node', node_plugin='ComposablePipeline', node_name='composable_pipeline', parameters=[{"config":default_yaml}], diff --git a/sample/launch/pipeline_face_reidentification.launch.py b/sample/launch/pipeline_face_reidentification.launch.py index 758cc881..5a2ceb51 100644 --- a/sample/launch/pipeline_face_reidentification.launch.py +++ b/sample/launch/pipeline_face_reidentification.launch.py @@ -24,16 +24,16 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + 
#default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_face_reidentification.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_face_reidentification.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_face_reidentification.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], diff --git a/sample/launch/pipeline_image.launch.py b/sample/launch/pipeline_image.launch.py index 8b272141..0ac9f013 100644 --- a/sample/launch/pipeline_image.launch.py +++ b/sample/launch/pipeline_image.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_image.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_image.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_image.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', 
executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/people/faces', diff --git a/sample/launch/pipeline_image_ci_test.py b/sample/launch/pipeline_image_ci_test.py new file mode 100644 index 00000000..644ccb76 --- /dev/null +++ b/sample/launch/pipeline_image_ci_test.py @@ -0,0 +1,56 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_image.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_image_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/people/faces', + '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/people/emotions', + '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/people/headposes', + '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/people/age_genders', + '/ros2_openvino_toolkit/people/age_genders_Recognition'), + ('/openvino_toolkit/people/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_object.launch.py b/sample/launch/pipeline_object.launch.py index fd9aaafb..457faae1 100644 --- a/sample/launch/pipeline_object.launch.py +++ b/sample/launch/pipeline_object.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 
'param', #'pipeline_object.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', @@ -44,8 +43,4 @@ def generate_launch_description(): '/ros2_openvino_toolkit/image_rviz')], output='screen'), - # Rviz - #launch_ros.actions.Node( - # package='rviz2', node_executable='rviz2', output='screen', - # arguments=['--display-config', default_rviz]), ]) diff --git a/sample/launch/pipeline_object_topic.launch.py b/sample/launch/pipeline_object_topic.launch.py index cac7cc28..07de7471 100644 --- a/sample/launch/pipeline_object_topic.launch.py +++ b/sample/launch/pipeline_object_topic.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_topic.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 
'param','pipeline_object_topic.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_topic.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_object_yolo.launch.py b/sample/launch/pipeline_object_yolo.launch.py index a4bbd01d..d5df63ac 100644 --- a/sample/launch/pipeline_object_yolo.launch.py +++ b/sample/launch/pipeline_object_yolo.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_yolo.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object_yolo.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolo.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ 
('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_object_yolo_ci_test.py b/sample/launch/pipeline_object_yolo_ci_test.py new file mode 100644 index 00000000..083ed88d --- /dev/null +++ b/sample/launch/pipeline_object_yolo_ci_test.py @@ -0,0 +1,51 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_object_yolo.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolo_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/images', + 
'/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_object_yolo_topic.launch.py b/sample/launch/pipeline_object_yolo_topic.launch.py index 7f6c0d22..451f4b95 100644 --- a/sample/launch/pipeline_object_yolo_topic.launch.py +++ b/sample/launch/pipeline_object_yolo_topic.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_object_yolo_topic.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_object_yolo_topic.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_object_yolo_topic.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. 
- #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/pipeline_people.launch.py b/sample/launch/pipeline_people.launch.py index 3c10c216..88386dcd 100644 --- a/sample/launch/pipeline_people.launch.py +++ b/sample/launch/pipeline_people.launch.py @@ -25,19 +25,18 @@ def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_people.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_people.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/sample/launch/pipeline_people_ci_test.py b/sample/launch/pipeline_people_ci_test.py new file mode 100644 index 00000000..e37d4e45 --- /dev/null +++ b/sample/launch/pipeline_people_ci_test.py @@ -0,0 +1,58 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_people.yaml') + + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/people/detected_objects', + '/ros2_openvino_toolkit/face_detection'), + ('/openvino_toolkit/people/emotions', + '/ros2_openvino_toolkit/emotions_recognition'), + ('/openvino_toolkit/people/headposes', + '/ros2_openvino_toolkit/headposes_estimation'), + ('/openvino_toolkit/people/age_genders', + '/ros2_openvino_toolkit/age_genders_Recognition'), + ('/openvino_toolkit/people/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + 
#package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_people_ip.launch.py b/sample/launch/pipeline_people_ip.launch.py index 2cd41a3b..eb69a212 100644 --- a/sample/launch/pipeline_people_ip.launch.py +++ b/sample/launch/pipeline_people_ip.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_people_ip.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_people_ip.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_people_ip.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/sample/launch/pipeline_person_attributes.launch.py b/sample/launch/pipeline_person_attributes.launch.py index ce6d6d50..10645ca2 100644 --- a/sample/launch/pipeline_person_attributes.launch.py +++ b/sample/launch/pipeline_person_attributes.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_person_attributes.yaml') - default_rviz = 
os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_person_attributes.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_person_attributes.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_person_attributes_ci_test.py b/sample/launch/pipeline_person_attributes_ci_test.py new file mode 100644 index 00000000..75db6d5c --- /dev/null +++ b/sample/launch/pipeline_person_attributes_ci_test.py @@ -0,0 +1,51 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_person_attributes.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_person_attributes_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/person_attributes','/ros2_openvino_toolkit/person_attributes'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_reidentification.launch.py b/sample/launch/pipeline_reidentification.launch.py index 630aa61c..defca5b3 100644 --- a/sample/launch/pipeline_reidentification.launch.py +++ b/sample/launch/pipeline_reidentification.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_reidentification.yaml') - default_rviz = 
os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_reidentification.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_reidentification.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/sample/launch/pipeline_reidentification_ci_test.py b/sample/launch/pipeline_reidentification_ci_test.py new file mode 100644 index 00000000..9461bcf1 --- /dev/null +++ b/sample/launch/pipeline_reidentification_ci_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_reidentification.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_reidentification_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_objects', + '/ros2_openvino_toolkit/detected_objects'), + ('/openvino_toolkit/object/reidentified_persons', + '/ros2_openvino_toolkit/reidentified_persons'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_segmentation.launch.py b/sample/launch/pipeline_segmentation.launch.py index 9b511f6a..d5303ae4 100644 --- a/sample/launch/pipeline_segmentation.launch.py +++ b/sample/launch/pipeline_segmentation.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_segmentation.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 
'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_segmentation.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/pipeline_segmentation_ci_test.py b/sample/launch/pipeline_segmentation_ci_test.py new file mode 100644 index 00000000..7be59e6d --- /dev/null +++ b/sample/launch/pipeline_segmentation_ci_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_ci.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. + + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), + ('/openvino_toolkit/segmentation/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_segmentation_image.launch.py b/sample/launch/pipeline_segmentation_image.launch.py index bf460736..584ddfc3 100644 --- a/sample/launch/pipeline_segmentation_image.launch.py +++ b/sample/launch/pipeline_segmentation_image.launch.py @@ -24,25 +24,21 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = 
os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_segmentation_image.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_segmentation_image.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_image.yaml')), # Realsense # NOTE: Split realsense_node launching from OpenVINO package, which # will be launched by RDK launching file or manually. - #launch_ros.actions.Node( - # package='realsense_ros2_camera', node_executable='realsense_ros2_camera', - # output='screen'), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/launch/pipeline_segmentation_image_ci_test.py b/sample/launch/pipeline_segmentation_image_ci_test.py new file mode 100644 index 00000000..ef831657 --- /dev/null +++ b/sample/launch/pipeline_segmentation_image_ci_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation_image.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_image_ci.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. + + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), + ('/openvino_toolkit/segmentation/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_segmentation_maskrcnn.launch.py b/sample/launch/pipeline_segmentation_maskrcnn.launch.py new file mode 100644 index 00000000..b07d5476 --- /dev/null +++ b/sample/launch/pipeline_segmentation_maskrcnn.launch.py @@ -0,0 +1,55 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_maskrcnn.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. 
+ + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), + ('/openvino_toolkit/segmentation/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + launch_ros.actions.Node( + package='rviz2', + executable='rviz2', output='screen', + arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py b/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py new file mode 100644 index 00000000..98c71950 --- /dev/null +++ b/sample/launch/pipeline_segmentation_maskrcnn_ci_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_segmentation.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_segmentation_maskrcnn_ci.yaml')), + # Realsense + # NOTE: Split realsense_node launching from OpenVINO package, which + # will be launched by RDK launching file or manually. + + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), + ('/openvino_toolkit/segmentation/segmented_obejcts', + '/ros2_openvino_toolkit/segmented_obejcts'), + ('/openvino_toolkit/segmentation/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_vehicle_detection.launch.py b/sample/launch/pipeline_vehicle_detection.launch.py index 56cb722d..a147d95c 100644 --- a/sample/launch/pipeline_vehicle_detection.launch.py +++ b/sample/launch/pipeline_vehicle_detection.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = 
os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_vehicle_detection.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_vehicle_detection.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_vehicle_detection.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/object/detected_license_plates', diff --git a/sample/launch/pipeline_vehicle_detection_ci_test.py b/sample/launch/pipeline_vehicle_detection_ci_test.py new file mode 100644 index 00000000..f4d72f15 --- /dev/null +++ b/sample/launch/pipeline_vehicle_detection_ci_test.py @@ -0,0 +1,52 @@ +# Copyright 2018 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch face detection and rviz.""" + +import os + +from ament_index_python.packages import get_package_share_directory +from launch import LaunchDescription +import launch_ros.actions + +from launch.substitutions import LaunchConfiguration, PythonExpression +import launch + +def generate_launch_description(): + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', + #'pipeline_vehicle_detection.yaml') + default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', + 'rviz/default.rviz') + return LaunchDescription([ + launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_vehicle_detection_ci.yaml')), + # Openvino detection + launch_ros.actions.Node( + package='openvino_node', + executable='pipeline_with_params', + arguments=['-config', LaunchConfiguration('yaml_path')], + remappings=[ + ('/openvino_toolkit/object/detected_license_plates', + '/ros2_openvino_toolkit/detected_license_plates'), + ('/openvino_toolkit/object/detected_vehicles_attribs', + '/ros2_openvino_toolkit/detected_vehicles_attribs'), + ('/openvino_toolkit/object/images', '/ros2_openvino_toolkit/image_rviz')], + output='screen'), + + # Rviz + #launch_ros.actions.Node( + #package='rviz2', + #executable='rviz2', output='screen', + #arguments=['--display-config', default_rviz]), + ]) diff --git a/sample/launch/pipeline_video.launch.py b/sample/launch/pipeline_video.launch.py index a232fee9..63f76cec 100644 --- a/sample/launch/pipeline_video.launch.py +++ b/sample/launch/pipeline_video.launch.py @@ -24,18 +24,17 @@ import launch def generate_launch_description(): - #default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + #default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', #'pipeline_video.yaml') - default_rviz = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'launch', + 
default_rviz = os.path.join(get_package_share_directory('openvino_node'), 'launch', 'rviz/default.rviz') return LaunchDescription([ launch.actions.DeclareLaunchArgument(name='yaml_path', default_value = - os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param','pipeline_video.yaml')), + os.path.join(get_package_share_directory('openvino_node'), 'param','pipeline_video.yaml')), # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', + package='openvino_node', executable='pipeline_with_params', - #arguments=['-config', default_yaml], arguments=['-config', LaunchConfiguration('yaml_path')], remappings=[ ('/openvino_toolkit/segmentation/segmented_obejcts', diff --git a/sample/launch/ros2_openvino_oa.launch.py b/sample/launch/ros2_openvino_oa.launch.py index 687c8fc7..b759e165 100644 --- a/sample/launch/ros2_openvino_oa.launch.py +++ b/sample/launch/ros2_openvino_oa.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_sample'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_node'), 'param', 'pipeline_object_topic.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/image_raw', '/camera/color/image_raw'), diff --git a/sample/package.xml b/sample/package.xml index e86a34fa..b82fcf92 100644 --- a/sample/package.xml +++ b/sample/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_sample + openvino_node 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -33,8 +33,8 @@ limitations under the License. 
gflags yaml_cpp_vendor ament_index_cpp - dynamic_vino_lib - vino_param_lib + openvino_wrapper_lib + openvino_param_lib cv_bridge object_msgs realsense2 @@ -49,8 +49,8 @@ limitations under the License. ament_index_cpp class_loader cv_bridge - dynamic_vino_lib - vino_param_lib + openvino_wrapper_lib + openvino_param_lib object_msgs realsense2 diff --git a/sample/param/image_object_server.yaml b/sample/param/image_object_server.yaml index 030cb841..19c7bb48 100644 --- a/sample/param/image_object_server.yaml +++ b/sample/param/image_object_server.yaml @@ -1,9 +1,10 @@ Pipelines: - name: object inputs: [Image] + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 @@ -15,6 +16,6 @@ Pipelines: right: [ObjectDetection] - left: ObjectDetection right: [RosService] - input_path: "/home/intel/Pictures/car.png" + Common: diff --git a/sample/param/image_people_server.yaml b/sample/param/image_people_server.yaml index 578ec311..21e2a2de 100644 --- a/sample/param/image_people_server.yaml +++ b/sample/param/image_people_server.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml engine: CPU - label: /to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: 
AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService, RViz] - left: HeadPoseEstimation right: [RosService, RViz] - input_path: "~/Pictures/face.jpeg" Common: diff --git a/sample/param/multi_pipleine_service.yaml b/sample/param/multi_pipleine_service.yaml index 2a55d57c..0220ae11 100644 --- a/sample/param/multi_pipleine_service.yaml +++ b/sample/param/multi_pipleine_service.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 @@ -24,7 +24,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP32/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_composite_object_topic.yaml b/sample/param/pipeline_composite_object_topic.yaml index 58e6bbf7..61c4d6f2 100644 --- a/sample/param/pipeline_composite_object_topic.yaml +++ b/sample/param/pipeline_composite_object_topic.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_face_reidentification.yaml b/sample/param/pipeline_face_reidentification.yaml index 08c5bef2..c2cd0f5a 100644 --- a/sample/param/pipeline_face_reidentification.yaml +++ b/sample/param/pipeline_face_reidentification.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: 
/opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_image.yaml b/sample/param/pipeline_image.yaml index 3a0d0923..f41c3dc2 100644 --- a/sample/param/pipeline_image.yaml +++ b/sample/param/pipeline_image.yaml @@ -1,27 +1,29 @@ -Pipelines: +Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/sample_faces.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: 
/opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU + label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU + label: to/be/set/xxx.labels batch: 16 outputs: [ImageWindow, RosTopic, RViz] connects: diff --git a/sample/param/pipeline_image_ci.yaml b/sample/param/pipeline_image_ci.yaml new file mode 100644 index 00000000..37da03ba --- /dev/null +++ b/sample/param/pipeline_image_ci.yaml @@ -0,0 +1,41 @@ +Pipelines: +- name: people + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: AgeGenderRecognition + model: 
/opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + connects: + - left: Image + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic] + - left: AgeGenderRecognition + right: [RosTopic] + - left: EmotionRecognition + right: [RosTopic] + - left: HeadPoseEstimation + right: [RosTopic] + +Common: diff --git a/sample/param/pipeline_image_video.yaml b/sample/param/pipeline_image_video.yaml index 887cfe25..b383f30f 100644 --- a/sample/param/pipeline_image_video.yaml +++ b/sample/param/pipeline_image_video.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /home/houk/Desktop/video + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: 
/opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/pipeline_object.yaml b/sample/param/pipeline_object.yaml index 7c0f97d7..62e2f9ca 100644 --- a/sample/param/pipeline_object.yaml +++ b/sample/param/pipeline_object.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_object_topic.yaml b/sample/param/pipeline_object_topic.yaml index 39c9cd34..2d1a2c7d 100644 --- a/sample/param/pipeline_object_topic.yaml +++ b/sample/param/pipeline_object_topic.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera] #[RealSenseCameraTopic] infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/public/ssd_mobilenet_v2_coco/FP16/ssd_mobilenet_v2_coco.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_object_yolo.yaml b/sample/param/pipeline_object_yolo.yaml index 1c629dee..2d5e479b 100644 --- a/sample/param/pipeline_object_yolo.yaml +++ b/sample/param/pipeline_object_yolo.yaml @@ -1,11 +1,11 @@ Pipelines: - name: object - inputs: [RealSenseCamera] + inputs: [Image] + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/public/yolo-v2-tf/FP16/yolo-v2-tf.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: CPU #MYRIAD label: to/be/set/xxx.labels batch: 1 @@ -13,7 +13,7 @@ Pipelines: enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame outputs: [ImageWindow, RosTopic, RViz] connects: - - left: RealSenseCamera + - left: Image right: [ObjectDetection] - left: ObjectDetection right: [ImageWindow] diff --git a/sample/param/pipeline_object_yolo_ci.yaml b/sample/param/pipeline_object_yolo_ci.yaml new file mode 100644 index 00000000..7804ecb0 --- /dev/null +++ b/sample/param/pipeline_object_yolo_ci.yaml @@ -0,0 +1,21 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 + engine: CPU #MYRIAD + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) 
constrained into the camera frame + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_object_yolo_topic.yaml b/sample/param/pipeline_object_yolo_topic.yaml index 7d268287..fdff4264 100644 --- a/sample/param/pipeline_object_yolo_topic.yaml +++ b/sample/param/pipeline_object_yolo_topic.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: MYRIAD label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_people.yaml b/sample/param/pipeline_people.yaml index a68b4cc3..6d9805e0 100644 --- a/sample/param/pipeline_people.yaml +++ b/sample/param/pipeline_people.yaml @@ -1,26 +1,26 @@ Pipelines: - name: people inputs: [StandardCamera] - infers: + infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: 
/opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/pipeline_people_ci.yaml b/sample/param/pipeline_people_ci.yaml new file mode 100644 index 00000000..ce0a1e90 --- /dev/null +++ b/sample/param/pipeline_people_ci.yaml @@ -0,0 +1,41 @@ +Pipelines: +- name: people + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: FaceDetection + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of 
interest) constrained into the camera frame + - name: AgeGenderRecognition + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + - name: EmotionRecognition + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels + batch: 16 + - name: HeadPoseEstimation + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 16 + outputs: [RosTopic] + connects: + - left: Image + right: [FaceDetection] + - left: FaceDetection + right: [AgeGenderRecognition, EmotionRecognition, HeadPoseEstimation, RosTopic] + - left: AgeGenderRecognition + right: [RosTopic] + - left: EmotionRecognition + right: [RosTopic] + - left: HeadPoseEstimation + right: [RosTopic] + +Common: diff --git a/sample/param/pipeline_people_ip.yaml b/sample/param/pipeline_people_ip.yaml index ba01c412..b37903c1 100644 --- a/sample/param/pipeline_people_ip.yaml +++ b/sample/param/pipeline_people_ip.yaml @@ -4,24 +4,24 @@ Pipelines: input_path: "rtsp://" infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP32/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: 
/opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/pipeline_person_attributes.yaml b/sample/param/pipeline_person_attributes.yaml index 527ec655..8721c40f 100644 --- a/sample/param/pipeline_person_attributes.yaml +++ b/sample/param/pipeline_person_attributes.yaml @@ -3,14 +3,14 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonAttribsDetection - model: 
/opt/openvino_toolkit/models/person-attributes/output/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml + model: /opt/openvino_toolkit/models/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_person_attributes_ci.yaml b/sample/param/pipeline_person_attributes_ci.yaml new file mode 100644 index 00000000..786c2461 --- /dev/null +++ b/sample/param/pipeline_person_attributes_ci.yaml @@ -0,0 +1,28 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: PersonAttribsDetection + model: /opt/openvino_toolkit/models/intel/person-attributes-recognition-crossroad-0230/FP32/person-attributes-recognition-crossroad-0230.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [PersonAttribsDetection, RosTopic] + - left: PersonAttribsDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_reidentification.yaml b/sample/param/pipeline_reidentification.yaml index 5a0d472a..2598031b 100644 --- a/sample/param/pipeline_reidentification.yaml +++ b/sample/param/pipeline_reidentification.yaml @@ -3,14 +3,14 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_reidentification_ci.yaml b/sample/param/pipeline_reidentification_ci.yaml new file mode 100644 index 00000000..72b8f22a --- /dev/null +++ b/sample/param/pipeline_reidentification_ci.yaml @@ -0,0 +1,28 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_faces.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: PersonReidentification + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.7 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [PersonReidentification] + - left: 
PersonReidentification + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation.yaml b/sample/param/pipeline_segmentation.yaml index bd5a1b80..f0eccb13 100644 --- a/sample/param/pipeline_segmentation.yaml +++ b/sample/param/pipeline_segmentation.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCameraTopic] infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/public/deeplabv3/FP16/deeplabv3.xml + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml engine: CPU #"HETERO:CPU,GPU,MYRIAD" label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_segmentation_ci.yaml b/sample/param/pipeline_segmentation_ci.yaml new file mode 100644 index 00000000..b8f075c2 --- /dev/null +++ b/sample/param/pipeline_segmentation_ci.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png + infers: + - name: ObjectSegmentation + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml + engine: CPU #"HETERO:CPU,GPU,MYRIAD" + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectSegmentation] + - left: ObjectSegmentation + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation_image.yaml b/sample/param/pipeline_segmentation_image.yaml index 616d290d..33481f8b 100644 --- a/sample/param/pipeline_segmentation_image.yaml +++ b/sample/param/pipeline_segmentation_image.yaml @@ -1,10 +1,10 @@ Pipelines: - name: segmentation inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/expressway.jpg + input_path: to/be/set/image_path infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/semantic-segmentation/output/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml + model: 
/opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_segmentation_image_ci.yaml b/sample/param/pipeline_segmentation_image_ci.yaml new file mode 100644 index 00000000..c80832bc --- /dev/null +++ b/sample/param/pipeline_segmentation_image_ci.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png + infers: + - name: ObjectSegmentation + model: /opt/openvino_toolkit/models/intel/semantic-segmentation-adas-0001/FP16/semantic-segmentation-adas-0001.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectSegmentation] + - left: ObjectSegmentation + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation_maskrcnn.yaml b/sample/param/pipeline_segmentation_maskrcnn.yaml new file mode 100644 index 00000000..fa47f088 --- /dev/null +++ b/sample/param/pipeline_segmentation_maskrcnn.yaml @@ -0,0 +1,22 @@ +Pipelines: +- name: segmentation + inputs: [StandardCamera] + infers: + - name: ObjectSegmentationMaskrcnn + model: /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/mask_rcnn_inception_resnet_v2_atrous_coco.xml + engine: CPU #"HETERO:CPU,GPU,MYRIAD" + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [ImageWindow, RosTopic, RViz] + connects: + - left: StandardCamera + right: [ObjectSegmentationMaskrcnn] + - left: ObjectSegmentationMaskrcnn + right: [ImageWindow] + - left: ObjectSegmentationMaskrcnn + right: [RosTopic] + - left: ObjectSegmentationMaskrcnn + right: [RViz] + +OpenvinoCommon: diff --git a/sample/param/pipeline_segmentation_maskrcnn_ci.yaml b/sample/param/pipeline_segmentation_maskrcnn_ci.yaml new file mode 100644 index 00000000..855b6833 --- 
/dev/null +++ b/sample/param/pipeline_segmentation_maskrcnn_ci.yaml @@ -0,0 +1,19 @@ +Pipelines: +- name: segmentation + inputs: [Image] + input_path: /root/catkin_ws/src/ros2_openvino_toolkit/data/images/sample_car.png + infers: + - name: ObjectSegmentationMaskrcnn + model: /opt/openvino_toolkit/models/public/mask_rcnn_inception_resnet_v2_atrous_coco/FP16/mask_rcnn_inception_resnet_v2_atrous_coco.xml + engine: CPU #"HETERO:CPU,GPU,MYRIAD" + label: to/be/set/xxx.labels + batch: 1 + confidence_threshold: 0.5 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectSegmentationMaskrcnn] + - left: ObjectSegmentationMaskrcnn + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_vehicle_detection.yaml b/sample/param/pipeline_vehicle_detection.yaml index a91af9ea..3eff9e59 100644 --- a/sample/param/pipeline_vehicle_detection.yaml +++ b/sample/param/pipeline_vehicle_detection.yaml @@ -3,18 +3,18 @@ Pipelines: inputs: [StandardCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: 
/opt/openvino_toolkit/models/vehicle-attributes-recognition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/pipeline_vehicle_detection_ci.yaml b/sample/param/pipeline_vehicle_detection_ci.yaml new file mode 100644 index 00000000..760ff276 --- /dev/null +++ b/sample/param/pipeline_vehicle_detection_ci.yaml @@ -0,0 +1,35 @@ +Pipelines: +- name: object + inputs: [Image] + input_path: /root/jpg/car.jpg + infers: + - name: ObjectDetection + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + engine: CPU + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels + batch: 1 + enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame + - name: VehicleAttribsDetection + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + engine: CPU + label: to/be/set/xxx.labels + batch: 1 + - name: LicensePlateDetection + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + engine: CPU + label: 
to/be/set/xxx.labels + batch: 1 + outputs: [RosTopic] + connects: + - left: Image + right: [ObjectDetection] + - left: ObjectDetection + right: [{VehicleAttribsDetection: label == vehicle && confidence >= 0.8}, {LicensePlateDetection: label == license && confidence >= 0.8}] + - left: ObjectDetection + right: [RosTopic] + - left: VehicleAttribsDetection + right: [RosTopic] + - left: LicensePlateDetection + right: [RosTopic] + +OpenvinoCommon: diff --git a/sample/param/pipeline_video.yaml b/sample/param/pipeline_video.yaml index 0872be8e..0493ca76 100644 --- a/sample/param/pipeline_video.yaml +++ b/sample/param/pipeline_video.yaml @@ -1,10 +1,10 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /home/ubuntu20/jiawei/ros-ov/ros2_galactic_openvino_ws/src/ros2_openvino_toolkit/data/car_cut.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation - model: /opt/openvino_toolkit/models/public/deeplabv3/FP16/deeplabv3.xml + model: /opt/openvino_toolkit/models/convert/public/deeplabv3/FP16/deeplabv3.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/image_object_service_test.yaml b/sample/param/testParam/param/image_object_service_test.yaml index 9a1ffd0b..baea4479 100644 --- a/sample/param/testParam/param/image_object_service_test.yaml +++ b/sample/param/testParam/param/image_object_service_test.yaml @@ -1,10 +1,10 @@ Pipelines: - name: object inputs: [Image] - input_path: "/opt/openvino_toolkit/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/car_vihecle.png" + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/image_people_service_test.yaml 
b/sample/param/testParam/param/image_people_service_test.yaml index ec579426..40f6513e 100644 --- a/sample/param/testParam/param/image_people_service_test.yaml +++ b/sample/param/testParam/param/image_people_service_test.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 16 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: 
/opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService] - left: HeadPoseEstimation right: [RosService] - input_path: "/home/intel/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/team.png" Common: diff --git a/sample/param/testParam/param/pipeline_anormal.yaml b/sample/param/testParam/param/pipeline_anormal.yaml index 8f5f5146..2e3a4214 100644 --- a/sample/param/testParam/param/pipeline_anormal.yaml +++ b/sample/param/testParam/param/pipeline_anormal.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: Objectdetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 @@ -21,7 +21,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_face_reid_video.yaml b/sample/param/testParam/param/pipeline_face_reid_video.yaml index f59b2a7d..82986615 100644 --- a/sample/param/testParam/param/pipeline_face_reid_video.yaml +++ b/sample/param/testParam/param/pipeline_face_reid_video.yaml @@ -1,22 +1,22 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/face_reid.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: 
/opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_face_reidentification_test.yaml b/sample/param/testParam/param/pipeline_face_reidentification_test.yaml index 6313811a..54ee42ee 100644 --- a/sample/param/testParam/param/pipeline_face_reidentification_test.yaml +++ b/sample/param/testParam/param/pipeline_face_reidentification_test.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: 
/opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_face_test.yaml b/sample/param/testParam/param/pipeline_face_test.yaml index f831fd19..68b395c9 100644 --- a/sample/param/testParam/param/pipeline_face_test.yaml +++ b/sample/param/testParam/param/pipeline_face_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/people_detection.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: 
/opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_image_test.yaml b/sample/param/testParam/param/pipeline_image_test.yaml index 50540acd..074cbd22 100644 --- a/sample/param/testParam/param/pipeline_image_test.yaml +++ b/sample/param/testParam/param/pipeline_image_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: 
/opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_object_test.yaml b/sample/param/testParam/param/pipeline_object_test.yaml index c45999ec..542d3142 100644 --- a/sample/param/testParam/param/pipeline_object_test.yaml +++ 
b/sample/param/testParam/param/pipeline_object_test.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/sample/param/testParam/param/pipeline_object_yolo_test.yaml b/sample/param/testParam/param/pipeline_object_yolo_test.yaml index dfdbe15d..35fc06c9 100644 --- a/sample/param/testParam/param/pipeline_object_yolo_test.yaml +++ b/sample/param/testParam/param/pipeline_object_yolo_test.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: GPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_reidentification_test.yaml b/sample/param/testParam/param/pipeline_reidentification_test.yaml index 8bb8228d..527742fe 100644 --- a/sample/param/testParam/param/pipeline_reidentification_test.yaml +++ b/sample/param/testParam/param/pipeline_reidentification_test.yaml @@ -1,17 +1,17 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/people_reid.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: 
true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/param/testParam/param/pipeline_segmentation_test.yaml b/sample/param/testParam/param/pipeline_segmentation_test.yaml index 2a7a0dfd..7ba13e01 100644 --- a/sample/param/testParam/param/pipeline_segmentation_test.yaml +++ b/sample/param/testParam/param/pipeline_segmentation_test.yaml @@ -1,7 +1,7 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/segmentation.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation model: /opt/openvino_toolkit/models/segmentation/output/FP16/frozen_inference_graph.xml diff --git a/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml b/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml index 5f2d7b50..ae4c173b 100644 --- a/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml +++ b/sample/param/testParam/param/pipeline_vehicle_detection_test.yaml @@ -1,21 +1,21 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/sample/param/testParam/data/vehicle_detection.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: 
/opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/sample/src/image_object_client.cpp b/sample/src/image_object_client.cpp index 4d58ef72..9704febf 100644 --- a/sample/src/image_object_client.cpp +++ b/sample/src/image_object_client.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,13 +13,13 @@ // limitations under the License. 
#include -#include +#include #include #include #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" int main(int argc, char ** argv) { @@ -27,7 +27,7 @@ int main(int argc, char ** argv) auto node = rclcpp::Node::make_shared("service_example_for_object"); if (argc != 2) { - RCLCPP_INFO(node->get_logger(), "Usage: ros2 run dynamic_vino_sample image_object_client" + RCLCPP_INFO(node->get_logger(), "Usage: ros2 run openvino_node image_object_client" ""); return -1; } diff --git a/sample/src/image_object_server.cpp b/sample/src/image_object_server.cpp index 9b28edb5..50cb7e81 100644 --- a/sample/src/image_object_server.cpp +++ b/sample/src/image_object_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,18 +13,18 @@ // limitations under the License. 
#include -#include +#include #include #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/services/frame_processing_server.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino/openvino.hpp" #if(defined(USE_OLD_E_PLUGIN_API)) #include #endif diff --git a/sample/src/image_people_client.cpp b/sample/src/image_people_client.cpp index 21adda4f..240beae3 100644 --- a/sample/src/image_people_client.cpp +++ b/sample/src/image_people_client.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include -#include +#include +#include #include -#include +#include #include #include #include #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" int main(int argc, char ** argv) { @@ -30,15 +30,15 @@ int main(int argc, char ** argv) auto node = rclcpp::Node::make_shared("service_example_for_face"); if (argc != 2) { - RCLCPP_INFO(node->get_logger(), "Usage: ros2 run dynamic_vino_sample image_object_client" + RCLCPP_INFO(node->get_logger(), "Usage: ros2 run openvino_node image_object_client" ""); return -1; } std::string image_path = argv[1]; - auto client = node->create_client("/openvino_toolkit/service"); - auto request = std::make_shared(); + auto client = node->create_client("/openvino_toolkit/service"); + auto request = std::make_shared(); request->image_path = image_path; while (!client->wait_for_service(std::chrono::seconds(1))) { diff --git a/sample/src/image_people_server.cpp b/sample/src/image_people_server.cpp index c8e0ced8..2f9fdcc2 100644 --- a/sample/src/image_people_server.cpp +++ b/sample/src/image_people_server.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include +#include #include -#include +#include #include #include #include -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/services/frame_processing_server.hpp" -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "dynamic_vino_lib/inputs/base_input.hpp" -#include "dynamic_vino_lib/inputs/image_input.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino_wrapper_lib/inputs/base_input.hpp" +#include "openvino_wrapper_lib/inputs/image_input.hpp" +#include "openvino/openvino.hpp" #if(defined(USE_OLD_E_PLUGIN_API)) #include #endif @@ -39,7 +39,7 @@ int main(int argc, char ** argv) try { auto node = std::make_shared>("service_people_detection", config_path); + >("service_people_detection", config_path); rclcpp::spin(node); } catch (std::exception & e) { std::cout << e.what() << std::endl; diff --git a/sample/src/parameters.cpp b/sample/src/parameters.cpp index 729633e1..45bef956 100644 --- a/sample/src/parameters.cpp +++ b/sample/src/parameters.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ * \file sample/parameters.cpp */ -#include -#include +#include +#include #include #include #include diff --git a/sample/src/pipeline_composite.cpp b/sample/src/pipeline_composite.cpp index d37c809f..aea1630a 100644 --- a/sample/src/pipeline_composite.cpp +++ b/sample/src/pipeline_composite.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include @@ -37,13 +37,13 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" #if(defined(USE_OLD_E_PLUGIN_API)) #include #endif -#include "inference_engine.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" //#include "utility.hpp" @@ -85,18 +85,18 @@ class ComposablePipeline : public rclcpp::Node } std::shared_ptr node_handler(this); - // auto createPipeline = PipelineManager::getInstance().createPipeline; for (auto & p : pipelines) { PipelineManager::getInstance().createPipeline(p, node_handler); } PipelineManager::getInstance().runAll(); - //PipelineManager::getInstance().joinAll(); } std::string getConfigPath() { - return rclcpp::Node::declare_parameter("config"); + // TODO: Fix api for humble + // return declare_parameter("config").get(); + return ""; } }; diff --git a/sample/src/pipeline_with_params.cpp b/sample/src/pipeline_with_params.cpp index 7bfa8ca8..6d11ff9a 100644 --- a/sample/src/pipeline_with_params.cpp +++ b/sample/src/pipeline_with_params.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Intel Corporation +// Copyright (c) 2018-2022 Intel 
Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include #include @@ -37,14 +37,14 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/services/pipeline_processing_server.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/services/pipeline_processing_server.hpp" +#include "openvino_wrapper_lib/slog.hpp" #if(defined(USE_OLD_E_PLUGIN_API)) #include #endif -#include "inference_engine.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" #include "utility.hpp" @@ -65,12 +65,12 @@ int main(int argc, char * argv[]) rclcpp::executors::SingleThreadedExecutor exec; rclcpp::Node::SharedPtr main_node = rclcpp::Node::make_shared("openvino_pipeline"); rclcpp::Node::SharedPtr service_node = std::make_shared>("pipeline_service"); + >("pipeline_service"); // register signal SIGINT and signal handler //signal(SIGINT, signalHandler); try { - std::cout << "InferenceEngine: " << InferenceEngine::GetInferenceEngineVersion() << std::endl; + std::cout << "OpenVINO: " << ov::get_openvino_version() << std::endl; // ----- Parsing and validation of input args----------------------- std::string config = getConfigPath(argc, argv); @@ -86,7 +86,6 @@ int main(int argc, char * argv[]) if (pipelines.size() < 1) { throw std::logic_error("Pipeline parameters should be set!"); } - // auto createPipeline = PipelineManager::getInstance().createPipeline; for (auto & p : pipelines) { PipelineManager::getInstance().createPipeline(p, main_node); } diff --git a/script/viewer/service.py b/script/viewer/service.py index d0f99702..82d7d2e4 100644 --- a/script/viewer/service.py +++ 
b/script/viewer/service.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -from pipeline_srv_msgs.srv import * +from openvino_msgs.srv import * import rclpy import sys from pipeTree import TreeNode diff --git a/script/viewer/viewer.py b/script/viewer/viewer.py index 93b71918..a82ace37 100644 --- a/script/viewer/viewer.py +++ b/script/viewer/viewer.py @@ -3,7 +3,7 @@ from PyQt5.QtGui import QPainter,QPen,QBrush,QColor from PyQt5.QtCore import QRect from service import reqPipelineService,getTree -from pipeline_srv_msgs.srv import * +from openvino_msgs.srv import * from pipeTree import TreeNode diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 59509625..f6c1fde6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2018-2022 Intel Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,22 +14,22 @@ cmake_minimum_required(VERSION 3.5) -project(dynamic_vino_test) +project(openvino_test) list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) #################################### -## to use C++14 -set(CMAKE_CXX_STANDARD 14) +## to use C++17 +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_FLAGS "-std=c++14 ${CMAKE_CXX_FLAGS}") +set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_CXX_FLAGS}") #################################### -message(STATUS "Looking for inference engine configuration file at: ${CMAKE_PREFIX_PATH}") -find_package(InferenceEngine) -if(NOT InferenceEngine_FOUND) +find_package(OpenVINO) +if(NOT OpenVINO_FOUND) message(FATAL_ERROR "") endif() +set(OpenVINO_LIBRARIES openvino::runtime) # Find OpenCV libray if exists find_package(OpenCV REQUIRED) @@ -51,12 +51,11 @@ find_package(rmw REQUIRED) find_package(std_msgs REQUIRED) find_package(sensor_msgs REQUIRED) find_package(object_msgs REQUIRED) -find_package(people_msgs REQUIRED) -find_package(pipeline_srv_msgs 
REQUIRED) +find_package(openvino_msgs REQUIRED) find_package(class_loader REQUIRED) -find_package(vino_param_lib REQUIRED) +find_package(openvino_param_lib REQUIRED) find_package(yaml_cpp_vendor REQUIRED) -find_package(dynamic_vino_lib REQUIRED) +find_package(openvino_wrapper_lib REQUIRED) set(CMAKE_BUILD_TYPE "Release") if("${CMAKE_BUILD_TYPE}" STREQUAL "") @@ -119,10 +118,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter -Wno-deprecated-de include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/common/format_reader ${CMAKE_CURRENT_SOURCE_DIR}/include - ${InferenceEngine_INCLUDE_DIRS} - ${InferenceEngine_INCLUDE_DIRS}/../samples - ${InferenceEngine_INCLUDE_DIRS}/../samples/extention - ${InferenceEngine_INCLUDE_DIRS}/../src + ${OpenVINO_DIRS} ${realsense2_INCLUDE_DIRS} ) @@ -147,17 +143,16 @@ if(BUILD_TESTING) if(TARGET ${target}) ament_target_dependencies(${target} "rclcpp" - "vino_param_lib" + "openvino_param_lib" "object_msgs" - "people_msgs" - "pipeline_srv_msgs" - "InferenceEngine" - "OpenCV" - "realsense2" + "openvino_msgs" + "OpenVINO" + "OpenCV" + "realsense2" "ament_index_cpp" "yaml_cpp_vendor" "class_loader" - "dynamic_vino_lib") + "openvino_wrapper_lib") endif() endmacro() diff --git a/tests/launch/image_object_service_test.launch.py b/tests/launch/image_object_service_test.launch.py index aa4e85e7..9b8ecb32 100644 --- a/tests/launch/image_object_service_test.launch.py +++ b/tests/launch/image_object_service_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'image_object_service_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_object_server', + package='openvino_node', node_executable='image_object_server', arguments=['-config', default_yaml], output='screen'), ]) 
diff --git a/tests/launch/image_people_service_test.launch.py b/tests/launch/image_people_service_test.launch.py index 6db0d65f..a9519dd4 100644 --- a/tests/launch/image_people_service_test.launch.py +++ b/tests/launch/image_people_service_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'image_people_service_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='image_people_server', + package='openvino_node', node_executable='image_people_server', arguments=['-config', default_yaml], output='screen'), ]) diff --git a/tests/launch/pipeline_face_reidentification_test.launch.py b/tests/launch/pipeline_face_reidentification_test.launch.py index cd4b0844..9515a86e 100644 --- a/tests/launch/pipeline_face_reidentification_test.launch.py +++ b/tests/launch/pipeline_face_reidentification_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_face_reidentification_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', '/ros2_openvino_toolkit/face_detection'), diff --git a/tests/launch/pipeline_face_test.launch.py b/tests/launch/pipeline_face_test.launch.py index 9c08bd36..2311342e 100644 --- a/tests/launch/pipeline_face_test.launch.py +++ b/tests/launch/pipeline_face_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - 
default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_face_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/tests/launch/pipeline_image_test.launch.py b/tests/launch/pipeline_image_test.launch.py index 947667d1..e4a2b738 100644 --- a/tests/launch/pipeline_image_test.launch.py +++ b/tests/launch/pipeline_image_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_image_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/people/detected_objects', diff --git a/tests/launch/pipeline_object_test.launch.py b/tests/launch/pipeline_object_test.launch.py index b324415d..752080c7 100644 --- a/tests/launch/pipeline_object_test.launch.py +++ b/tests/launch/pipeline_object_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_object_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', 
node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/tests/launch/pipeline_reidentification_test.launch.py b/tests/launch/pipeline_reidentification_test.launch.py index b29f0316..080d619c 100644 --- a/tests/launch/pipeline_reidentification_test.launch.py +++ b/tests/launch/pipeline_reidentification_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_reidentification_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_objects', diff --git a/tests/launch/pipeline_segmentation_test.launch.py b/tests/launch/pipeline_segmentation_test.launch.py index 613e5747..5cceb3eb 100644 --- a/tests/launch/pipeline_segmentation_test.launch.py +++ b/tests/launch/pipeline_segmentation_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_segmentation_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/segmentation/segmented_obejcts', diff --git a/tests/launch/pipeline_vehicle_detection_test.launch.py b/tests/launch/pipeline_vehicle_detection_test.launch.py index b039ec8d..deb7cda3 100644 --- 
a/tests/launch/pipeline_vehicle_detection_test.launch.py +++ b/tests/launch/pipeline_vehicle_detection_test.launch.py @@ -22,12 +22,12 @@ def generate_launch_description(): - default_yaml = os.path.join(get_package_share_directory('dynamic_vino_test'), 'param', + default_yaml = os.path.join(get_package_share_directory('openvino_test'), 'param', 'pipeline_vehicle_detection_test.yaml') return LaunchDescription([ # Openvino detection launch_ros.actions.Node( - package='dynamic_vino_sample', node_executable='pipeline_with_params', + package='openvino_node', node_executable='pipeline_with_params', arguments=['-config', default_yaml], remappings=[ ('/openvino_toolkit/object/detected_license_plates', diff --git a/tests/package.xml b/tests/package.xml index 6a0e4085..53577f50 100644 --- a/tests/package.xml +++ b/tests/package.xml @@ -1,7 +1,7 @@ - dynamic_vino_test + openvino_test 0.9.0 a ROS2 wrapper package for Intel OpenVINO Weizhi Liu @@ -35,12 +35,11 @@ limitations under the License. class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 openvino_common - dynamic_vino_lib + openvino_wrapper_lib rosidl_default_runtime builtin_interfaces @@ -53,11 +52,10 @@ limitations under the License. 
class_loader cv_bridge object_msgs - people_msgs - pipeline_srv_msgs - vino_param_lib + openvino_msgs + openvino_param_lib realsense2 - dynamic_vino_lib + openvino_wrapper_lib ament_lint_auto ament_lint_common diff --git a/tests/param/image_object_service_test.yaml b/tests/param/image_object_service_test.yaml index 9a1ffd0b..baea4479 100644 --- a/tests/param/image_object_service_test.yaml +++ b/tests/param/image_object_service_test.yaml @@ -1,10 +1,10 @@ Pipelines: - name: object inputs: [Image] - input_path: "/opt/openvino_toolkit/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/car_vihecle.png" + input_path: to/be/set/image_path infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/image_people_service_test.yaml b/tests/param/image_people_service_test.yaml index ec579426..40f6513e 100644 --- a/tests/param/image_people_service_test.yaml +++ b/tests/param/image_people_service_test.yaml @@ -1,26 +1,27 @@ Pipelines: - name: people inputs: [Image] + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 16 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: 
/opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 @@ -36,6 +37,5 @@ Pipelines: right: [RosService] - left: HeadPoseEstimation right: [RosService] - input_path: "/home/intel/ros2_overlay_ws/src/ros2_openvino_toolkit/data/images/team.png" Common: diff --git a/tests/param/pipeline_anormal.yaml b/tests/param/pipeline_anormal.yaml index 8f5f5146..2e3a4214 100644 --- a/tests/param/pipeline_anormal.yaml +++ b/tests/param/pipeline_anormal.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: Objectdetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 @@ -21,7 +21,7 @@ Pipelines: inputs: [StandardCamera, Image, video] infers: - name: - model: 
/opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_face_reid_video.yaml b/tests/param/pipeline_face_reid_video.yaml index 517178a3..82986615 100644 --- a/tests/param/pipeline_face_reid_video.yaml +++ b/tests/param/pipeline_face_reid_video.yaml @@ -1,22 +1,22 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/face_reid.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: 
/opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_face_reidentification_test.yaml b/tests/param/pipeline_face_reidentification_test.yaml index 6313811a..54ee42ee 100644 --- a/tests/param/pipeline_face_reidentification_test.yaml +++ b/tests/param/pipeline_face_reidentification_test.yaml @@ -3,19 +3,19 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: /opt/openvino_toolkit/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: LandmarksDetection - model: /opt/openvino_toolkit/models/landmarks-regression/output/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml + model: /opt/openvino_toolkit/models/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: FaceReidentification - model: /opt/openvino_toolkit/models/face-reidentification/output/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml + model: /opt/openvino_toolkit/models/intel/face-reidentification-retail-0095/FP32/face-reidentification-retail-0095.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_face_test.yaml 
b/tests/param/pipeline_face_test.yaml index 3aca2024..68b395c9 100644 --- a/tests/param/pipeline_face_test.yaml +++ b/tests/param/pipeline_face_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/people_detection.mp4 + input_path: to/be/set/video_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: /opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: 
/opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_image_test.yaml b/tests/param/pipeline_image_test.yaml index 50540acd..074cbd22 100644 --- a/tests/param/pipeline_image_test.yaml +++ b/tests/param/pipeline_image_test.yaml @@ -1,27 +1,27 @@ Pipelines: - name: people inputs: [Image] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/data/images/team.jpg + input_path: to/be/set/image_path infers: - name: FaceDetection - model: /opt/openvino_toolkit/models/face_detection/output/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/face-detection-adas-0001/FP16/face-detection-adas-0001.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: AgeGenderRecognition - model: /opt/openvino_toolkit/models/age-gender-recognition/output/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 16 - name: EmotionRecognition - model: /opt/openvino_toolkit/models/emotions-recognition/output/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml + model: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/emotions-recognition-retail-0003/FP32/emotions-recognition-retail-0003.labels batch: 16 - name: HeadPoseEstimation - model: 
/opt/openvino_toolkit/models/head-pose-estimation/output/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml + model: /opt/openvino_toolkit/models/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_object_test.yaml b/tests/param/pipeline_object_test.yaml index c45999ec..542d3142 100644 --- a/tests/param/pipeline_object_test.yaml +++ b/tests/param/pipeline_object_test.yaml @@ -3,7 +3,7 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/mobilenet-ssd/caffe/output/FP16/mobilenet-ssd.xml + model: /opt/openvino_toolkit/models/convert/public/mobilenet-ssd/FP16/mobilenet-ssd.xml engine: GPU label: to/be/set/xxx.labels batch: 16 diff --git a/tests/param/pipeline_object_yolo_test.yaml b/tests/param/pipeline_object_yolo_test.yaml index dfdbe15d..35fc06c9 100644 --- a/tests/param/pipeline_object_yolo_test.yaml +++ b/tests/param/pipeline_object_yolo_test.yaml @@ -3,9 +3,8 @@ Pipelines: inputs: [RealSenseCamera] infers: - name: ObjectDetection - model: /opt/openvino_toolkit/models/object_detection/YOLOv2-voc/tf/output/FP16/yolov2-voc.xml - #model: /opt/openvino_toolkit/darkflow/output/fp16/yolov2-voc.xml - model_type: yolov2 + model: /opt/openvino_toolkit/models/convert/public/yolov5n/FP32/yolov5n.xml + model_type: yolov5 engine: GPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_reidentification_test.yaml b/tests/param/pipeline_reidentification_test.yaml index 9f854572..527742fe 100644 --- a/tests/param/pipeline_reidentification_test.yaml +++ b/tests/param/pipeline_reidentification_test.yaml @@ -1,17 +1,17 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/people_reid.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/person-detection/output/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml + model: /opt/openvino_toolkit/models/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml engine: CPU label: to/be/set/xxx.labels batch: 1 confidence_threshold: 0.5 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: PersonReidentification - model: /opt/openvino_toolkit/models/person-reidentification/output/intel/person-reidentification-retail-0076/FP32/person-reidentification-retail-0076.xml + model: /opt/openvino_toolkit/models/intel/person-reidentification-retail-0277/FP32/person-reidentification-retail-0277.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/param/pipeline_segmentation_test.yaml b/tests/param/pipeline_segmentation_test.yaml index 74d22368..7ba13e01 100644 --- a/tests/param/pipeline_segmentation_test.yaml +++ b/tests/param/pipeline_segmentation_test.yaml @@ -1,7 +1,7 @@ Pipelines: - name: segmentation inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/segmentation.mp4 + input_path: to/be/set/video_path infers: - name: ObjectSegmentation model: /opt/openvino_toolkit/models/segmentation/output/FP16/frozen_inference_graph.xml diff --git a/tests/param/pipeline_vehicle_detection_test.yaml b/tests/param/pipeline_vehicle_detection_test.yaml index d425a36d..ae4c173b 100644 --- a/tests/param/pipeline_vehicle_detection_test.yaml +++ b/tests/param/pipeline_vehicle_detection_test.yaml @@ -1,21 +1,21 @@ Pipelines: - name: object inputs: [Video] - input_path: /opt/openvino_toolkit/ros2_openvino_toolkit/tests/data/vehicle_detection.mp4 + input_path: to/be/set/video_path infers: - name: ObjectDetection - model: 
/opt/openvino_toolkit/models/vehicle-license-plate-detection/output/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml + model: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.xml engine: CPU - label: to/be/set/xxx.labels + label: /opt/openvino_toolkit/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106.labels batch: 1 enable_roi_constraint: true # set enable_roi_constraint to false if you don't want to make the inferred ROI (region of interest) constrained into the camera frame - name: VehicleAttribsDetection - model: /opt/openvino_toolkit/models/vehicle-attributes-recongnition/output/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml + model: /opt/openvino_toolkit/models/intel/vehicle-attributes-recognition-barrier-0039/FP32/vehicle-attributes-recognition-barrier-0039.xml engine: CPU label: to/be/set/xxx.labels batch: 1 - name: LicensePlateDetection - model: /opt/openvino_toolkit/models/license-plate-recognition/output/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml + model: /opt/openvino_toolkit/models/intel/license-plate-recognition-barrier-0001/FP32/license-plate-recognition-barrier-0001.xml engine: CPU label: to/be/set/xxx.labels batch: 1 diff --git a/tests/src/lib/unittest_createPipelineCheck.cpp b/tests/src/lib/unittest_createPipelineCheck.cpp index 2e48fb34..b531391b 100644 --- a/tests/src/lib/unittest_createPipelineCheck.cpp +++ b/tests/src/lib/unittest_createPipelineCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -32,16 +32,16 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" std::string getConfigPath(std::string config_file) { std::string content; std::string prefix_path; - ament_index_cpp::get_resource("packages", "dynamic_vino_test", content, &prefix_path); - return prefix_path + "/share/dynamic_vino_test/param/" + config_file; + ament_index_cpp::get_resource("packages", "openvino_test", content, &prefix_path); + return prefix_path + "/share/openvino_test/param/" + config_file; } TEST(UnitTestCheckPipeline, testCreatePipeline) diff --git a/tests/src/service/unittest_objectService.cpp b/tests/src/service/unittest_objectService.cpp index 90b66a12..ecf11b33 100644 --- a/tests/src/service/unittest_objectService.cpp +++ b/tests/src/service/unittest_objectService.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Intel Corporation. All Rights Reserved +// Copyright (c) 2017-2022 Intel Corporation. All Rights Reserved // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ // limitations under the License. 
#include -#include +#include #include #include #include @@ -22,7 +22,7 @@ #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" std::string generate_file_path(std::string path) { @@ -79,7 +79,7 @@ int main(int argc, char ** argv) rclcpp::init(argc, argv); testing::InitGoogleTest(&argc, argv); auto offset = std::chrono::seconds(20); - system("ros2 launch dynamic_vino_test image_object_service_test.launch.py &"); + system("ros2 launch openvino_test image_object_service_test.launch.py &"); rclcpp::sleep_for(offset); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT image_object_server &"); diff --git a/tests/src/service/unittest_peopleService.cpp b/tests/src/service/unittest_peopleService.cpp index 7e944ecc..eca6a0c7 100644 --- a/tests/src/service/unittest_peopleService.cpp +++ b/tests/src/service/unittest_peopleService.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Intel Corporation. All Rights Reserved +// Copyright (c) 2017-2022 Intel Corporation. All Rights Reserved // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include -#include +#include +#include #include -#include +#include #include #include #include @@ -24,7 +24,7 @@ #include #include -#include "dynamic_vino_lib/services/frame_processing_server.hpp" +#include "openvino_wrapper_lib/services/frame_processing_server.hpp" std::string generate_file_path(std::string path) { @@ -38,11 +38,11 @@ TEST(UnitTestPeople, testPeople) { auto node = rclcpp::Node::make_shared("openvino_people_service_test"); - auto client = node->create_client("/openvino_toolkit/service"); + auto client = node->create_client("/openvino_toolkit/service"); ASSERT_TRUE(client->wait_for_service(std::chrono::seconds(20))); - auto request = std::make_shared(); + auto request = std::make_shared(); std::string buffer = generate_file_path("data/images/team.jpg"); std::cout << buffer << std::endl; @@ -71,7 +71,7 @@ int main(int argc, char ** argv) rclcpp::init(argc, argv); testing::InitGoogleTest(&argc, argv); auto offset = std::chrono::seconds(20); - system("ros2 launch dynamic_vino_test image_people_service_test.launch.py &"); + system("ros2 launch openvino_test image_people_service_test.launch.py &"); rclcpp::sleep_for(offset); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT image_people_server &"); diff --git a/tests/src/topic/unittest_faceDetectionCheck.cpp b/tests/src/topic/unittest_faceDetectionCheck.cpp index 95fc2118..90c007ed 100644 --- a/tests/src/topic/unittest_faceDetectionCheck.cpp +++ b/tests/src/topic/unittest_faceDetectionCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,13 +14,13 @@ #include #include -#include -#include +#include +#include #include -#include -#include +#include +#include #include -#include +#include #include #include @@ -37,10 +37,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -102,7 +102,7 @@ TEST(UnitTestFaceDetection, testEmotionDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_emotionRecognition_callback = - [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { emotion_test_pass = true; sub_called.set_value(true); }; @@ -111,7 +111,7 @@ TEST(UnitTestFaceDetection, testEmotionDetection) executor.add_node(node); { - auto sub2 = node->create_subscription( + auto sub2 = node->create_subscription( "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); executor.spin_once(std::chrono::seconds(0)); @@ -130,7 +130,7 @@ TEST(UnitTestFaceDetection, testageGenderDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_ageGender_callback = - [&sub_called](const people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { ageGender_test_pass = true; sub_called.set_value(true); }; @@ -139,7 +139,7 @@ TEST(UnitTestFaceDetection, testageGenderDetection) executor.add_node(node); { - auto sub3 = node->create_subscription( + auto sub3 = node->create_subscription( "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); 
executor.spin_once(std::chrono::seconds(0)); @@ -158,7 +158,7 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_headPose_callback = - [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { headPose_test_pass = true; sub_called.set_value(true); }; @@ -167,7 +167,7 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) executor.add_node(node); { - auto sub4 = node->create_subscription( + auto sub4 = node->create_subscription( "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); executor.spin_once(std::chrono::seconds(0)); @@ -183,7 +183,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_face_test.launch.py &"); + system("ros2 launch openvino_test pipeline_face_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_face_reidentification.cpp b/tests/src/topic/unittest_face_reidentification.cpp index 395e1bb4..439d86b0 100644 --- a/tests/src/topic/unittest_face_reidentification.cpp +++ b/tests/src/topic/unittest_face_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,12 +14,12 @@ #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#include +#include #include #include @@ -36,10 +36,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -100,7 +100,7 @@ TEST(UnitTestFaceReidentification, testLandmarkDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_landmark_detection_callback = - [&sub_called](const people_msgs::msg::LandmarkStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::LandmarkStamped::SharedPtr msg) -> void { if(msg->landmarks.size() > 0) landmark_detection = true; sub_called.set_value(true); @@ -110,7 +110,7 @@ TEST(UnitTestFaceReidentification, testLandmarkDetection) executor.add_node(node); { - auto sub1 = node->create_subscription( + auto sub1 = node->create_subscription( "/ros2_openvino_toolkit/detected_landmarks", qos, openvino_landmark_detection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -128,7 +128,7 @@ TEST(UnitTestFaceReidentification, testReidentification) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_face_reidentification_callback = - [&sub_called](const people_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { if(msg->reidentified_vector.size() > 0) test_pass = true; sub_called.set_value(true); @@ -138,7 +138,7 @@ TEST(UnitTestFaceReidentification, testReidentification) executor.add_node(node); { - auto sub1 = node->create_subscription( + auto sub1 = node->create_subscription( 
"/ros2_openvino_toolkit/reidentified_faces", qos, openvino_face_reidentification_callback); executor.spin_once(std::chrono::seconds(0)); @@ -154,7 +154,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_face_reidentification_test.launch.py &"); + system("ros2 launch openvino_test pipeline_face_reidentification_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_imageCheck.cpp b/tests/src/topic/unittest_imageCheck.cpp index 7f4a333a..4f3359ac 100644 --- a/tests/src/topic/unittest_imageCheck.cpp +++ b/tests/src/topic/unittest_imageCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,13 +14,13 @@ #include #include -#include -#include +#include +#include #include -#include -#include +#include +#include #include -#include +#include #include #include @@ -37,10 +37,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -102,7 +102,7 @@ TEST(UnitTestFaceDetection, testEmotionDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_emotionRecognition_callback = - [&sub_called](const people_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::EmotionsStamped::SharedPtr msg) -> void { emotion_test_pass = true; sub_called.set_value(true); }; @@ -111,7 +111,7 @@ TEST(UnitTestFaceDetection, testEmotionDetection) executor.add_node(node); { - auto sub2 = node->create_subscription( + auto sub2 = node->create_subscription( "/ros2_openvino_toolkit/emotions_recognition", qos, openvino_emotionRecognition_callback); executor.spin_once(std::chrono::seconds(0)); @@ -130,7 +130,7 @@ TEST(UnitTestFaceDetection, testageGenderDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_ageGender_callback = - [&sub_called](const people_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::AgeGenderStamped::SharedPtr msg) -> void { ageGender_test_pass = true; sub_called.set_value(true); }; @@ -139,7 +139,7 @@ TEST(UnitTestFaceDetection, testageGenderDetection) executor.add_node(node); { - auto sub3 = node->create_subscription( + auto sub3 = node->create_subscription( "/ros2_openvino_toolkit/age_genders_Recognition", qos, openvino_ageGender_callback); 
executor.spin_once(std::chrono::seconds(0)); @@ -158,7 +158,7 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_headPose_callback = - [&sub_called](const people_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::HeadPoseStamped::SharedPtr msg) -> void { headPose_test_pass = true; sub_called.set_value(true); }; @@ -167,7 +167,7 @@ TEST(UnitTestFaceDetection, testheadPoseDetection) executor.add_node(node); { - auto sub4 = node->create_subscription( + auto sub4 = node->create_subscription( "/ros2_openvino_toolkit/headposes_estimation", qos, openvino_headPose_callback); executor.spin_once(std::chrono::seconds(0)); @@ -183,7 +183,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_image_test.launch.py &"); + system("ros2 launch openvino_test pipeline_image_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_objectDetectionCheck.cpp b/tests/src/topic/unittest_objectDetectionCheck.cpp index ad5facf2..e5917c13 100644 --- a/tests/src/topic/unittest_objectDetectionCheck.cpp +++ b/tests/src/topic/unittest_objectDetectionCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include @@ -32,10 +32,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -91,7 +91,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(60); - system("ros2 launch dynamic_vino_test pipeline_object_test.launch.py &"); + system("ros2 launch openvino_test pipeline_object_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_reidentification.cpp b/tests/src/topic/unittest_reidentification.cpp index 05c10b11..b4fc16c2 100644 --- a/tests/src/topic/unittest_reidentification.cpp +++ b/tests/src/topic/unittest_reidentification.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,10 +14,10 @@ #include #include -#include -#include +#include +#include #include -#include +#include #include #include @@ -34,10 +34,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -69,7 +69,7 @@ TEST(UnitTestPersonReidentification, testReidentification) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_reidentification_callback = - [&sub_called](const people_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::ReidentificationStamped::SharedPtr msg) -> void { test_pass = true; sub_called.set_value(true); }; @@ -78,7 +78,7 @@ TEST(UnitTestPersonReidentification, testReidentification) executor.add_node(node); { - auto sub1 = node->create_subscription( + auto sub1 = node->create_subscription( "/ros2_openvino_toolkit/reidentified_persons", qos, openvino_reidentification_callback); executor.spin_once(std::chrono::seconds(0)); @@ -94,7 +94,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_reidentification_test.launch.py &"); + system("ros2 launch openvino_test pipeline_reidentification_test.launch.py &"); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT pipeline_with_params &"); rclcpp::shutdown(); diff --git a/tests/src/topic/unittest_segmentationCheck.cpp b/tests/src/topic/unittest_segmentationCheck.cpp index 52d6e278..33d76d5c 100644 --- a/tests/src/topic/unittest_segmentationCheck.cpp +++ b/tests/src/topic/unittest_segmentationCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 
2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,9 +14,9 @@ #include #include -#include +#include #include -#include +#include #include #include @@ -33,10 +33,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -67,7 +67,7 @@ TEST(UnitTestObjectDetection, testObjectDetection) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_faceDetection_callback = - [&sub_called](const people_msgs::msg::ObjectsInMasks::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::ObjectsInMasks::SharedPtr msg) -> void { test_pass = true; sub_called.set_value(true); }; @@ -76,7 +76,7 @@ TEST(UnitTestObjectDetection, testObjectDetection) executor.add_node(node); { - auto sub1 = node->create_subscription( + auto sub1 = node->create_subscription( "/ros2_openvino_toolkit/segmented_obejcts", qos, openvino_faceDetection_callback); executor.spin_once(std::chrono::seconds(0)); @@ -92,7 +92,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(60); - system("ros2 launch dynamic_vino_test pipeline_segmentation_test.launch.py &"); + system("ros2 launch openvino_test pipeline_segmentation_test.launch.py &"); int ret = RUN_ALL_TESTS(); rclcpp::sleep_for(offset); system("killall -s SIGINT pipeline_with_params &"); diff --git a/tests/src/topic/unittest_vehicleDetectionCheck.cpp b/tests/src/topic/unittest_vehicleDetectionCheck.cpp index 
e325ba31..41781526 100644 --- a/tests/src/topic/unittest_vehicleDetectionCheck.cpp +++ b/tests/src/topic/unittest_vehicleDetectionCheck.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2018-2022 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,12 +14,12 @@ #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#include +#include #include #include @@ -36,10 +36,10 @@ #include #include -#include "dynamic_vino_lib/pipeline.hpp" -#include "dynamic_vino_lib/pipeline_manager.hpp" -#include "dynamic_vino_lib/slog.hpp" -#include "inference_engine.hpp" +#include "openvino_wrapper_lib/pipeline.hpp" +#include "openvino_wrapper_lib/pipeline_manager.hpp" +#include "openvino_wrapper_lib/slog.hpp" +#include "openvino/openvino.hpp" #include "librealsense2/rs.hpp" #include "opencv2/opencv.hpp" @@ -71,7 +71,7 @@ TEST(UnitTestPersonReidentification, testReidentification) std::shared_future sub_called_future(sub_called.get_future()); auto openvino_vehicle_callback = - [&sub_called](const people_msgs::msg::LicensePlateStamped::SharedPtr msg) -> void { + [&sub_called](const object_msgs::msg::LicensePlateStamped::SharedPtr msg) -> void { test_pass = true; sub_called.set_value(true); }; @@ -80,7 +80,7 @@ TEST(UnitTestPersonReidentification, testReidentification) executor.add_node(node); { - auto sub1 = node->create_subscription( + auto sub1 = node->create_subscription( "/ros2_openvino_toolkit/detected_license_plates", qos, openvino_vehicle_callback); executor.spin_once(std::chrono::seconds(0)); @@ -96,7 +96,7 @@ int main(int argc, char * argv[]) testing::InitGoogleTest(&argc, argv); rclcpp::init(argc, argv); auto offset = std::chrono::seconds(30); - system("ros2 launch dynamic_vino_test pipeline_vehicle_detection_test.launch.py &"); + system("ros2 launch openvino_test 
pipeline_vehicle_detection_test.launch.py &"); int ret = RUN_ALL_TESTS(); system("killall -s SIGINT pipeline_with_params &"); rclcpp::shutdown(); diff --git a/vino_param_lib/include/vino_param_lib/slog.hpp b/vino_param_lib/include/vino_param_lib/slog.hpp deleted file mode 120000 index f8aaab99..00000000 --- a/vino_param_lib/include/vino_param_lib/slog.hpp +++ /dev/null @@ -1 +0,0 @@ -../../../dynamic_vino_lib/include/dynamic_vino_lib/slog.hpp \ No newline at end of file