diff --git a/docker/OpenCV/Dockerfile b/docker/OpenCV/Dockerfile new file mode 100644 index 000000000..45b75fc5c --- /dev/null +++ b/docker/OpenCV/Dockerfile @@ -0,0 +1,112 @@ +FROM ubuntu:20.04 +USER root +WORKDIR / + +# Setup timezone +ENV TZ=Europe/Moscow +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# Add user +RUN useradd -ms /bin/bash openvino && \ + chown openvino -R /home/openvino + +# Install dependencies +ARG DEPENDENCIES="apt-utils \ + autoconf \ + sudo \ + vim \ + automake \ + build-essential \ + cmake \ + cpio \ + curl \ + dialog \ + gnupg2 \ + libdrm2 \ + libglib2.0-0 \ + lsb-release \ + libgtk-3-0 \ + libtool \ + python3-pip \ + python3-setuptools \ + python3-dev \ + python3-venv \ + pciutils \ + libpython3.8 \ + udev \ + unzip \ + wget \ + git \ + ninja-build" +RUN apt-get update && \ + apt-get install -y -qq --no-install-recommends ${DEPENDENCIES} && \ + rm -rf /var/lib/apt/lists/* + +# Install OpenVINO +ENV INSTALL_DIR /opt/intel/openvino_2022 +ARG OV_SHORT_VERSION=2022.3 +ARG OV_LONG_VERSION="${OV_SHORT_VERSION}.0.9052.9752fafe8eb_x86_64" +RUN sudo mkdir /opt/intel && \ + wget https://storage.openvinotoolkit.org/repositories/openvino/packages/${OV_SHORT_VERSION}/linux/l_openvino_toolkit_ubuntu20_${OV_LONG_VERSION}.tgz \ + -O /tmp/openvino.tgz --no-check-certificate --quiet && \ + tar -zxvf /tmp/openvino.tgz -C /opt/intel/ && \ + sudo ln -s /opt/intel/l_openvino_toolkit_ubuntu20_${OV_LONG_VERSION} $INSTALL_DIR && \ + rm -rf /tmp/* + +# Install OpenVINO dependencies +WORKDIR $INSTALL_DIR/install_dependencies +RUN ls -la $INSTALL_DIR/install_dependencies && echo y | ./install_openvino_dependencies.sh +RUN cat /root/.bashrc > tmp && echo 'source $INSTALL_DIR/setupvars.sh' > /root/.bashrc + +# Configure for GPU, MYRIAD +RUN echo y | ./install_NEO_OCL_driver.sh +RUN usermod -a -G video,users root + +# Install openvino-dev +WORKDIR /tmp/ +RUN pip3 install --upgrade pip +RUN pip3 install openvino-dev==${OV_SHORT_VERSION} + +# Install OpenCV +ARG OCV_VERSION=4.7.0 +RUN pip uninstall opencv-python -y +RUN git clone --recurse-submodules https://github.com/opencv/opencv.git --depth 1 --branch ${OCV_VERSION} --single-branch +ENV OpenCV_BUILD_DIR=/root/build-opencv +RUN mkdir $OpenCV_BUILD_DIR +WORKDIR $OpenCV_BUILD_DIR +RUN /bin/bash -c 'source $INSTALL_DIR/setupvars.sh && \ + cmake -G Ninja \ + -D CMAKE_INSTALL_PREFIX=install \ + -D CMAKE_BUILD_TYPE=Release \ + -D BUILD_EXAMPLES=OFF \ + -D BUILD_TESTS=OFF \ + -D BUILD_DOCS=OFF \ + -D WITH_OPENVINO=ON \ + -D OPENCV_LIB_INSTALL_PATH=lib \ + -D OPENCV_CONFIG_INSTALL_PATH=cmake \ + -D PYTHON3_PACKAGES_PATH=install/python/python3 \ + /tmp/opencv/ && ninja && cmake --install .' 
&& \ + rm -rf /tmp/* +ENV OpenCV_INSTALL_DIR="$OpenCV_BUILD_DIR/install" +ENV OpenCV_DIR="$OpenCV_INSTALL_DIR/cmake" +ENV LD_LIBRARY_PATH="$OpenCV_INSTALL_DIR/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" +ENV PYTHONPATH="$OpenCV_INSTALL_DIR/python/python3/cv2/python-3.8${PYTHONPATH:+:$PYTHONPATH}" + +#accuracy-check +ARG OMZ_VERSION="${OV_SHORT_VERSION}.0" +WORKDIR /tmp/ +RUN git clone --recursive https://github.com/openvinotoolkit/open_model_zoo.git --branch ${OMZ_VERSION} --single-branch --depth 1 +WORKDIR /tmp/open_model_zoo/tools/accuracy_checker +RUN wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz +RUN tar xvf cifar-10-python.tar.gz -C sample +RUN /bin/bash -c 'source $INSTALL_DIR/setupvars.sh && \ + accuracy_check -c sample/sample_config.yml -m data/test_models -s sample' + +# Download DLI source code +WORKDIR /tmp/ +RUN git clone https://github.com/itlab-vision/dl-benchmark.git --depth 1 && \ + pip3 install docker PyYAML + +# Download dataset if repository with dataset is set +ARG DATASET_DOWNLOAD_LINK +RUN if [ -z "$DATASET_DOWNLOAD_LINK" ] ; then echo Argument DATASET_DOWNLOAD_LINK not provided ; else git clone $DATASET_DOWNLOAD_LINK ; fi \ No newline at end of file diff --git a/src/benchmark/README.md b/src/benchmark/README.md index 55371a3c9..2e78c4b04 100644 --- a/src/benchmark/README.md +++ b/src/benchmark/README.md @@ -206,7 +206,10 @@ python3 inference_benchmark.py \ ### Использование 1. В конфигурационном файле (секция `FrameworkDependent`) - укажите `Mode`: `ovbenchmark_cpp_latency` или `ovbenchmark_cpp_throughput`. + укажите `Mode`: `sync` или `async`; + `CodeSource`: `ovbenchmark`; + `Runtime`: `cpp`; + `Hint`: `none`, `latency` или `throughput`. 1. Найдите исполняемый файл `benchmark_app` по адресу, приведенному ниже. @@ -236,7 +239,10 @@ pip install openvino_dev[mxnet,caffe,caffe2,onnx,pytorch,tensorflow2]==.+)$') + for line in self._output: + res = regex.search(line) + if res: + return res.group('value') + return None + class OpenVINOBenchmarkPythonProcess(OpenVINOBenchmarkProcess): - def __init__(self, test, executor, log, perf_hint=''): - super().__init__(test, executor, log, perf_hint) - self._perf_hint = perf_hint + def __init__(self, test, executor, log, perf_hint='', api_mode=''): + super().__init__(test, executor, log, perf_hint, api_mode) @staticmethod def create_process(test, executor, log): @@ -88,44 +106,37 @@ def _fill_command_line(self): batch = self._test.indep_parameters.batch_size device = self._test.indep_parameters.device iteration = self._test.indep_parameters.iteration + frontend = self._test.dep_parameters.frontend + time = int(self._test.indep_parameters.test_time_limit) + + arguments = f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration} -t {time}' - arguments = f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration}' + arguments = self._add_api_mode_for_cmd_line(arguments, self._api_mode) arguments = self._add_perf_hint_for_cmd_line(arguments, self._perf_hint) arguments = self._add_common_arguments(arguments, device) + if frontend != 'IR': + arguments = self._add_optional_argument_to_cmd_line(arguments, '-imean', self._test.dep_parameters.mean) + arguments = self._add_optional_argument_to_cmd_line(arguments, '-iscale', + self._test.dep_parameters.input_scale) command_line = f'benchmark_app {arguments}' - return command_line - - -class OpenVINOBenchmarkPythonOnnxProcess(OpenVINOBenchmarkPythonProcess): - def __init__(self, test, executor, log): - super().__init__(test, executor, log, 'none') - - 
@staticmethod - def create_process(test, executor, log): - return OpenVINOBenchmarkPythonOnnxProcess(test, executor, log) - - def _fill_command_line(self): - model_xml = self._test.model.model - dataset = self._test.dataset.path - batch = self._test.indep_parameters.batch_size - device = self._test.indep_parameters.device - iteration = self._test.indep_parameters.iteration - arguments = (f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration} ' - f'-hint none -api sync ') - - arguments = self._add_common_arguments(arguments, device) - arguments = self._add_optional_argument_to_cmd_line(arguments, '-imean', self._test.dep_parameters.mean) - arguments = self._add_optional_argument_to_cmd_line(arguments, '-iscale', self._test.dep_parameters.input_scale) - - command_line = f'benchmark_app {arguments}' return command_line + def extract_inference_param(self, key): + if key == 'nireq': + regex = re.compile(r'\s*(\d+)\s*inference\s+requests') + for line in self._output: + if 'Measuring performance' in line: + res = regex.search(line) + if res: + return res.group(1) + return None + return super().extract_inference_param(key) + class OpenVINOBenchmarkCppProcess(OpenVINOBenchmarkProcess): - def __init__(self, test, executor, log, cpp_benchmarks_dir, perf_hint=''): - super().__init__(test, executor, log, perf_hint) - self._perf_hint = perf_hint + def __init__(self, test, executor, log, cpp_benchmarks_dir, perf_hint='', api_mode=''): + super().__init__(test, executor, log, perf_hint, api_mode) invalid_path_exception = ValueError('Must provide valid path to the folder ' 'with OpenVINO C++ benchmark_app (--openvino_cpp_benchmark_dir)') @@ -148,21 +159,29 @@ def _fill_command_line(self): batch = self._test.indep_parameters.batch_size device = self._test.indep_parameters.device iteration = self._test.indep_parameters.iteration + frontend = self._test.dep_parameters.frontend + time = int(self._test.indep_parameters.test_time_limit) - arguments = (f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration} ' + arguments = (f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration} -t {time} ' f'-report_type "no_counters" -json_stats -report_folder {self._report_path.parent.absolute()}') + arguments = self._add_api_mode_for_cmd_line(arguments, self._api_mode) arguments = self._add_perf_hint_for_cmd_line(arguments, self._perf_hint) arguments = self._add_common_arguments(arguments, device) + if frontend != 'IR': + arguments = self._add_optional_argument_to_cmd_line(arguments, '-imean', self._test.dep_parameters.mean) + arguments = self._add_optional_argument_to_cmd_line(arguments, '-iscale', + self._test.dep_parameters.input_scale) command_line = f'{self._benchmark_path} {arguments}' + return command_line def get_performance_metrics(self): if self._status != 0 or len(self._output) == 0: return None, None, None - report = json.loads(self._executor.get_file_content(self._report_path)) + report = self.get_json_report_content() # calculate average time of single pass metric to align output with custom launchers MILLISECONDS_IN_SECOND = 1000 @@ -176,29 +195,7 @@ def get_performance_metrics(self): return average_time_of_single_pass, fps, latency - -class OpenVINOBenchmarkCppOnnxProcess(OpenVINOBenchmarkCppProcess): - def __init__(self, test, executor, log, cpp_benchmarks_dir): - super().__init__(test, executor, log, cpp_benchmarks_dir, 'none') - - @staticmethod - def create_process(test, executor, log, cpp_benchmarks_dir=None): - return OpenVINOBenchmarkCppOnnxProcess(test, 
executor, log, cpp_benchmarks_dir) - - def _fill_command_line(self): - model_xml = self._test.model.model - dataset = self._test.dataset.path - batch = self._test.indep_parameters.batch_size - device = self._test.indep_parameters.device - iteration = self._test.indep_parameters.iteration - - arguments = (f'-m {model_xml} -i {dataset} -b {batch} -d {device} -niter {iteration} ' - f'-hint none -api sync -report_type "no_counters" ' - f'-json_stats -report_folder {self._report_path.parent.absolute()}') - - arguments = self._add_common_arguments(arguments, device) - arguments = self._add_optional_argument_to_cmd_line(arguments, '-imean', self._test.dep_parameters.mean) - arguments = self._add_optional_argument_to_cmd_line(arguments, '-iscale', self._test.dep_parameters.input_scale) - - command_line = f'{self._benchmark_path} {arguments}' - return command_line + def extract_inference_param(self, key): + if key == 'nireq': + return self.get_json_report_content()['configuration_setup']['nireq'] + return super().extract_inference_param(key) diff --git a/src/benchmark/frameworks/openvino/openvino_parameters_parser.py b/src/benchmark/frameworks/openvino/openvino_parameters_parser.py index 635a33215..eadf1ad7e 100644 --- a/src/benchmark/frameworks/openvino/openvino_parameters_parser.py +++ b/src/benchmark/frameworks/openvino/openvino_parameters_parser.py @@ -8,6 +8,10 @@ class OpenVINOParametersParser(DependentParametersParser): def parse_parameters(self, curr_test): CONFIG_FRAMEWORK_DEPENDENT_TAG = 'FrameworkDependent' CONFIG_FRAMEWORK_DEPENDENT_MODE_TAG = 'Mode' + CONFIG_FRAMEWORK_DEPENDENT_CODE_SOURCE_TAG = 'CodeSource' + CONFIG_FRAMEWORK_DEPENDENT_RUNTIME_TAG = 'Runtime' + CONFIG_FRAMEWORK_DEPENDENT_HINT_TAG = 'Hint' + CONFIG_FRAMEWORK_DEPENDENT_FRONTEND_TAG = 'Frontend' CONFIG_FRAMEWORK_DEPENDENT_EXTENSION_TAG = 'Extension' CONFIG_FRAMEWORK_DEPENDENT_INFER_REQUEST_COUNT_TAG = 'InferenceRequestsCount' CONFIG_FRAMEWORK_DEPENDENT_ASYNC_REQUEST_COUNT_TAG = 'AsyncRequestCount' @@ -22,6 +26,18 @@ def parse_parameters(self, curr_test): _mode = dep_parameters_tag.getElementsByTagName( CONFIG_FRAMEWORK_DEPENDENT_MODE_TAG)[0].firstChild + + _code_source, _runtime, _hint = None, None, None + if dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_CODE_SOURCE_TAG): + _code_source = dep_parameters_tag.getElementsByTagName( + CONFIG_FRAMEWORK_DEPENDENT_CODE_SOURCE_TAG)[0].firstChild + if dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_RUNTIME_TAG): + _runtime = dep_parameters_tag.getElementsByTagName( + CONFIG_FRAMEWORK_DEPENDENT_RUNTIME_TAG)[0].firstChild + if dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_HINT_TAG): + _hint = dep_parameters_tag.getElementsByTagName( + CONFIG_FRAMEWORK_DEPENDENT_HINT_TAG)[0].firstChild + _extension = dep_parameters_tag.getElementsByTagName( CONFIG_FRAMEWORK_DEPENDENT_EXTENSION_TAG)[0].firstChild _async_request_count = dep_parameters_tag.getElementsByTagName( @@ -36,6 +52,10 @@ def parse_parameters(self, curr_test): _infer_request_count = dep_parameters_tag.getElementsByTagName( CONFIG_FRAMEWORK_DEPENDENT_INFER_REQUEST_COUNT_TAG)[0].firstChild + _frontend = None + if dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_FRONTEND_TAG): + _frontend = dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_FRONTEND_TAG)[0].firstChild + _shape, _layout, _mean, _input_scale = None, None, None, None if dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_SHAPE_TAG): _shape = 
dep_parameters_tag.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_SHAPE_TAG)[0].firstChild @@ -48,6 +68,10 @@ def parse_parameters(self, curr_test): return OpenVINOParameters( mode=_mode.data if _mode else None, + code_source=_code_source.data if _code_source else 'handwritten', + runtime=_runtime.data if _runtime else None, + hint=_hint.data if _hint else None, + frontend=_frontend.data if _frontend else 'IR', extension=_extension.data if _extension else None, infer_request_count=_infer_request_count.data if _infer_request_count else None, async_request_count=_async_request_count.data if _async_request_count else None, @@ -61,9 +85,14 @@ def parse_parameters(self, curr_test): class OpenVINOParameters(FrameworkParameters): - def __init__(self, mode, extension, infer_request_count, async_request_count, thread_count, stream_count, + def __init__(self, mode, code_source, runtime, hint, frontend, extension, + infer_request_count, async_request_count, thread_count, stream_count, shape, layout, mean, input_scale): self.mode = None + self.code_source = None + self.runtime = None + self.hint = None + self.frontend = None self.extension = None self.infer_request = None self.async_request = None @@ -76,55 +105,112 @@ def __init__(self, mode, extension, infer_request_count, async_request_count, th if self._mode_is_correct(mode): self.mode = mode.title() + + if self._code_source_is_correct(code_source): + self.code_source = code_source + + if self._parameter_is_not_none(runtime): + if self._runtime_is_correct(runtime): + self.runtime = runtime + else: + if self.code_source == 'ovbenchmark': + self.runtime = 'python' + + if self._parameter_is_not_none(hint): + if self._hint_is_correct(hint): + self.hint = hint + else: + if self.code_source == 'ovbenchmark': + self.hint = 'latency' + if self._extension_path_is_correct(extension): self.extension = extension else: raise ValueError('Wrong extension path for device. 
File not found.') - if self._parameter_not_is_none(infer_request_count): + + if self._parameter_is_not_none(infer_request_count): if self._int_value_is_correct(infer_request_count): self.infer_request = infer_request_count - if self.mode == 'Sync' or 'ovbenchmark' in self.mode.lower(): - if self._parameter_not_is_none(thread_count): + if self.code_source == 'ovbenchmark' and self.hint != 'none': + raise ValueError(f'Cannot set nireq for ovbenchmark and hint {self.hint}') + + if self.mode == 'Sync' or self.code_source == 'ovbenchmark': + if self._parameter_is_not_none(thread_count): if self._int_value_is_correct(thread_count): self.nthreads = int(thread_count) else: raise ValueError('Thread count can only take values: integer greater than zero.') - if self.mode == 'Async': - if self._parameter_not_is_none(async_request_count): + + if self.mode == 'Async' and self.code_source == 'handwritten': + if self._parameter_is_not_none(async_request_count): if self._int_value_is_correct(async_request_count): self.async_request = async_request_count else: - raise ValueError('Async requiest count can only take values: integer greater than zero.') - if self.mode == 'Async' or 'ovbenchmark' in self.mode.lower(): - if self._parameter_not_is_none(stream_count): + raise ValueError('Async request count can only take values: integer greater than zero.') + + if self.mode == 'Async' or self.code_source == 'ovbenchmark': + if self._parameter_is_not_none(stream_count): if self._int_value_is_correct(stream_count): self.nstreams = stream_count else: raise ValueError('Stream count can only take values: integer greater than zero.') - if 'ovbenchmark' in self.mode.lower(): - if self._parameter_not_is_none(shape): + if self.code_source == 'ovbenchmark': + if self._parameter_is_not_none(shape): self.shape = shape.strip() - if self._parameter_not_is_none(layout): + if self._parameter_is_not_none(layout): self.layout = layout.strip() - if 'onnx' in self.mode.lower(): - if self._parameter_not_is_none(mean): + if self._frontend_is_correct(frontend): + self.frontend = frontend if frontend else 'IR' + if self.frontend != 'IR': + if self._parameter_is_not_none(mean): if self._mean_is_correct(mean): self.mean = mean.strip() else: raise ValueError('Mean can only take values: list of 3 float elements.') - if self._parameter_not_is_none(input_scale): + if self._parameter_is_not_none(input_scale): self.input_scale = input_scale.strip() @staticmethod def _mode_is_correct(mode): - const_correct_mode = ['sync', 'async', - 'ovbenchmark_python_latency', 'ovbenchmark_python_throughput', 'ovbenchmark_python_onnx', - 'ovbenchmark_cpp_latency', 'ovbenchmark_cpp_throughput', 'ovbenchmark_cpp_onnx'] + const_correct_mode = ['sync', 'async'] if mode.lower() in const_correct_mode: return True raise ValueError(f'Mode is a required parameter. 
Mode can only take values: {", ".join(const_correct_mode)}') + @staticmethod + def _code_source_is_correct(code_source): + const_correct_code_source = ['ovbenchmark', 'handwritten'] + if code_source.lower() in const_correct_code_source: + return True + raise ValueError(f'SourceCode can only take values: {", ".join(const_correct_code_source)}') + + @staticmethod + def _runtime_is_correct(runtime): + const_correct_runtime = ['cpp', 'python'] + if runtime.lower() in const_correct_runtime: + return True + raise ValueError(f'Runtime is an optional parameter (by default it is empty), ' + f'but if not empty Runtime can only take values: {", ".join(const_correct_runtime)}') + + @staticmethod + def _hint_is_correct(hint): + const_correct_hint = ['latency', 'throughput', 'none'] + if hint.lower() in const_correct_hint: + return True + raise ValueError(f'Hint is an optional parameter (by default it is empty), ' + f'but if not empty Hint can only take values: {", ".join(const_correct_hint)}') + + @staticmethod + def _frontend_is_correct(frontend): + const_correct_frontend = ['ir', 'tensorflow', 'onnx'] + if not frontend: + return True + if frontend.lower() in const_correct_frontend: + return True + raise ValueError('Frontend is an optional parameter (by default it is ir), ' + f'but Frontend can only take values: {", ".join(const_correct_frontend)}') + def _extension_path_is_correct(self, extension): - return not self._parameter_not_is_none(extension) or Path(extension).exists() + return not self._parameter_is_not_none(extension) or Path(extension).exists() diff --git a/src/benchmark/frameworks/openvino/openvino_process.py b/src/benchmark/frameworks/openvino/openvino_process.py index 08f5e199c..52b865f22 100644 --- a/src/benchmark/frameworks/openvino/openvino_process.py +++ b/src/benchmark/frameworks/openvino/openvino_process.py @@ -6,3 +6,6 @@ class OpenVINOProcess(ProcessHandler, ABC): def __init__(self, test, executor, log): super().__init__(test, executor, log) + + def extract_inference_param(self, key): + return None diff --git a/src/benchmark/frameworks/openvino/openvino_process_factory.py b/src/benchmark/frameworks/openvino/openvino_process_factory.py index 0360ebd68..f197329f1 100644 --- a/src/benchmark/frameworks/openvino/openvino_process_factory.py +++ b/src/benchmark/frameworks/openvino/openvino_process_factory.py @@ -1,24 +1,22 @@ -from .openvino_benchmark_process import (OpenVINOBenchmarkPythonProcess, OpenVINOBenchmarkCppProcess, - OpenVINOBenchmarkPythonOnnxProcess, OpenVINOBenchmarkCppOnnxProcess) +from .openvino_benchmark_process import OpenVINOBenchmarkPythonProcess, OpenVINOBenchmarkCppProcess from .openvino_python_api_process import AsyncOpenVINOProcess, SyncOpenVINOProcess def create_process(test, executor, log, cpp_benchmarks_dir=None): mode = test.dep_parameters.mode.lower() - if mode == 'sync': + code_source = test.dep_parameters.code_source + runtime = test.dep_parameters.runtime + hint = test.dep_parameters.hint + if mode == 'sync' and code_source == 'handwritten': return SyncOpenVINOProcess(test, executor, log) - if mode == 'async': + if mode == 'async' and code_source == 'handwritten': return AsyncOpenVINOProcess(test, executor, log) - if mode == 'ovbenchmark_python_latency': - return OpenVINOBenchmarkPythonProcess(test, executor, log, 'latency') - if mode == 'ovbenchmark_python_throughput': - return OpenVINOBenchmarkPythonProcess(test, executor, log, 'throughput') - if mode == 'ovbenchmark_python_onnx': - return OpenVINOBenchmarkPythonOnnxProcess(test, executor, log) - if 
mode == 'ovbenchmark_cpp_latency': - return OpenVINOBenchmarkCppProcess(test, executor, log, cpp_benchmarks_dir, 'latency') - if mode == 'ovbenchmark_cpp_throughput': - return OpenVINOBenchmarkCppProcess(test, executor, log, cpp_benchmarks_dir, 'throughput') - if mode == 'ovbenchmark_cpp_onnx': - return OpenVINOBenchmarkCppOnnxProcess(test, executor, log, cpp_benchmarks_dir) - raise AssertionError(f'Unknown openvino running mode {mode}') + if code_source == 'ovbenchmark' and runtime == 'python': + return OpenVINOBenchmarkPythonProcess(test, executor, log, hint, mode) + if code_source == 'ovbenchmark' and runtime == 'cpp': + return OpenVINOBenchmarkCppProcess(test, executor, log, cpp_benchmarks_dir, hint, mode) + raise AssertionError('Unsupported combination of: ' + f'openvino running mode {mode}, ' + f'code_source {code_source}, ' + f'runtime {runtime}, ' + f'hint {hint}') diff --git a/src/benchmark/frameworks/openvino/openvino_test.py b/src/benchmark/frameworks/openvino/openvino_test.py index 93328ac33..1e908b322 100644 --- a/src/benchmark/frameworks/openvino/openvino_test.py +++ b/src/benchmark/frameworks/openvino/openvino_test.py @@ -1,24 +1,37 @@ -from collections import OrderedDict - from ..config_parser.test_reporter import Test class OpenVINOTest(Test): - def __init__(self, model, dataset, indep_parameters, dep_parameters): - super().__init__(model, dataset, indep_parameters, dep_parameters) - - def get_report(self): - parameters = OrderedDict() - parameters.update({'Device': self.indep_parameters.device}) - parameters.update({'Async request count': self.dep_parameters.async_request}) - parameters.update({'Infer request count': self.dep_parameters.infer_request}) - parameters.update({'Iteration count': self.indep_parameters.iteration}) - parameters.update({'Thread count': self.dep_parameters.nthreads}) - parameters.update({'Stream count': self.dep_parameters.nstreams}) - parameters.update({'Mean': self.dep_parameters.mean}) - parameters.update({'Scale': self.dep_parameters.input_scale}) - parameters.update({'Shape': self.dep_parameters.shape}) - other_param = self._get_optional_parameters_string(parameters) + def get_report(self, process): + tensors_num = self.dep_parameters.infer_request + if process.get_status() == 0 and not tensors_num: + self._log.info('InferenceRequestsCount is not set in XML config, ' + 'will try to extract it from the launcher JSON report or console output') + tensors_num = process.extract_inference_param('nireq') + + if self.dep_parameters.mode.lower() == 'sync': + infer_requests_count = 1 + else: + infer_requests_count = tensors_num + + RUNTIME_PARAMETER_NAMES = ('INFERENCE_PRECISION_HINT', 'INFERENCE_NUM_THREADS', 'NUM_STREAMS', + 'OPTIMAL_NUMBER_OF_INFER_REQUESTS', 'AFFINITY', 'Count') + runtime_parameters = {key: process.extract_inference_param(key) for key in RUNTIME_PARAMETER_NAMES} + + if runtime_parameters['Count'] is not None: + # for benchmark app + actual_iterations = int(runtime_parameters['Count'].strip().split(' ')[0]) + runtime_parameters.pop('Count') + else: + # effective for sync/async python launchers + actual_iterations = self.indep_parameters.iteration + + parameters = self.prepare_framework_params() + parameters['Infer request count'] = infer_requests_count + parameters['Number of tensors'] = tensors_num + parameters['Iteration count'] = actual_iterations + parameters.update(runtime_parameters) + optional_parameters_string = self._get_optional_parameters_string(parameters) report_res = { 'task': self.model.task, @@ -29,7 +42,7 @@ def 
get_report(self): 'precision': self.model.precision, 'batch_size': self.indep_parameters.batch_size, 'mode': self.dep_parameters.mode, - 'framework_params': other_param, + 'framework_params': optional_parameters_string, } return report_res diff --git a/src/benchmark/frameworks/processes.py b/src/benchmark/frameworks/processes.py index 8e593a567..12f7486db 100644 --- a/src/benchmark/frameworks/processes.py +++ b/src/benchmark/frameworks/processes.py @@ -12,6 +12,7 @@ def __init__(self, test, executor, log): self._output = None self._status = None self.inference_script_root = Path(self._executor.get_path_to_inference_folder()) + self._report_path = None @staticmethod def get_cmd_python_version(): @@ -39,8 +40,12 @@ def execute(self): self.__log.info(f'Start inference test on model: {self._test.model.name}') self.__log.info(f'Command line is: {command_line}') self._executor.set_target_framework(self._test.indep_parameters.inference_framework) - self._status, self._output = self._executor.execute_process(command_line, - self._test.indep_parameters.test_time_limit) + + # add timeout overhead because time_limit in benchmark app applies to the inference stage only + # set None in case test_time_limit is unset, for backward compatibility + configured_time_limit = self._test.indep_parameters.test_time_limit + timeout = configured_time_limit + 300 if configured_time_limit else None + self._status, self._output = self._executor.execute_process(command_line, timeout) if type(self._output) is not list: self._output = self._output.decode('utf-8').split('\n')[:-1] @@ -60,11 +65,18 @@ def get_status(self): def get_performance_metrics(self): pass + def get_json_report_content(self): + if self._report_path: + return json.loads(self._executor.get_file_content(self._report_path)) + + def get_output_lines(self): + return self._output + def get_performance_metrics_cpp(self): if self._status != 0 or len(self._output) == 0: return None, None, None - report = json.loads(self._executor.get_file_content(self._report_path)) + report = self.get_json_report_content() # calculate average time of single pass metric to align output with custom launchers MILLISECONDS_IN_SECOND = 1000 @@ -87,18 +99,19 @@ def _fill_command_line_cpp(self): weights = self._test.model.weight dataset = self._test.dataset.path iteration_count = self._test.indep_parameters.iteration + time = int(self._test.indep_parameters.test_time_limit) arguments = f'-m {model}' if weights.lower() != 'none': arguments += f' -w {weights}' - arguments += f' -i {dataset} -niter {iteration_count} -save_report -report_path {self._report_path}' + arguments += f' -i {dataset} -niter {iteration_count} -save_report -report_path {self._report_path} -t {time}' arguments = self._add_optional_argument_to_cmd_line(arguments, '-b', self._test.indep_parameters.batch_size) - arguments = self._add_optional_argument_to_cmd_line(arguments, '-shape', self._test.dep_parameters.shape) + arguments = self._add_optional_argument_to_cmd_line(arguments, '-shape', self._test.dep_parameters.input_shape) arguments = self._add_optional_argument_to_cmd_line(arguments, '-layout', self._test.dep_parameters.layout) arguments = self._add_optional_argument_to_cmd_line(arguments, '-mean', self._test.dep_parameters.mean) arguments = self._add_optional_argument_to_cmd_line(arguments, '-scale', self._test.dep_parameters.input_scale) arguments = self._add_optional_argument_to_cmd_line(arguments, 
'-nthreads', self._test.dep_parameters.thread_count) arguments = self._add_optional_argument_to_cmd_line(arguments, '-nireq', @@ -152,6 +165,12 @@ def __make_log_filename(self): ] if hasattr(self._test.dep_parameters, 'mode'): test_settings.append(self._test.dep_parameters.mode) + if hasattr(self._test.dep_parameters, 'code_source'): + test_settings.append(self._test.dep_parameters.code_source) + if hasattr(self._test.dep_parameters, 'runtime'): + test_settings.append(self._test.dep_parameters.runtime) + if hasattr(self._test.dep_parameters, 'hint'): + test_settings.append(self._test.dep_parameters.hint) filename = '_'.join(test_settings) filename += '.log' return filename diff --git a/src/benchmark/frameworks/pytorch/pytorch_parameters_parser.py b/src/benchmark/frameworks/pytorch/pytorch_parameters_parser.py index 5b31b2461..bd67aa299 100644 --- a/src/benchmark/frameworks/pytorch/pytorch_parameters_parser.py +++ b/src/benchmark/frameworks/pytorch/pytorch_parameters_parser.py @@ -56,19 +56,19 @@ def __init__(self, input_name, input_shape, normalize, mean, std, output_name, m self.model_type = None self.inference_mode = None - if self._parameter_not_is_none(input_name): + if self._parameter_is_not_none(input_name): self.input_name = input_name - if self._parameter_not_is_none(input_shape): + if self._parameter_is_not_none(input_shape): self.input_shape = input_shape - if self._parameter_not_is_none(normalize): + if self._parameter_is_not_none(normalize): self.normalize = normalize - if self._parameter_not_is_none(mean): + if self._parameter_is_not_none(mean): self.mean = mean - if self._parameter_not_is_none(std): + if self._parameter_is_not_none(std): self.std = std - if self._parameter_not_is_none(output_name): + if self._parameter_is_not_none(output_name): self.output_name = output_name - if self._parameter_not_is_none(model_type): + if self._parameter_is_not_none(model_type): self.model_type = model_type - if self._parameter_not_is_none(inference_mode): + if self._parameter_is_not_none(inference_mode): self.inference_mode = inference_mode diff --git a/src/benchmark/frameworks/pytorch/pytorch_test.py b/src/benchmark/frameworks/pytorch/pytorch_test.py deleted file mode 100644 index 25771a134..000000000 --- a/src/benchmark/frameworks/pytorch/pytorch_test.py +++ /dev/null @@ -1,35 +0,0 @@ -from collections import OrderedDict - -from ..config_parser.test_reporter import Test - - -class PyTorchTest(Test): - def __init__(self, model, dataset, indep_parameters, dep_parameters): - super().__init__(model, dataset, indep_parameters, dep_parameters) - - def get_report(self): - parameters = OrderedDict() - parameters.update({'Device': self.indep_parameters.device}) - parameters.update({'Iteration count': self.indep_parameters.iteration}) - parameters.update({'Input name': self.dep_parameters.input_name}) - parameters.update({'Normalization flag': self.dep_parameters.normalize}) - parameters.update({'Mean': self.dep_parameters.mean}) - parameters.update({'Standard deviation': self.dep_parameters.std}) - parameters.update({'Output Name': self.dep_parameters.output_name}) - parameters.update({'Model type': self.dep_parameters.model_type}) - parameters.update({'Inference mode': self.dep_parameters.inference_mode}) - other_param = self._get_optional_parameters_string(parameters) - - report_res = { - 'task': self.model.task, - 'model': self.model.name, - 'dataset': self.dataset.name, - 'source_framework': self.model.source_framework, - 'inference_framework': self.indep_parameters.inference_framework, - 
'precision': self.model.precision, - 'batch_size': self.indep_parameters.batch_size, - 'mode': 'Sync', - 'framework_params': other_param, - } - - return report_res diff --git a/src/benchmark/frameworks/pytorch/pytorch_wrapper.py b/src/benchmark/frameworks/pytorch/pytorch_wrapper.py index e76315bba..54992e7ba 100644 --- a/src/benchmark/frameworks/pytorch/pytorch_wrapper.py +++ b/src/benchmark/frameworks/pytorch/pytorch_wrapper.py @@ -1,5 +1,5 @@ from .pytorch_process import PyTorchProcess -from .pytorch_test import PyTorchTest +from ..config_parser.test_reporter import Test from ..framework_wrapper import FrameworkWrapper from ..known_frameworks import KnownFrameworks @@ -13,4 +13,4 @@ def create_process(test, executor, log, cpp_benchmarks_dir=None): @staticmethod def create_test(model, dataset, indep_parameters, dep_parameters): - return PyTorchTest(model, dataset, indep_parameters, dep_parameters) + return Test(model, dataset, indep_parameters, dep_parameters) diff --git a/src/benchmark/frameworks/tensorflow/tensorflow_parameters_parser.py b/src/benchmark/frameworks/tensorflow/tensorflow_parameters_parser.py index 03a492158..0cc7d8285 100644 --- a/src/benchmark/frameworks/tensorflow/tensorflow_parameters_parser.py +++ b/src/benchmark/frameworks/tensorflow/tensorflow_parameters_parser.py @@ -67,46 +67,46 @@ def __init__(self, channel_swap, mean, input_scale, input_shape, input_name, out self.num_intra_threads = None self.kmp_affinity = None - if self._parameter_not_is_none(channel_swap): + if self._parameter_is_not_none(channel_swap): if self._channel_swap_is_correct(channel_swap): self.channel_swap = channel_swap else: raise ValueError('Channel swap can only take values: list of unique values 0, 1, 2.') - if self._parameter_not_is_none(mean): + if self._parameter_is_not_none(mean): if self._mean_is_correct(mean): self.mean = mean else: raise ValueError('Mean can only take values: list of 3 float elements.') - if self._parameter_not_is_none(input_scale): + if self._parameter_is_not_none(input_scale): if self._float_value_is_correct(input_scale): self.input_scale = input_scale else: raise ValueError('Input scale can only take values: float greater than zero.') - if self._parameter_not_is_none(input_shape): + if self._parameter_is_not_none(input_shape): if self._input_shape_is_correct(input_shape): self.input_shape = input_shape else: raise ValueError('Input shape can only take values: list of 3 integer elements greater than zero.') - if self._parameter_not_is_none(input_name): + if self._parameter_is_not_none(input_name): self.input_name = input_name - if self._parameter_not_is_none(output_names): + if self._parameter_is_not_none(output_names): self.output_names = output_names - if self._parameter_not_is_none(thread_count): + if self._parameter_is_not_none(thread_count): if self._int_value_is_correct(thread_count): self.nthreads = thread_count else: raise ValueError('Threads count can only take integer value') - if self._parameter_not_is_none(inter_op_parallelism_threads): + if self._parameter_is_not_none(inter_op_parallelism_threads): if self._int_value_is_correct(inter_op_parallelism_threads): self.num_inter_threads = inter_op_parallelism_threads else: raise ValueError('Inter op parallelism threads can only take integer value') - if self._parameter_not_is_none(intra_op_parallelism_threads): + if self._parameter_is_not_none(intra_op_parallelism_threads): if self._int_value_is_correct(intra_op_parallelism_threads): self.num_intra_threads = intra_op_parallelism_threads else: raise 
ValueError('Intra op parallelism threads can only take integer value') - if self._parameter_not_is_none(kmp_affinity): + if self._parameter_is_not_none(kmp_affinity): self.kmp_affinity = kmp_affinity def _input_shape_is_correct(self, input_shape): diff --git a/src/benchmark/frameworks/tensorflow/tensorflow_test.py b/src/benchmark/frameworks/tensorflow/tensorflow_test.py deleted file mode 100644 index a7e3e0626..000000000 --- a/src/benchmark/frameworks/tensorflow/tensorflow_test.py +++ /dev/null @@ -1,28 +0,0 @@ -from ..config_parser.test_reporter import Test - - -class TensorFlowTest(Test): - def __init__(self, model, dataset, indep_parameters, dep_parameters): - super().__init__(model, dataset, indep_parameters, dep_parameters) - - def get_report(self): - other_param = ', '.join([f'Device: {self.indep_parameters.device}', - f'Iteration count: {self.indep_parameters.iteration}', - f'Thread count: {self.dep_parameters.nthreads}', - f'Inter threads: {self.dep_parameters.num_inter_threads}', - f'Intra threads: {self.dep_parameters.num_intra_threads}', - f'KMP_AFFINITY: {self.dep_parameters.kmp_affinity}']) - - report_res = { - 'task': self.model.task, - 'model': self.model.name, - 'dataset': self.dataset.name, - 'source_framework': self.model.source_framework, - 'inference_framework': self.indep_parameters.inference_framework, - 'precision': self.model.precision, - 'batch_size': self.indep_parameters.batch_size, - 'mode': 'Sync', - 'framework_params': other_param, - } - - return report_res diff --git a/src/benchmark/frameworks/tensorflow/tensorflow_wrapper.py b/src/benchmark/frameworks/tensorflow/tensorflow_wrapper.py index 2fbacf5ab..f672f11be 100644 --- a/src/benchmark/frameworks/tensorflow/tensorflow_wrapper.py +++ b/src/benchmark/frameworks/tensorflow/tensorflow_wrapper.py @@ -1,5 +1,5 @@ from .tensorflow_process import TensorFlowProcess -from .tensorflow_test import TensorFlowTest +from ..config_parser.test_reporter import Test from ..framework_wrapper import FrameworkWrapper @@ -12,4 +12,4 @@ def create_process(test, executor, log, cpp_benchmarks_dir=None): @staticmethod def create_test(model, dataset, indep_parameters, dep_parameters): - return TensorFlowTest(model, dataset, indep_parameters, dep_parameters) + return Test(model, dataset, indep_parameters, dep_parameters) diff --git a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_parameters_parser.py b/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_parameters_parser.py index 31231bfab..069381712 100644 --- a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_parameters_parser.py +++ b/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_parameters_parser.py @@ -62,26 +62,26 @@ def __init__(self, channel_swap, mean, input_scale, input_shape, layout, input_n self.delegate = None self.delegate_options = None - if self._parameter_not_is_none(channel_swap): + if self._parameter_is_not_none(channel_swap): self.channel_swap = self._process_sequence_arg(channel_swap) - if self._parameter_not_is_none(mean): + if self._parameter_is_not_none(mean): self.mean = self._process_sequence_arg(mean) - if self._parameter_not_is_none(input_scale): + if self._parameter_is_not_none(input_scale): self.input_scale = self._process_sequence_arg(input_scale) - if self._parameter_not_is_none(input_shape): + if self._parameter_is_not_none(input_shape): self.input_shape = self._process_sequence_arg(input_shape) - if self._parameter_not_is_none(input_name): + if self._parameter_is_not_none(input_name): self.input_name = input_name - if 
self._parameter_not_is_none(layout): + if self._parameter_is_not_none(layout): self.layout = layout - if self._parameter_not_is_none(thread_count): + if self._parameter_is_not_none(thread_count): if self._int_value_is_correct(thread_count): self.nthreads = thread_count else: raise ValueError('Threads count can only take integer value') - if self._parameter_not_is_none(delegate): + if self._parameter_is_not_none(delegate): self.delegate = delegate - if self._parameter_not_is_none(delegate_options): + if self._parameter_is_not_none(delegate_options): self.delegate_options = delegate_options @staticmethod diff --git a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_test.py b/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_test.py deleted file mode 100644 index 36b1f216c..000000000 --- a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_test.py +++ /dev/null @@ -1,32 +0,0 @@ -from ..config_parser.test_reporter import Test - - -class TensorFlowLiteTest(Test): - def __init__(self, model, dataset, indep_parameters, dep_parameters): - super().__init__(model, dataset, indep_parameters, dep_parameters) - - def get_report(self): - other_param = ', '.join([f'Device: {self.indep_parameters.device}', - f'Iteration count: {self.indep_parameters.iteration}', - f'Thread count: {self.dep_parameters.nthreads}', - f'Channel swap: {self.dep_parameters.channel_swap}', - f'Shape: {self.dep_parameters.input_shape}', - f'Layout: {self.dep_parameters.layout}', - f'Mean: {self.dep_parameters.mean}', - f'Scale: {self.dep_parameters.input_scale}', - f'Delegate: {self.dep_parameters.delegate}', - f'Delegate options: {self.dep_parameters.delegate_options}']) - - report_res = { - 'task': self.model.task, - 'model': self.model.name, - 'dataset': self.dataset.name, - 'source_framework': self.model.source_framework, - 'inference_framework': self.indep_parameters.inference_framework, - 'precision': self.model.precision, - 'batch_size': self.indep_parameters.batch_size, - 'mode': 'Sync', - 'framework_params': other_param, - } - - return report_res diff --git a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_wrapper.py b/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_wrapper.py index 84b453a53..04f9bc86b 100644 --- a/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_wrapper.py +++ b/src/benchmark/frameworks/tensorflow_lite/tensorflow_lite_wrapper.py @@ -1,5 +1,5 @@ from .tensorflow_lite_process import TensorFlowLiteProcess -from .tensorflow_lite_test import TensorFlowLiteTest +from ..config_parser.test_reporter import Test from ..framework_wrapper import FrameworkWrapper @@ -12,4 +12,4 @@ def create_process(test, executor, log, cpp_benchmark_path=None): @staticmethod def create_test(model, dataset, indep_parameters, dep_parameters): - return TensorFlowLiteTest(model, dataset, indep_parameters, dep_parameters) + return Test(model, dataset, indep_parameters, dep_parameters) diff --git a/src/benchmark/inference_benchmark.py b/src/benchmark/inference_benchmark.py index 02d65ebe1..edbf48575 100644 --- a/src/benchmark/inference_benchmark.py +++ b/src/benchmark/inference_benchmark.py @@ -101,7 +101,8 @@ def inference_benchmark(executor_type, test_list, output_handler, log, args = cli_argument_parser() log.info(f'Parsing configuration file {args.config_path}') - test_list = process_config(args.config_path, log) + test_list, test_creation_status = process_config(args.config_path, log) + log.info('All tests are added to test list' if not test_creation_status else 'Not all tests are 
added to test list') log.info(f'Create result table with name: {args.result_file}') @@ -115,4 +116,4 @@ def inference_benchmark(executor_type, test_list, output_handler, log, args.cpp_benchmarks_dir, args.openvino_cpp_benchmark_dir) log.info('Inference tests completed' if not inference_status.value else 'Inference tests failed') - sys.exit(inference_status.value) + sys.exit(inference_status.value or test_creation_status) diff --git a/src/benchmark/output.py b/src/benchmark/output.py index 51383e619..44305b7b0 100644 --- a/src/benchmark/output.py +++ b/src/benchmark/output.py @@ -33,7 +33,7 @@ def __init__(self, table_name, csv_delimiter): @staticmethod def __create_table_row(executor, test, process): - report = test.get_report() + report = test.get_report(process=process) if process is not None: status_code = process.get_status() process_status = (Status(status_code) if (Status.has_value(status_code) and status_code != 1) diff --git a/src/benchmark/tests/test_processes.py b/src/benchmark/tests/test_processes.py index ddc5588f9..8f09ffae5 100644 --- a/src/benchmark/tests/test_processes.py +++ b/src/benchmark/tests/test_processes.py @@ -7,9 +7,7 @@ from src.benchmark.frameworks.intel_caffe.intel_caffe_process import IntelCaffeProcess from src.benchmark.frameworks.known_frameworks import KnownFrameworks from src.benchmark.frameworks.openvino.openvino_benchmark_process import (OpenVINOBenchmarkPythonProcess, - OpenVINOBenchmarkCppProcess, - OpenVINOBenchmarkPythonOnnxProcess, - OpenVINOBenchmarkCppOnnxProcess) + OpenVINOBenchmarkCppProcess) from src.benchmark.frameworks.openvino.openvino_process import OpenVINOProcess from src.benchmark.frameworks.openvino.openvino_python_api_process import AsyncOpenVINOProcess, SyncOpenVINOProcess from src.benchmark.frameworks.processes import ProcessHandler @@ -66,22 +64,24 @@ def test_python_version(os, mocker): ['OpenCV DNN Cpp', OpenCVDNNCppProcess], ['ONNX Runtime', OnnxRuntimeProcess], ['TensorFlowLite', TensorFlowLiteProcess]]) -@pytest.mark.parametrize('mode', [['sync', SyncOpenVINOProcess], ['async', AsyncOpenVINOProcess], - ['ovbenchmark_python_latency', OpenVINOBenchmarkPythonProcess], - ['ovbenchmark_python_throughput', OpenVINOBenchmarkPythonProcess], - ['ovbenchmark_cpp_latency', OpenVINOBenchmarkCppProcess], - ['ovbenchmark_cpp_throughput', OpenVINOBenchmarkCppProcess], - ['ovbenchmark_cpp_onnx', OpenVINOBenchmarkCppOnnxProcess], - ['ovbenchmark_python_onnx', OpenVINOBenchmarkPythonOnnxProcess]]) -def test_framework_wrapper(inference_framework, mode, mocker): +@pytest.mark.parametrize('complex_test', [['sync', 'handwritten', None, SyncOpenVINOProcess], + ['async', 'handwritten', None, AsyncOpenVINOProcess], + ['sync', 'ovbenchmark', 'python', OpenVINOBenchmarkPythonProcess], + ['sync', 'ovbenchmark', 'cpp', OpenVINOBenchmarkCppProcess], + ['async', 'ovbenchmark', 'python', OpenVINOBenchmarkPythonProcess], + ['async', 'ovbenchmark', 'cpp', OpenVINOBenchmarkCppProcess], + ]) +def test_framework_wrapper(inference_framework, complex_test, mocker): test = TEST_BASIC_LINE test.indep_parameters.inference_framework = inference_framework[0] - test.dep_parameters.mode = mode[0] + test.dep_parameters.mode = complex_test[0] + test.dep_parameters.code_source = complex_test[1] + test.dep_parameters.runtime = complex_test[2] wrapper = WRAPPER_REGISTRY[inference_framework[0]] mocker.patch('pathlib.Path.is_file', return_value=True) if inference_framework[0] == KnownFrameworks.openvino_dldt: assert isinstance(wrapper.create_process(test, 
get_host_executor(mocker), log, 'valid/benchmark/path'), - mode[1]) + complex_test[-1]) else: assert isinstance(wrapper.create_process(test, get_host_executor(mocker), log, 'valid/benchmark/path'), inference_framework[1]) diff --git a/src/configs/README.md b/src/configs/README.md index b1f8ec4bb..0195d4f75 100644 --- a/src/configs/README.md +++ b/src/configs/README.md @@ -57,9 +57,20 @@ of OpenVINO Toolkit: - `Mode` - тег, обязательный для заполнения. Описывает программный интерфейс вывода. - Допустимые значения `Sync` (используется для реализации latency-режима) и `Async` - (используется для реализации latency-режима при создании очереди из одного запроса - и throughput-режима при создании очереди из большего числа запросов). + Допустимые значения: + - `Sync` используется для реализации latency-режима. + - `Async` используется для реализации latency-режима при создании очереди из одного запроса + и throughput-режима при создании очереди из большего числа запросов. + - `CodeSource` - тег, необязательный для заполнения; по умолчанию равен `handwritten`; + определяет запуск `benchmark_app` из пакет Intel Distribution of OpenVINO Toolkit или + запуск собственных реализаций вывода средствами OpenVINO. + - `Runtime`- тег, необязательный для заполнения; может отсуствовать; по умолчанию равен `python`; + используется совместно с запуском benchmark_app. Определяет версию benchmark_app. + - `Hint`- тег, необязательный для заполнения; может отсуствовать; по умолчанию равен `latency`; + используется совместно с запуском benchmark_app. Определяет "подсказку" + для выставления оптимальных параметров режимов OpenVINO. + - `Frontend` - тег, необязательный для заполнения; по умолчанию равен `ir`; + используется совместно с запуском benchmark_app. Определяет расширение внешнего интерфейса. - `Extension` - тег, необязательный для заполнения. Описывает абсолютный путь до реализации слоев, неподдерживаемых OpenVINO. - `AsyncRequestCount` - опциональный тег. Может быть заполнен для асинхронноого @@ -69,12 +80,15 @@ физическому количеству ядер в системе. - `StreamCount` - опциональный тег. Может быть заполнен для асинхронного интерфейса. Описывает максимальное количество одновременно выполняющихся запросов на вывод. - - `InputShape` - тег, необязательный для заполнения; может отсуствовать. Определяет размеры входного тензора. По умолчанию не установлен. - - `Layout`- тег, необязательный для заполнения; может отсуствовать. Определяет формат входного тензора. По умолчанию не установлен. - - `Mean` - тег, необязательный для заполнения; может отсуствовать. Определяет средние значения, которые будут вычитаться + - `InputShape` - тег, необязательный для заполнения; только для случая, когда `frontend` не равен `ir`; + может отсуствовать. Определяет размеры входного тензора. По умолчанию не установлен. + - `Layout`- тег, необязательный для заполнения; только для случая, когда `frontend` не равен `ir`; + может отсуствовать. Определяет формат входного тензора. По умолчанию не установлен. + - `Mean` - тег, необязательный для заполнения; только для случая, когда `frontend` не равен `ir`; + может отсуствовать. Определяет средние значения, которые будут вычитаться по каждому из каналов входного изображения. - - `InputScale`- тег, необязательный для заполнения; может отсуствовать. Определяет коэффициент масштабирования входного - изображения. + - `InputScale`- тег, необязательный для заполнения; только для случая, когда `frontend` не равен `ir`; + может отсуствовать. 
Определяет коэффициент масштабирования входного изображения. - Набор тегов для тестирования вывода средствами Intel Optimization for Caffe: @@ -118,7 +132,8 @@ переменной окружения, [здесь][kmp-affinity-docs]. - Набор тегов для тестирования вывода средствами ONNX Runtime и OpenCV DNN CPP: - + - `Backend` - тег, необязательный для заполнения; используется в OpenCV DNN CPP. + Определяет вычислительный бэкенд фреймворка. Поддерживаются два вида бэкенда: IE и DNN. По умолчанию установлен как DNN. - `InputShape` - тег, необязательный для заполнения для ONNX Runtime и обязательный для OpenCV DNN CPP. Определяет размеры входного тензора. По умолчанию не установлен. В настоящий момент OpenCV не может определить размеры входного тензора во многих случаях, поэтому необходимо @@ -418,6 +433,7 @@ 60 + DNN [1,3,224,224] [123.675,116.28,103.53] diff --git a/src/configs/benchmark_configuration_file_template.xml b/src/configs/benchmark_configuration_file_template.xml index c532d8b54..4de3cb5cb 100644 --- a/src/configs/benchmark_configuration_file_template.xml +++ b/src/configs/benchmark_configuration_file_template.xml @@ -27,6 +27,7 @@ + @@ -144,6 +145,7 @@ + DNN diff --git a/src/cpp_dl_benchmark/CMakeLists.txt b/src/cpp_dl_benchmark/CMakeLists.txt index 9eae6e51d..b99d30a49 100644 --- a/src/cpp_dl_benchmark/CMakeLists.txt +++ b/src/cpp_dl_benchmark/CMakeLists.txt @@ -10,6 +10,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") option(ENABLE_CLANG_FORMAT "Enable clang-format during the build" ON) option(BUILD_ONNXRUNTIME_LAUNCHER "Build OpenCV launcher" ON) option(BUILD_OPENCV_LAUNCHER "Build OpenCV launcher" ON) +option(BUILD_OPENCV_OV_LAUNCHER "Build OpenCV with OpenVINO backend launcher" ON) include(CMakeParseArguments) @@ -64,7 +65,7 @@ find_package(OpenCV REQUIRED core imgproc imgcodecs) add_subdirectory(common) add_subdirectory(thirdparty/gflags) -if (BUILD_OPENCV_LAUNCHER OR BUILD_OPENCV_IE_LAUNCHER) +if (BUILD_OPENCV_LAUNCHER OR BUILD_OPENCV_OV_LAUNCHER) add_subdirectory(opencv_launcher) endif() diff --git a/src/cpp_dl_benchmark/README.md b/src/cpp_dl_benchmark/README.md index 8146aca96..773cd3e0c 100644 --- a/src/cpp_dl_benchmark/README.md +++ b/src/cpp_dl_benchmark/README.md @@ -20,7 +20,7 @@ The tool was tested on Ubuntu 20.04 (64-bit) with default GCC* 9.4.0 ## Build -To build specific launcher please refer to the corresponding `README.md` file in the launcher directory. +To build specific launcher please refer to the corresponding `README.md` file in the launcher directory. By default all launchers will be built. 
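For reference, a minimal sketch of the default top-level build, assembled from the per-launcher instructions referenced above (the `<path-to-repo>` placeholder and the `build` directory name are assumptions, not part of the original instructions):

```
# all BUILD_*_LAUNCHER options default to ON, so no extra flags are needed
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release <path-to-repo>/src/cpp_dl_benchmark
cmake --build . -- -j$(nproc --all)
```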
## Usage diff --git a/src/cpp_dl_benchmark/main.cpp b/src/cpp_dl_benchmark/main.cpp index 6e1c50671..e877b8239 100644 --- a/src/cpp_dl_benchmark/main.cpp +++ b/src/cpp_dl_benchmark/main.cpp @@ -2,7 +2,7 @@ #include "inputs_preparation/inputs_preparation.hpp" #include "utils/args_handler.hpp" -#ifdef OCV_DNN +#if defined(OCV_DNN) || defined(OCV_DNN_WITH_OV) #include "opencv_launcher.hpp" #elif ORT_DEFAULT #include "onnxruntime_launcher.hpp" @@ -27,12 +27,12 @@ constexpr char model_msg[] = "path to a file with a trained model or a config file.\n" " available formats\n" " ONNX Runtime - onnx\n" - " OpenCV DNN - onnx, pb, protoxt."; + " OpenCV - .xml, onnx, pb, protoxt."; DEFINE_string(m, "", model_msg); constexpr char weights_msg[] = "path to a model weights file.\n" - " available formats:\n" - " OpenCV DNN - caffemodel."; + " available formats:\n" + " OpenCV - caffemodel, .bin"; DEFINE_string(w, "", weights_msg); constexpr char input_msg[] = @@ -93,6 +93,8 @@ void parse(int argc, char* argv[]) { std::cout << #ifdef OCV_DNN "opencv_dnn" +#elif OCV_DNN_WITH_OV + "opencv_dnn_ov" #elif ORT_DEFAULT "onnxruntime" #endif @@ -119,11 +121,6 @@ void parse(int argc, char* argv[]) { if (FLAGS_m.empty()) { throw std::invalid_argument{"-m can't be empty"}; } -#ifdef OCV_DNN - if (FLAGS_shape.empty()) { - throw std::invalid_argument{"[--shape <[N,C,H,W]>] can't be empty"}; - } -#endif } void log_model_inputs_outputs(const IOTensorsInfo& tensors_info) { @@ -173,7 +170,7 @@ int main(int argc, char* argv[]) { std::unique_ptr launcher; -#ifdef OCV_DNN +#if defined(OCV_DNN) || defined(OCV_DNN_WITH_OV) launcher = std::make_unique(FLAGS_nthreads); #elif ORT_DEFAULT launcher = std::make_unique(FLAGS_nthreads); @@ -193,6 +190,8 @@ int main(int argc, char* argv[]) { {{"inference_framework", #ifdef OCV_DNN "opencv_dnn" +#elif OCV_DNN_WITH_OV + "opencv_dnn_ov" #elif ORT_DEFAULT "onnxruntime" #endif diff --git a/src/cpp_dl_benchmark/onnxruntime_launcher/README.md b/src/cpp_dl_benchmark/onnxruntime_launcher/README.md index 1b33cbb2e..507305863 100644 --- a/src/cpp_dl_benchmark/onnxruntime_launcher/README.md +++ b/src/cpp_dl_benchmark/onnxruntime_launcher/README.md @@ -5,7 +5,7 @@ The tool allows to measure deep learning models inference performance with [ONNX ## Build ONNX Runtime 1. Clone repository, checkout to the latest stable release and update submodules: - + ``` git clone https://github.com/microsoft/onnxruntime.git cd onnxruntime @@ -14,13 +14,13 @@ The tool allows to measure deep learning models inference performance with [ONNX ``` 1. Create `build` directory: - + ``` mkdir build && cd build ``` 1. Configure it with `cmake`: - + ``` cmake -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release -Donnxruntime_BUILD_FOR_NATIVE_MACHINE=ON -Donnxruntime_BUILD_UNIT_TESTS=OFF -Donnxruntime_BUILD_SHARED_LIB=ON -Donnxruntime_USE_FULL_PROTOBUF=ON ../cmake ``` @@ -54,7 +54,7 @@ To build the tool you need to have an installation of [ONNX Runtime][onnx-runtim 1. In the created directory run `cmake` command: ``` - cmake -DCMAKE_BUILD_TYPE=Release -BUILD_ONNXRUNTIME_LAUNCHER=ON /src/cpp_dl_benchmark + cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_ONNXRUNTIME_LAUNCHER=ON -DBUILD_OPENCV_LAUNCHER=OFF -DBUILD_OPENCV_OV_LAUNCHER=OFF /src/cpp_dl_benchmark ``` 1. 
Build tool diff --git a/src/cpp_dl_benchmark/opencv_launcher/CMakeLists.txt b/src/cpp_dl_benchmark/opencv_launcher/CMakeLists.txt index b004a1b6d..7e1899a40 100644 --- a/src/cpp_dl_benchmark/opencv_launcher/CMakeLists.txt +++ b/src/cpp_dl_benchmark/opencv_launcher/CMakeLists.txt @@ -3,9 +3,20 @@ find_package(OpenCV REQUIRED dnn) file(GLOB_RECURSE HEADERS "${CMAKE_CURRENT_SOURCE_DIR}/include/*") file(GLOB_RECURSE SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/src/*") -add_launcher(NAME opencv_dnn_benchmark - SOURCES ${SOURCES} - HEADERS ${HEADERS} - INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include" - DEPENDENCIES opencv_dnn - DEFINITIONS OCV_DNN) +if (BUILD_OPENCV_LAUNCHER) + add_launcher(NAME opencv_dnn_benchmark + SOURCES ${SOURCES} + HEADERS ${HEADERS} + INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include" + DEPENDENCIES opencv_dnn + DEFINITIONS OCV_DNN) +endif() + +if (BUILD_OPENCV_OV_LAUNCHER) + add_launcher(NAME opencv_dnn_ov_benchmark + SOURCES ${SOURCES} + HEADERS ${HEADERS} + INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include" + DEPENDENCIES opencv_dnn + DEFINITIONS OCV_DNN_WITH_OV) +endif() diff --git a/src/cpp_dl_benchmark/opencv_launcher/README.md b/src/cpp_dl_benchmark/opencv_launcher/README.md index 9d6ae3921..5206d0ec1 100644 --- a/src/cpp_dl_benchmark/opencv_launcher/README.md +++ b/src/cpp_dl_benchmark/opencv_launcher/README.md @@ -14,15 +14,27 @@ To get `OpenCV` you need either download [prebuilt binaries](https://opencv.org/ ``` 1. Create `build` directory: - + ``` mkdir build && cd build ``` 1. Configure it with `cmake`: - + + - For OpenCV with OpenVINO: + + Setup environment variables to detect OpenVINO: + ``` - cmake -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DBUILD_DOCS=OFF .. + source /setupvars.sh + + cmake -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DWITH_OPENVINO=ON -DBUILD_DOCS=OFF .. + ``` + + - For OpenCV: + + ``` + cmake -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=OFF -DBUILD_TESTS=OFF -DWITH_OPENVINO=OFF -DBUILD_DOCS=OFF .. ``` 1. Build and install project: @@ -52,22 +64,26 @@ so that cmake can find it during configuration step: 1. In the created directory run `cmake` command: - ``` - cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_OPENCV_LAUNCHER=ON /src/cpp_dl_benchmark - ``` + - For OPENCV_LAUNCHER with OpenVINO: + ``` + cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_OPENCV_OV_LAUNCHER=ON -DBUILD_OPENCV_LAUNCHER=OFF -DBUILD_ONNXRUNTIME_LAUNCHER=OFF /src/cpp_dl_benchmark + ``` + - For OPENCV_LAUNCHER: + ``` + cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_OPENCV_OV_LAUNCHER=OFF -DBUILD_OPENCV_LAUNCHER=ON -DBUILD_ONNXRUNTIME_LAUNCHER=OFF /src/cpp_dl_benchmark + ``` 1. Build tool ``` - cmake --build . + cmake --build . -- -j$(nproc --all) ``` Application binaries will be placed into `//bin` directory, where `BUILD_TYPE` whether `Debug` or `Release`. ## Usage -OpenCV DNN launcher supports models in `ONNX`, `Caffe` and `TensorFlow` formats, -no custom backends are tested for now. +OpenCV DNN launcher supports models in `IR (OpenVINO)` with `DNN_BACKEND_INFERENCE_ENGINE` (backend); `ONNX`, `Caffe` and `TensorFlow` formats with `DNN_BACKEND_OPENCV` (backend). 
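As an illustration only (this invocation is not part of the original README; the model, weights and input paths are placeholders), a run of the OpenVINO-backed launcher built above might look like:

```
# opencv_dnn_ov_benchmark is the binary produced by the BUILD_OPENCV_OV_LAUNCHER target;
# run it from the build's bin directory, paths below are placeholders
./opencv_dnn_ov_benchmark -m model.xml -w model.bin -i ./test_data -niter 100 -t 60 \
    -save_report -report_path report.json
```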
Limitations on the models: - One input diff --git a/src/cpp_dl_benchmark/opencv_launcher/include/opencv_launcher.hpp b/src/cpp_dl_benchmark/opencv_launcher/include/opencv_launcher.hpp index c0ad429a8..891e12a20 100644 --- a/src/cpp_dl_benchmark/opencv_launcher/include/opencv_launcher.hpp +++ b/src/cpp_dl_benchmark/opencv_launcher/include/opencv_launcher.hpp @@ -47,5 +47,7 @@ class OCVLauncher : public Launcher { std::vector output_blobs; + static void set_backend(cv::dnn::Net& net); + void run(const cv::Mat& input_blob); }; diff --git a/src/cpp_dl_benchmark/opencv_launcher/src/opencv_launcher.cpp b/src/cpp_dl_benchmark/opencv_launcher/src/opencv_launcher.cpp index a947eb473..4b230bc0b 100644 --- a/src/cpp_dl_benchmark/opencv_launcher/src/opencv_launcher.cpp +++ b/src/cpp_dl_benchmark/opencv_launcher/src/opencv_launcher.cpp @@ -29,9 +29,17 @@ void OCVLauncher::log_framework_version() const { logger::info << "OpenCV version: " << CV_VERSION << logger::endl; } +void OCVLauncher::set_backend(cv::dnn::Net& net) { +#ifdef OCV_DNN + net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV); +#elif OCV_DNN_WITH_OV + net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE); +#endif +} + void OCVLauncher::read(const std::string model_file, const std::string weights_file) { net = cv::dnn::readNet(model_file, weights_file); - net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV); + set_backend(net); std::vector inputShapes, outputShapes; net.getLayerShapes(MatShape(), 0, inputShapes, outputShapes); @@ -72,11 +80,11 @@ void OCVLauncher::fill_inputs_outputs_info() { IOTensorsInfo OCVLauncher::get_io_tensors_info() const { std::vector input_tensors_info{{input_names[0], - input_shapes[0], - input_shapes[0], - "", - utils::DataPrecision::FP32, - true}}; // only CV_32F type for IO supported + input_shapes[0], + input_shapes[0], + "", + utils::DataPrecision::FP32, + true}}; // only CV_32F type for IO supported std::vector output_tensors_info; for (size_t i = 0; i < output_names.size(); ++i) { output_tensors_info.push_back( diff --git a/test/smoke_test/smoke_config.xml b/test/smoke_test/smoke_config.xml index 45b78514d..aeb6aa1ec 100644 --- a/test/smoke_test/smoke_config.xml +++ b/test/smoke_test/smoke_config.xml @@ -56,6 +56,161 @@ + + + classification + mobilenet-v1-1.0-224-tf + FP32 + tf + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin + + + Data + ./black_square.jpg + + + OpenVINO DLDT + 1 + CPU + 10 + 6 + + + async + ovbenchmark + + + + + + + + + classification + mobilenet-v1-1.0-224-tf + FP32 + tf + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin + + + Data + ./black_square.jpg + + + OpenVINO DLDT + 1 + CPU + 10 + 6 + + + sync + ovbenchmark + + + + + + + + + classification + mobilenet-v1-1.0-224-tf + FP32 + tf + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin + + + Data + ./black_square.jpg + + + OpenVINO DLDT + 1 + CPU + 10 + 6 + + + async + ovbenchmark + python + none + + + + + + + + + classification + mobilenet-v1-1.0-224-tf + FP32 + tf + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin + + + Data + ./black_square.jpg 
+ + + OpenVINO DLDT + 1 + CPU + 10 + 6 + + + async + ovbenchmark + python + throughput + + + + + + + + + classification + mobilenet-v1-1.0-224-tf + FP32 + tf + ./working_dir_smoke/public/mobilenet-v1-1.0-224-tf/mobilenet_v1_1.0_224_frozen.pb + none + + + Data + ./black_square.jpg + + + OpenVINO DLDT + 1 + CPU + 10 + 6 + + + async + ovbenchmark + tensorflow + + [127.5,127.5,127.5] + [127.5,127.5,127.5] + + + + + + + + classification @@ -99,7 +254,7 @@ None - ImageNet + Data ./black_square.jpg @@ -131,7 +286,7 @@ - ImageNet + Data ./black_square.jpg