From 01a953fe25a1ffdf0bfea7d1800da9607b54bc40 Mon Sep 17 00:00:00 2001 From: Sai Kiran Polisetty Date: Mon, 14 Aug 2023 19:55:44 +0530 Subject: [PATCH] Add L0 test to protect end-to-end ensemble model (#739) * L0 test for Ensemble model * Modify L0_ensemble_model * Update test_config_generator.py * Remove unnecessary flags * Add copyright information * Copyright * Correct Copyright body * Copyright * Fix precommit errors --- examples/quick-start/add/1/model.py | 121 ++++++++++++++++++ examples/quick-start/add/config.pbtxt | 41 ++++++ .../quick-start/ensemble_add_sub/config.pbtxt | 80 ++++++++++++ examples/quick-start/sub/1/model.py | 121 ++++++++++++++++++ examples/quick-start/sub/config.pbtxt | 41 ++++++ qa/L0_ensemble_model/check_results.py | 104 +++++++++++++++ qa/L0_ensemble_model/test.sh | 94 ++++++++++++++ qa/L0_ensemble_model/test_config_generator.py | 64 +++++++++ qa/common/util.sh | 4 +- 9 files changed, 668 insertions(+), 2 deletions(-) create mode 100755 examples/quick-start/add/1/model.py create mode 100644 examples/quick-start/add/config.pbtxt create mode 100644 examples/quick-start/ensemble_add_sub/config.pbtxt create mode 100755 examples/quick-start/sub/1/model.py create mode 100644 examples/quick-start/sub/config.pbtxt create mode 100755 qa/L0_ensemble_model/check_results.py create mode 100755 qa/L0_ensemble_model/test.sh create mode 100755 qa/L0_ensemble_model/test_config_generator.py diff --git a/examples/quick-start/add/1/model.py b/examples/quick-start/add/1/model.py new file mode 100755 index 000000000..96996301e --- /dev/null +++ b/examples/quick-start/add/1/model.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import json + +# triton_python_backend_utils is available in every Triton Python model. You +# need to use this module to create inference requests and responses. It also +# contains some utility functions for extracting information from model_config +# and converting Triton input/output types to numpy types. +import triton_python_backend_utils as pb_utils + + +class TritonPythonModel: + """Your Python model must use the same class name. Every Python model + that is created must have "TritonPythonModel" as the class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. This function allows + the model to initialize any state associated with this model. + + Parameters + ---------- + args : dict + Both keys and values are strings. The dictionary keys and values are: + * model_config: A JSON string containing the model configuration + * model_instance_kind: A string containing model instance kind + * model_instance_device_id: A string containing model instance device ID + * model_repository: Model repository path + * model_version: Model version + * model_name: Model name + """ + + # You must parse model_config. 
+        # Triton passes it as a JSON string that is not parsed for you.
+        self.model_config = model_config = json.loads(args["model_config"])
+
+        # Get OUTPUT0 configuration
+        output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")
+
+        # Convert Triton types to numpy types
+        self.output0_dtype = pb_utils.triton_string_to_numpy(
+            output0_config["data_type"]
+        )
+
+    def execute(self, requests):
+        """`execute` MUST be implemented in every Python model. `execute`
+        receives a list of pb_utils.InferenceRequest as its only argument.
+        This function is called when an inference request is made for this
+        model. Depending on the batching configuration (e.g. dynamic
+        batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+        pb_utils.InferenceRequest in `requests`. If there is an error, you
+        can set the error argument when creating a pb_utils.InferenceResponse.
+
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        output0_dtype = self.output0_dtype
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for request in requests:
+            # Get INPUT0
+            in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
+            # Get INPUT1
+            in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")
+
+            out_0 = in_0.as_numpy() + in_1.as_numpy()
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0.astype(output0_dtype))
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #     output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(
+                output_tensors=[out_tensor_0]
+            )
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of the `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+        Implementing `finalize` function is OPTIONAL. This function allows
+        the model to perform any necessary cleanups before exit.
+        """
+        print("Cleaning up...")
diff --git a/examples/quick-start/add/config.pbtxt b/examples/quick-start/add/config.pbtxt
new file mode 100644
index 000000000..51efce9da
--- /dev/null
+++ b/examples/quick-start/add/config.pbtxt
@@ -0,0 +1,41 @@
+# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +name: "add" +backend: "python" + +input [ + { + name: "INPUT0" + data_type: TYPE_FP32 + dims: [ 4 ] + } +] +input [ + { + name: "INPUT1" + data_type: TYPE_FP32 + dims: [ 4 ] + } +] +output [ + { + name: "OUTPUT0" + data_type: TYPE_FP32 + dims: [ 4 ] + } +] + +instance_group [{ kind: KIND_CPU }] diff --git a/examples/quick-start/ensemble_add_sub/config.pbtxt b/examples/quick-start/ensemble_add_sub/config.pbtxt new file mode 100644 index 000000000..14cd8f0bf --- /dev/null +++ b/examples/quick-start/ensemble_add_sub/config.pbtxt @@ -0,0 +1,80 @@ +# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +name: "ensemble_add_sub" +platform: "ensemble" + +input [ + { + name: "INPUT0" + data_type: TYPE_FP32 + dims: [ 4 ] + }, + { + name: "INPUT1" + data_type: TYPE_FP32 + dims: [ 4 ] + } +] + +output [ + { + name: "OUTPUT0" + data_type: TYPE_FP32 + dims: [ 4 ] + }, + { + name: "OUTPUT1" + data_type: TYPE_FP32 + dims: [ 4 ] + } +] + +ensemble_scheduling { + step [ + { + model_name: "add" + model_version: 1 + input_map { + key: "INPUT0" + value: "INPUT0" + } + input_map { + key: "INPUT1" + value: "INPUT1" + } + output_map { + key: "OUTPUT0" + value: "OUTPUT0" + } + }, + { + model_name: "sub" + model_version: 1 + input_map { + key: "INPUT0" + value: "INPUT0" + } + input_map { + key: "INPUT1" + value: "INPUT1" + } + output_map { + key: "OUTPUT1" + value: "OUTPUT1" + } + } + ] +} diff --git a/examples/quick-start/sub/1/model.py b/examples/quick-start/sub/1/model.py new file mode 100755 index 000000000..4fc24c016 --- /dev/null +++ b/examples/quick-start/sub/1/model.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import json + +# triton_python_backend_utils is available in every Triton Python model. You +# need to use this module to create inference requests and responses. It also +# contains some utility functions for extracting information from model_config +# and converting Triton input/output types to numpy types. +import triton_python_backend_utils as pb_utils + + +class TritonPythonModel: + """Your Python model must use the same class name. Every Python model + that is created must have "TritonPythonModel" as the class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. 
+        This function allows the model to initialize any state associated
+        with this model.
+
+        Parameters
+        ----------
+        args : dict
+          Both keys and values are strings. The dictionary keys and values are:
+          * model_config: A JSON string containing the model configuration
+          * model_instance_kind: A string containing model instance kind
+          * model_instance_device_id: A string containing model instance device ID
+          * model_repository: Model repository path
+          * model_version: Model version
+          * model_name: Model name
+        """
+
+        # You must parse model_config. Triton passes it as a JSON string
+        # that is not parsed for you.
+        self.model_config = model_config = json.loads(args["model_config"])
+
+        # Get OUTPUT1 configuration
+        output1_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT1")
+
+        # Convert Triton types to numpy types
+        self.output1_dtype = pb_utils.triton_string_to_numpy(
+            output1_config["data_type"]
+        )
+
+    def execute(self, requests):
+        """`execute` MUST be implemented in every Python model. `execute`
+        receives a list of pb_utils.InferenceRequest as its only argument.
+        This function is called when an inference request is made for this
+        model. Depending on the batching configuration (e.g. dynamic
+        batching) used, `requests` may contain multiple requests. Every
+        Python model must create one pb_utils.InferenceResponse for every
+        pb_utils.InferenceRequest in `requests`. If there is an error, you
+        can set the error argument when creating a pb_utils.InferenceResponse.
+
+        Parameters
+        ----------
+        requests : list
+          A list of pb_utils.InferenceRequest
+
+        Returns
+        -------
+        list
+          A list of pb_utils.InferenceResponse. The length of this list must
+          be the same as `requests`
+        """
+
+        output1_dtype = self.output1_dtype
+
+        responses = []
+
+        # Every Python backend must iterate over every one of the requests
+        # and create a pb_utils.InferenceResponse for each of them.
+        for request in requests:
+            # Get INPUT0
+            in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
+            # Get INPUT1
+            in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")
+
+            out_1 = in_0.as_numpy() - in_1.as_numpy()
+
+            # Create output tensors. You need pb_utils.Tensor
+            # objects to create pb_utils.InferenceResponse.
+            out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1.astype(output1_dtype))
+
+            # Create InferenceResponse. You can set an error here in case
+            # there was a problem with handling this inference request.
+            # Below is an example of how you can set errors in inference
+            # response:
+            #
+            # pb_utils.InferenceResponse(
+            #     output_tensors=..., TritonError("An error occurred"))
+            inference_response = pb_utils.InferenceResponse(
+                output_tensors=[out_tensor_1]
+            )
+            responses.append(inference_response)
+
+        # You should return a list of pb_utils.InferenceResponse. Length
+        # of this list must match the length of the `requests` list.
+        return responses
+
+    def finalize(self):
+        """`finalize` is called only once when the model is being unloaded.
+        Implementing `finalize` function is OPTIONAL. This function allows
+        the model to perform any necessary cleanups before exit.
+        """
+        print("Cleaning up...")
diff --git a/examples/quick-start/sub/config.pbtxt b/examples/quick-start/sub/config.pbtxt
new file mode 100644
index 000000000..548f6120c
--- /dev/null
+++ b/examples/quick-start/sub/config.pbtxt
@@ -0,0 +1,41 @@
+# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+name: "sub"
+backend: "python"
+
+input [
+  {
+    name: "INPUT0"
+    data_type: TYPE_FP32
+    dims: [ 4 ]
+  }
+]
+input [
+  {
+    name: "INPUT1"
+    data_type: TYPE_FP32
+    dims: [ 4 ]
+  }
+]
+output [
+  {
+    name: "OUTPUT1"
+    data_type: TYPE_FP32
+    dims: [ 4 ]
+  }
+]
+
+instance_group [{ kind: KIND_CPU }]
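+
+# Note: KIND_CPU keeps this Python model on CPU. The "ensemble_add_sub"
+# model declares no instance_group of its own; the ensemble scheduler only
+# routes tensors between the composing "add" and "sub" models.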
diff --git a/qa/L0_ensemble_model/check_results.py b/qa/L0_ensemble_model/check_results.py
new file mode 100755
index 000000000..649fde29f
--- /dev/null
+++ b/qa/L0_ensemble_model/check_results.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+
+# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import sys
+
+import yaml
+
+
+class TestOutputValidator:
+    """
+    Functions that validate the output of the test.
+    """
+
+    def __init__(self, config, test_name, analyzer_log):
+        self._config = config
+        self._models = config["profile_models"]
+        self._analyzer_log = analyzer_log
+
+        check_function = self.__getattribute__(f"check_{test_name}")
+
+        if check_function():
+            sys.exit(0)
+        else:
+            sys.exit(1)
+
+    def check_profile_logs(self):
+        """
+        Check that each model was profiled a number of times consistent
+        with the batch size and concurrency combinations.
+
+        (No model config parameter combinations are expected here.)
+        """
+
+        with open(self._analyzer_log, "r") as f:
+            log_contents = f.read()
+
+        expected_min_num_measurements = 20
+        expected_max_num_measurements = 80
+
+        for model in self._models:
+            token = f"Profiling {model}_config"
+            # Start at -1 so the first find() call searches from index 0.
+            token_idx = -1
+            found_count = 0
+            while True:
+                token_idx = log_contents.find(token, token_idx + 1)
+                if token_idx == -1:
+                    break
+                found_count += 1
+            if (
+                found_count < expected_min_num_measurements
+                or found_count > expected_max_num_measurements
+            ):
+                print(
+                    f"\n***\n*** Expected range of measurements for {model}: "
+                    f"{expected_min_num_measurements} to "
+                    f"{expected_max_num_measurements}. Found {found_count}.\n***"
+                )
+                return False
+        return True
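+
+    # Illustrative example of a log line the token above should match; the
+    # exact wording is an assumption about the model-analyzer log format:
+    #
+    #   Profiling ensemble_add_sub_config_default: concurrency=1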
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-f",
+        "--config-file",
+        type=str,
+        required=True,
+        help="The path to the config yaml file.",
+    )
+    parser.add_argument(
+        "-l",
+        "--analyzer-log-file",
+        type=str,
+        required=True,
+        help="The full path to the analyzer log.",
+    )
+    parser.add_argument(
+        "-t",
+        "--test-name",
+        type=str,
+        required=True,
+        help="The name of the test to be run.",
+    )
+    args = parser.parse_args()
+
+    with open(args.config_file, "r") as f:
+        config = yaml.safe_load(f)
+
+    TestOutputValidator(config, args.test_name, args.analyzer_log_file)
diff --git a/qa/L0_ensemble_model/test.sh b/qa/L0_ensemble_model/test.sh
new file mode 100755
index 000000000..2ec42e80b
--- /dev/null
+++ b/qa/L0_ensemble_model/test.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source ../common/util.sh
+source ../common/check_analyzer_results.sh
+create_logs_dir "L0_ensemble_model"
+
+# Set test parameters
+MODEL_ANALYZER="$(which model-analyzer)"
+REPO_VERSION=${NVIDIA_TRITON_SERVER_VERSION}
+MODEL_REPOSITORY=${MODEL_REPOSITORY:="/mnt/nvdl/datasets/inferenceserver/$REPO_VERSION/qa_ensemble_model_repository/qa_simple_ensemble_model_repository"}
+QA_MODELS="ensemble_add_sub"
+MODEL_NAMES="$(echo $QA_MODELS | sed 's/ /,/g')"
+TRITON_LAUNCH_MODE=${TRITON_LAUNCH_MODE:="local"}
+CLIENT_PROTOCOL="grpc"
+PORTS=($(find_available_ports 3))
+GPUS=($(get_all_gpus_uuids))
+OUTPUT_MODEL_REPOSITORY=${OUTPUT_MODEL_REPOSITORY:=$(get_output_directory)}
+CONFIG_FILE="config.yml"
+FILENAME_SERVER_ONLY="server-metrics.csv"
+FILENAME_INFERENCE_MODEL="model-metrics-inference.csv"
+FILENAME_GPU_MODEL="model-metrics-gpu.csv"
+
+rm -rf $OUTPUT_MODEL_REPOSITORY
+create_result_paths
+SERVER_LOG=$TEST_LOG_DIR/server.log
+
+python3 test_config_generator.py --profile-models $MODEL_NAMES
+
+# Run the analyzer and check the results
+RET=0
+
+set +e
+
+MODEL_ANALYZER_ARGS="-m $MODEL_REPOSITORY -f $CONFIG_FILE"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --client-protocol=$CLIENT_PROTOCOL --triton-launch-mode=$TRITON_LAUNCH_MODE"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --triton-http-endpoint localhost:${PORTS[0]} --triton-grpc-endpoint localhost:${PORTS[1]}"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --triton-metrics-url http://localhost:${PORTS[2]}/metrics"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --output-model-repository-path $OUTPUT_MODEL_REPOSITORY --override-output-model-repository"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS -e $EXPORT_PATH --checkpoint-directory $CHECKPOINT_DIRECTORY --filename-server-only=$FILENAME_SERVER_ONLY"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --filename-model-inference=$FILENAME_INFERENCE_MODEL --filename-model-gpu=$FILENAME_GPU_MODEL"
+MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --skip-detailed-reports --triton-output-path=$SERVER_LOG"
+MODEL_ANALYZER_SUBCOMMAND="profile"
+
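+# run_analyzer is provided by ../common/util.sh; it is expected to invoke
+# "model-analyzer $MODEL_ANALYZER_SUBCOMMAND" with $MODEL_ANALYZER_ARGS and
+# capture the console output in $ANALYZER_LOG for the checks below.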
+run_analyzer
+
+if [ $? -ne 0 ]; then
+    echo -e "\n***\n*** Test Failed. model-analyzer $MODEL_ANALYZER_SUBCOMMAND exited with non-zero exit code.\n***"
+    cat $ANALYZER_LOG
+    RET=1
+else
+    # Check the analyzer log for correct output
+    TEST_NAME='profile_logs'
+    python3 check_results.py -f $CONFIG_FILE -t $TEST_NAME -l $ANALYZER_LOG
+    if [ $? -ne 0 ]; then
+        echo -e "\n***\n*** Test Output Verification Failed for $TEST_NAME test.\n***"
+        cat $ANALYZER_LOG
+        RET=1
+    fi
+
+    SERVER_METRICS_FILE=${EXPORT_PATH}/results/${FILENAME_SERVER_ONLY}
+    MODEL_METRICS_GPU_FILE=${EXPORT_PATH}/results/${FILENAME_GPU_MODEL}
+    MODEL_METRICS_INFERENCE_FILE=${EXPORT_PATH}/results/${FILENAME_INFERENCE_MODEL}
+
+    # Expand the variables here; iterating over the bare names (with commas)
+    # would check literal strings instead of the generated CSV paths.
+    for file in $SERVER_METRICS_FILE $MODEL_METRICS_GPU_FILE $MODEL_METRICS_INFERENCE_FILE; do
+        check_no_csv_exists $file
+        if [ $? -ne 0 ]; then
+            echo -e "\n***\n*** Test Output Verification Failed.\n***"
+            cat $ANALYZER_LOG
+            RET=1
+        fi
+    done
+fi
+set -e
+
+if [ $RET -eq 0 ]; then
+    echo -e "\n***\n*** Test PASSED\n***"
+else
+    echo -e "\n***\n*** Test FAILED\n***"
+fi
+
+exit $RET
diff --git a/qa/L0_ensemble_model/test_config_generator.py b/qa/L0_ensemble_model/test_config_generator.py
new file mode 100755
index 000000000..15125d425
--- /dev/null
+++ b/qa/L0_ensemble_model/test_config_generator.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+
+# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import yaml
+
+
+class TestConfigGenerator:
+    """
+    This class contains functions that create configs for various test
+    scenarios.
+
+    The `setup` function does the work common to all tests.
+
+    TO ADD A TEST: Simply add a member function whose name starts
+    with 'generate'.
+ """ + + def __init__(self): + test_functions = [ + self.__getattribute__(name) + for name in dir(self) + if name.startswith("generate") + ] + + for test_function in test_functions: + self.setup() + test_function() + + def setup(self): + parser = argparse.ArgumentParser() + parser.add_argument( + "-m", + "--profile-models", + type=str, + required=True, + help="Comma separated list of models to be profiled", + ) + + args = parser.parse_args() + self.config = {} + self.config["profile_models"] = sorted(args.profile_models.split(",")) + + def generate_config(self): + with open("config.yml", "w+") as f: + yaml.dump(self.config, f) + + +if __name__ == "__main__": + TestConfigGenerator() diff --git a/qa/common/util.sh b/qa/common/util.sh index 4e1578256..84054858a 100755 --- a/qa/common/util.sh +++ b/qa/common/util.sh @@ -22,7 +22,7 @@ ANALYZER_LOG=${ANALYZER_LOG:="$LOGS_DIR/test.log"} mkdir -p $LOGS_DIR -create_logs_dir() { +function create_logs_dir() { # Arguments: # $1: L0 Script name # Check if the L0 script name is empty or not @@ -34,7 +34,7 @@ create_logs_dir() { mkdir -p "$LOGS_DIR" } -create_result_paths() { +function create_result_paths() { # Creates ANALYZER_LOG, EXPORT_PATH, CHECKPOINT_DIRECTORY and TEST_LOG_DIR # Arguments: # -test-name : - L0 Script name (optional argument)