Add end-to-end L0 BLS test (#747)
* Add BLS example model

* Modify BLS model and revert add, sub model changes

* Add BLS model L0 script

* Correct test_config_generator.py

* Update permissions of new files added

* Add input_data.json

* Fix formatting issues

* Fix pre-commit issues
pskiran1 authored Aug 21, 2023
1 parent 64ea400 commit 6ed033c
Showing 11 changed files with 491 additions and 18 deletions.
12 changes: 5 additions & 7 deletions examples/quick-start/add/1/model.py
@@ -49,13 +49,11 @@ def initialize(self, args):
         # You must parse model_config. JSON string is not parsed here
         self.model_config = model_config = json.loads(args["model_config"])
 
-        # Get OUTPUT0 configuration
-        output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")
+        # Get OUTPUT configuration
+        output_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT")
 
         # Convert Triton types to numpy types
-        self.output0_dtype = pb_utils.triton_string_to_numpy(
-            output0_config["data_type"]
-        )
+        self.output_dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
 
     def execute(self, requests):
         """`execute` MUST be implemented in every Python model. `execute`
@@ -79,7 +77,7 @@ def execute(self, requests):
             be the same as `requests`
         """
 
-        output0_dtype = self.output0_dtype
+        output_dtype = self.output_dtype
 
         responses = []
 
@@ -95,7 +93,7 @@
 
             # Create output tensors. You need pb_utils.Tensor
             # objects to create pb_utils.InferenceResponse.
-            out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0.astype(output0_dtype))
+            out_tensor_0 = pb_utils.Tensor("OUTPUT", out_0.astype(output_dtype))
 
             # Create InferenceResponse. You can set an error here in case
             # there was a problem with handling this inference request.
2 changes: 1 addition & 1 deletion examples/quick-start/add/config.pbtxt
@@ -32,7 +32,7 @@ input [
 ]
 output [
   {
-    name: "OUTPUT0"
+    name: "OUTPUT"
     data_type: TYPE_FP32
     dims: [ 4 ]
   }
131 changes: 131 additions & 0 deletions examples/quick-start/bls/1/model.py
@@ -0,0 +1,131 @@
#!/usr/bin/env python3

# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json

# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    """Your Python model must use the same class name. Every Python model
    that is created must have "TritonPythonModel" as the class name.
    """

    def initialize(self, args):
        """`initialize` is called only once when the model is being loaded.
        Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.

        Parameters
        ----------
        args : dict
          Both keys and values are strings. The dictionary keys and values are:
          * model_config: A JSON string containing the model configuration
          * model_instance_kind: A string containing model instance kind
          * model_instance_device_id: A string containing model instance device ID
          * model_repository: Model repository path
          * model_version: Model version
          * model_name: Model name
        """

        # You must parse model_config. JSON string is not parsed here
        self.model_config = json.loads(args["model_config"])

    def execute(self, requests):
        """`execute` must be implemented in every Python model. The `execute`
        function receives a list of pb_utils.InferenceRequest as its only
        argument. This function is called when an inference request is made
        for this model. Depending on the batching configuration (e.g. Dynamic
        Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
        pb_utils.InferenceRequest in `requests`. If there is an error, you can
        set the error argument when creating a pb_utils.InferenceResponse.

        Parameters
        ----------
        requests : list
          A list of pb_utils.InferenceRequest

        Returns
        -------
        list
          A list of pb_utils.InferenceResponse. The length of this list must
          be the same as `requests`
        """

        responses = []
        # Every Python model must iterate over every one of the requests
        # and create a pb_utils.InferenceResponse for each of them.
        for request in requests:
            # Get INPUT0
            in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")

            # Get INPUT1
            in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")

            # Get the name of the model to call (a TYPE_STRING tensor)
            model_name = pb_utils.get_input_tensor_by_name(request, "MODEL_NAME")

            # Model name string
            model_name_string = model_name.as_numpy()[0]

            # Create the BLS inference request object
            infer_request = pb_utils.InferenceRequest(
                model_name=model_name_string,
                requested_output_names=["OUTPUT"],
                inputs=[in_0, in_1],
            )

            # Perform a synchronous, blocking inference request
            infer_response = infer_request.exec()

            # Make sure that the inference response doesn't have an error. If
            # it has an error and you can't proceed with your model execution
            # you can raise an exception.
            if infer_response.has_error():
                raise pb_utils.TritonModelException(infer_response.error().message())

            # Create InferenceResponse. You can set an error here in case
            # there was a problem with handling this inference request.
            # Below is an example of how you can set errors in an inference
            # response:
            #
            # pb_utils.InferenceResponse(
            #     output_tensors=..., error=pb_utils.TritonError("An error occurred"))
            #
            # Because the infer_response of the called model contains the final
            # outputs with correct output names, we can just pass the list
            # of outputs to the InferenceResponse object.
            inference_response = pb_utils.InferenceResponse(
                output_tensors=infer_response.output_tensors()
            )
            responses.append(inference_response)

        # You should return a list of pb_utils.InferenceResponse. Length
        # of this list must match the length of `requests` list.
        return responses

    def finalize(self):
        """`finalize` is called only once when the model is being unloaded.
        Implementing `finalize` function is OPTIONAL. This function allows
        the model to perform any necessary clean ups before exit.
        """
        print("Cleaning up...")
44 changes: 44 additions & 0 deletions examples/quick-start/bls/config.pbtxt
@@ -0,0 +1,44 @@
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: "bls"
backend: "python"

input [
{
name: "MODEL_NAME"
data_type: TYPE_STRING
dims: [ 1 ]
},
{
name: "INPUT0"
data_type: TYPE_FP32
dims: [ 4 ]
},
{
name: "INPUT1"
data_type: TYPE_FP32
dims: [ 4 ]
}
]

output [
{
name: "OUTPUT"
data_type: TYPE_FP32
dims: [ 4 ]
}
]

instance_group [ { kind: KIND_CPU }]
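For reference, here is a minimal sketch of a client call against this interface. It assumes a running Triton server with its HTTP endpoint at localhost:8000, the tritonclient package installed, and the add model loaded alongside bls; none of this snippet is part of the commit.

import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

# MODEL_NAME tells the bls model which model to forward to ("add" or "sub").
model_name = httpclient.InferInput("MODEL_NAME", [1], "BYTES")
model_name.set_data_from_numpy(np.array(["add"], dtype=np.object_))

input0 = httpclient.InferInput("INPUT0", [4], "FP32")
input0.set_data_from_numpy(np.random.rand(4).astype(np.float32))

input1 = httpclient.InferInput("INPUT1", [4], "FP32")
input1.set_data_from_numpy(np.random.rand(4).astype(np.float32))

result = client.infer("bls", [model_name, input0, input1])
print(result.as_numpy("OUTPUT"))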
4 changes: 2 additions & 2 deletions examples/quick-start/ensemble_add_sub/config.pbtxt
@@ -56,7 +56,7 @@ ensemble_scheduling {
       value: "INPUT1"
     }
     output_map {
-      key: "OUTPUT0"
+      key: "OUTPUT"
       value: "OUTPUT0"
     }
   },
@@ -72,7 +72,7 @@
       value: "INPUT1"
     }
     output_map {
-      key: "OUTPUT1"
+      key: "OUTPUT"
       value: "OUTPUT1"
     }
   }
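Net effect of these two remappings: the add and sub composing models now both produce a tensor named "OUTPUT", and the ensemble's output_map translates it back to the ensemble-level names OUTPUT0 and OUTPUT1, so the ensemble's client-facing interface is unchanged.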
12 changes: 5 additions & 7 deletions examples/quick-start/sub/1/model.py
@@ -49,13 +49,11 @@ def initialize(self, args):
         # You must parse model_config. JSON string is not parsed here
         self.model_config = model_config = json.loads(args["model_config"])
 
-        # Get OUTPUT1 configuration
-        output1_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT1")
+        # Get OUTPUT configuration
+        output_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT")
 
         # Convert Triton types to numpy types
-        self.output1_dtype = pb_utils.triton_string_to_numpy(
-            output1_config["data_type"]
-        )
+        self.output_dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
 
     def execute(self, requests):
         """`execute` MUST be implemented in every Python model. `execute`
@@ -79,7 +77,7 @@ def execute(self, requests):
             be the same as `requests`
         """
 
-        output1_dtype = self.output1_dtype
+        output_dtype = self.output_dtype
 
         responses = []
 
@@ -95,7 +93,7 @@
 
             # Create output tensors. You need pb_utils.Tensor
             # objects to create pb_utils.InferenceResponse.
-            out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1.astype(output1_dtype))
+            out_tensor_1 = pb_utils.Tensor("OUTPUT", out_1.astype(output_dtype))
 
             # Create InferenceResponse. You can set an error here in case
             # there was a problem with handling this inference request.
2 changes: 1 addition & 1 deletion examples/quick-start/sub/config.pbtxt
@@ -32,7 +32,7 @@ input [
 ]
 output [
   {
-    name: "OUTPUT1"
+    name: "OUTPUT"
     data_type: TYPE_FP32
     dims: [ 4 ]
   }
104 changes: 104 additions & 0 deletions qa/L0_bls_model/check_results.py
@@ -0,0 +1,104 @@
#!/usr/bin/env python3

# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import sys

import yaml


class TestOutputValidator:
    """
    Functions that validate the output
    of the test
    """

    def __init__(self, config, test_name, analyzer_log):
        self._config = config
        self._models = config["profile_models"]
        self._analyzer_log = analyzer_log

        check_function = self.__getattribute__(f"check_{test_name}")

        if check_function():
            sys.exit(0)
        else:
            sys.exit(1)

    def check_profile_logs(self):
        """
        Check that each model was profiled the number of times
        corresponding with batch size and concurrency combinations
        (No model config parameter combos expected here!)
        """

        with open(self._analyzer_log, "r") as f:
            log_contents = f.read()

        expected_min_num_measurements = 20
        expected_max_num_measurements = 80

        for model in self._models:
            token = f"Profiling {model}_config"
            token_idx = 0
            found_count = 0
            while True:
                token_idx = log_contents.find(token, token_idx + 1)
                if token_idx == -1:
                    break
                found_count += 1
            if (
                found_count < expected_min_num_measurements
                or found_count > expected_max_num_measurements
            ):
                print(
                    f"\n***\n*** Expected range of measurements for {model} : "
                    f"{expected_min_num_measurements} to {expected_max_num_measurements}. "
                    f"Found {found_count}. \n***"
                )
                return False
        return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f",
        "--config-file",
        type=str,
        required=True,
        help="The path to the config yaml file.",
    )
    parser.add_argument(
        "-l",
        "--analyzer-log-file",
        type=str,
        required=True,
        help="The full path to the analyzer log.",
    )
    parser.add_argument(
        "-t",
        "--test-name",
        type=str,
        required=True,
        help="The name of the test to be run.",
    )
    args = parser.parse_args()

    with open(args.config_file, "r") as f:
        config = yaml.safe_load(f)

    TestOutputValidator(config, args.test_name, args.analyzer_log_file)
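The L0 harness presumably drives this validator with something like the following (hypothetical file names; the test script itself is not shown in this view):

python3 check_results.py -f config.yml -l analyzer.log -t profile_logs

The -t value selects the corresponding check_* method via __getattribute__, and the process exit code (0 on success, 1 on failure) reports the result to the calling script.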
(Diffs for the remaining 3 changed files were not loaded in this view.)
