Add L0 test to protect end-to-end ensemble model (#739)
* L0 test for Ensemble model

* Modify L0_ensemble_model

* Update test_config_generator.py

* Remove unnecessary flags

* Add copyright information

* Copyright

* Correct Copyright body

* Copyright

* Fix precommit errors
pskiran1 authored Aug 14, 2023
1 parent 209144f commit 01a953f
Showing 9 changed files with 668 additions and 2 deletions.
121 changes: 121 additions & 0 deletions examples/quick-start/add/1/model.py
@@ -0,0 +1,121 @@
#!/usr/bin/env python3

# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json

# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""

def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""

# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args["model_config"])

# Get OUTPUT0 configuration
output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0")

# Convert Triton types to numpy types
self.output0_dtype = pb_utils.triton_string_to_numpy(
output0_config["data_type"]
)

def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
Python model, must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""

output0_dtype = self.output0_dtype

responses = []

# Every Python backend must iterate over everyone of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get INPUT0
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
# Get INPUT1
in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")

out_0 = in_0.as_numpy() + in_1.as_numpy()

# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0.astype(output0_dtype))

# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
# output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_0]
)
responses.append(inference_response)

# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses

def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print("Cleaning up...")
41 changes: 41 additions & 0 deletions examples/quick-start/add/config.pbtxt
@@ -0,0 +1,41 @@
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

name: "add"
backend: "python"

input [
  {
    name: "INPUT0"
    data_type: TYPE_FP32
    dims: [ 4 ]
  }
]
input [
  {
    name: "INPUT1"
    data_type: TYPE_FP32
    dims: [ 4 ]
  }
]
output [
  {
    name: "OUTPUT0"
    data_type: TYPE_FP32
    dims: [ 4 ]
  }
]

instance_group [{ kind: KIND_CPU }]
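
As a quick sanity check (not part of this commit), a config like the one above can be parsed offline into Triton's ModelConfig protobuf. This is a minimal sketch assuming the tritonclient Python package is installed (its gRPC bindings ship model_config_pb2) and using this repository's file path:

from google.protobuf import text_format
from tritonclient.grpc import model_config_pb2

# Parse the text-format protobuf and check a few declared fields.
with open("examples/quick-start/add/config.pbtxt") as f:
    config = text_format.Parse(f.read(), model_config_pb2.ModelConfig())

assert config.name == "add"
assert config.backend == "python"
# Repeated `input` blocks accumulate, so both inputs are present.
assert [inp.name for inp in config.input] == ["INPUT0", "INPUT1"]
assert list(config.output[0].dims) == [4]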
80 changes: 80 additions & 0 deletions examples/quick-start/ensemble_add_sub/config.pbtxt
@@ -0,0 +1,80 @@
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

name: "ensemble_add_sub"
platform: "ensemble"

input [
  {
    name: "INPUT0"
    data_type: TYPE_FP32
    dims: [ 4 ]
  },
  {
    name: "INPUT1"
    data_type: TYPE_FP32
    dims: [ 4 ]
  }
]

output [
  {
    name: "OUTPUT0"
    data_type: TYPE_FP32
    dims: [ 4 ]
  },
  {
    name: "OUTPUT1"
    data_type: TYPE_FP32
    dims: [ 4 ]
  }
]

ensemble_scheduling {
  step [
    {
      model_name: "add"
      model_version: 1
      input_map {
        key: "INPUT0"
        value: "INPUT0"
      }
      input_map {
        key: "INPUT1"
        value: "INPUT1"
      }
      output_map {
        key: "OUTPUT0"
        value: "OUTPUT0"
      }
    },
    {
      model_name: "sub"
      model_version: 1
      input_map {
        key: "INPUT0"
        value: "INPUT0"
      }
      input_map {
        key: "INPUT1"
        value: "INPUT1"
      }
      output_map {
        key: "OUTPUT1"
        value: "OUTPUT1"
      }
    }
  ]
}
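
For orientation (not part of this commit), here is a minimal client sketch that exercises the ensemble end to end. It assumes a Triton server is already serving this model repository on the default HTTP port (localhost:8000) and that the standard tritonclient Python package is installed:

import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

# Two FP32 vectors matching the declared dims: [ 4 ].
input0 = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
input1 = np.array([4.0, 3.0, 2.0, 1.0], dtype=np.float32)

inputs = [
    httpclient.InferInput("INPUT0", list(input0.shape), "FP32"),
    httpclient.InferInput("INPUT1", list(input1.shape), "FP32"),
]
inputs[0].set_data_from_numpy(input0)
inputs[1].set_data_from_numpy(input1)

result = client.infer(
    "ensemble_add_sub",
    inputs,
    outputs=[
        httpclient.InferRequestedOutput("OUTPUT0"),
        httpclient.InferRequestedOutput("OUTPUT1"),
    ],
)

# input_map fans the same two inputs out to "add" and "sub", so OUTPUT0
# is the elementwise sum and OUTPUT1 the elementwise difference.
print(result.as_numpy("OUTPUT0"))  # [5. 5. 5. 5.]
print(result.as_numpy("OUTPUT1"))  # [-3. -1.  1.  3.]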
121 changes: 121 additions & 0 deletions examples/quick-start/sub/1/model.py
@@ -0,0 +1,121 @@
#!/usr/bin/env python3

# Copyright 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json

# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""

def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""

# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args["model_config"])

# Get OUTPUT1 configuration
output1_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT1")

# Convert Triton types to numpy types
self.output1_dtype = pb_utils.triton_string_to_numpy(
output1_config["data_type"]
)

def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
Python model, must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""

output1_dtype = self.output1_dtype

responses = []

# Every Python backend must iterate over everyone of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get INPUT0
in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
# Get INPUT1
in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")

out_1 = in_0.as_numpy() - in_1.as_numpy()

# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1.astype(output1_dtype))

# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
# output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_1]
)
responses.append(inference_response)

# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses

def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print("Cleaning up...")