refactor(tensorrt_common)!: fix namespace, directory structure & move to perception namespace (#9099)

* refactor(tensorrt_common)!: fix namespace, directory structure & move to perception namespace

Signed-off-by: amadeuszsz <[email protected]>

* refactor(tensorrt_common): directory structure

Signed-off-by: amadeuszsz <[email protected]>

* style(pre-commit): autofix

* fix(tensorrt_common): correct package name for logging

Signed-off-by: amadeuszsz <[email protected]>

---------

Signed-off-by: amadeuszsz <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Kenzo Lobos Tsunekawa <[email protected]>
3 people authored Oct 21, 2024
1 parent 20ebe9d commit 35d5fbf
Showing 40 changed files with 179 additions and 160 deletions.
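The same three-part rename recurs through every file below: the ROS dependency tensorrt_common becomes autoware_tensorrt_common, the include path gains an autoware/ prefix, and all symbols move from tensorrt_common:: into autoware::tensorrt_common::. As an illustrative sketch (not taken from the diff), a downstream C++ file migrates like this:

// Before this commit (package.xml declared <depend>tensorrt_common</depend>):
//   #include <tensorrt_common/tensorrt_common.hpp>
//   std::unique_ptr<tensorrt_common::TrtCommon> trt_common_;
// After this commit (package.xml declares <depend>autoware_tensorrt_common</depend>):
#include <autoware/tensorrt_common/tensorrt_common.hpp>

#include <memory>

// TensorRT wrapper handle, now under the autoware::tensorrt_common namespace.
std::unique_ptr<autoware::tensorrt_common::TrtCommon> trt_common_;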
2 changes: 1 addition & 1 deletion .github/CODEOWNERS
@@ -28,7 +28,6 @@ common/goal_distance_calculator/** [email protected]
common/osqp_interface/** [email protected] [email protected] [email protected] [email protected]
common/polar_grid/** [email protected]
common/qp_interface/** [email protected] [email protected] [email protected] [email protected]
common/tensorrt_common/** [email protected] [email protected] [email protected] [email protected]
common/tier4_adapi_rviz_plugin/** [email protected] [email protected] [email protected]
common/tier4_api_utils/** [email protected]
common/tier4_camera_view_rviz_plugin/** [email protected] [email protected]
@@ -132,6 +131,7 @@ perception/autoware_raindrop_cluster_filter/** [email protected] yoshi.ri@tier
perception/autoware_shape_estimation/** [email protected] [email protected] [email protected]
perception/autoware_simple_object_merger/** [email protected] [email protected] [email protected]
perception/autoware_tensorrt_classifier/** [email protected] [email protected]
perception/autoware_tensorrt_common/** [email protected] [email protected] [email protected] [email protected]
perception/autoware_tensorrt_yolox/** [email protected] [email protected] [email protected]
perception/autoware_tracking_object_merger/** [email protected] [email protected] [email protected]
perception/autoware_traffic_light_arbiter/** [email protected] [email protected]
2 changes: 0 additions & 2 deletions .github/labeler.yaml
@@ -42,5 +42,3 @@
"tag:require-cuda-build-and-test":
- perception/**/*
- sensing/**/*
- common/cuda_utils/**/*
- common/tensorrt_common/**/*
2 changes: 0 additions & 2 deletions common/.pages
@@ -9,7 +9,6 @@ nav:
- 'comparisons': common/autoware_auto_common/design/comparisons
- 'autoware_grid_map_utils': common/autoware_grid_map_utils
- 'autoware_point_types': common/autoware_point_types
- 'Cuda Utils': common/cuda_utils
- 'Geography Utils': common/geography_utils
- 'Global Parameter Loader': common/global_parameter_loader/Readme
- 'Glog Component': common/glog_component
@@ -24,7 +23,6 @@
- 'Signal Processing':
- 'Introduction': common/signal_processing
- 'Butterworth Filter': common/signal_processing/documentation/ButterworthFilter
- 'TensorRT Common': common/tensorrt_common
- 'autoware_universe_utils': common/autoware_universe_utils
- 'traffic_light_utils': common/traffic_light_utils
- 'TVM Utility':
2 changes: 1 addition & 1 deletion perception/autoware_bytetrack/package.xml
@@ -17,6 +17,7 @@

<depend>autoware_kalman_filter</depend>
<depend>autoware_perception_msgs</depend>
<depend>autoware_tensorrt_common</depend>
<depend>cuda_utils</depend>
<depend>cv_bridge</depend>
<depend>eigen</depend>
@@ -26,7 +27,6 @@
<depend>rclcpp</depend>
<depend>rclcpp_components</depend>
<depend>sensor_msgs</depend>
<depend>tensorrt_common</depend>
<depend>tier4_perception_msgs</depend>
<depend>yaml-cpp</depend>

@@ -12,9 +12,9 @@ find_package(autoware_universe_utils REQUIRED)
find_package(tier4_debug_msgs REQUIRED)
find_package(tier4_perception_msgs REQUIRED)

find_package(tensorrt_common)
if(NOT ${tensorrt_common_FOUND})
message(WARNING "The tensorrt_common package is not found. Please check its dependencies.")
find_package(autoware_tensorrt_common)
if(NOT ${autoware_tensorrt_common_FOUND})
message(WARNING "The autoware_tensorrt_common package is not found. Please check its dependencies.")
# to avoid launch file missing without a gpu
ament_package()
install(
@@ -48,8 +48,8 @@ target_link_libraries(${PROJECT_NAME}
pcl_common
rclcpp::rclcpp
rclcpp_components::component
tensorrt_common::tensorrt_common
tf2_eigen::tf2_eigen
${autoware_tensorrt_common_TARGETS}
${tier4_debug_msgs_TARGETS}
${tier4_perception_msgs_TARGETS}
)
@@ -19,10 +19,10 @@
#include "autoware/lidar_apollo_instance_segmentation/feature_generator.hpp"
#include "autoware/lidar_apollo_instance_segmentation/node.hpp"

#include <autoware/tensorrt_common/tensorrt_common.hpp>
#include <autoware/universe_utils/transform/transforms.hpp>
#include <cuda_utils/cuda_unique_ptr.hpp>
#include <cuda_utils/stream_unique_ptr.hpp>
#include <tensorrt_common/tensorrt_common.hpp>
#include <tf2_eigen/tf2_eigen.hpp>

#include <tf2_ros/buffer_interface.h>
@@ -54,7 +54,7 @@ class LidarApolloInstanceSegmentation : public LidarInstanceSegmentationInterfac
const sensor_msgs::msg::PointCloud2 & input, sensor_msgs::msg::PointCloud2 & transformed_cloud,
float z_offset);

std::unique_ptr<tensorrt_common::TrtCommon> trt_common_;
std::unique_ptr<autoware::tensorrt_common::TrtCommon> trt_common_;

rclcpp::Node * node_;
std::shared_ptr<Cluster2D> cluster2d_;
@@ -14,13 +14,13 @@
<buildtool_depend>ament_cmake</buildtool_depend>

<depend>autoware_perception_msgs</depend>
<depend>autoware_tensorrt_common</depend>
<depend>autoware_universe_utils</depend>
<depend>cuda_utils</depend>
<depend>libpcl-all-dev</depend>
<depend>pcl_conversions</depend>
<depend>rclcpp</depend>
<depend>rclcpp_components</depend>
<depend>tensorrt_common</depend>
<depend>tf2_eigen</depend>
<depend>tier4_debug_msgs</depend>
<depend>tier4_perception_msgs</depend>
@@ -47,8 +47,8 @@ LidarApolloInstanceSegmentation::LidarApolloInstanceSegmentation(rclcpp::Node *
z_offset_ = node_->declare_parameter<float>("z_offset", -2.0);
const auto precision = node_->declare_parameter("precision", "fp32");

trt_common_ = std::make_unique<tensorrt_common::TrtCommon>(
onnx_file, precision, nullptr, tensorrt_common::BatchConfig{1, 1, 1}, 1 << 30);
trt_common_ = std::make_unique<autoware::tensorrt_common::TrtCommon>(
onnx_file, precision, nullptr, autoware::tensorrt_common::BatchConfig{1, 1, 1}, 1 << 30);
trt_common_->setup();

if (!trt_common_->isInitialized()) {
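Taken as a whole, the hunk above amounts to the following initialization pattern. This is a sketch that mirrors the calls shown in the diff (constructor arguments, setup(), isInitialized()); the failure branch is a hypothetical completion, since the hunk is truncated at that check:

#include <autoware/tensorrt_common/tensorrt_common.hpp>

#include <memory>
#include <string>

// onnx_file and precision come from node parameters, as in the diff above.
std::unique_ptr<autoware::tensorrt_common::TrtCommon> make_trt_common(
  const std::string & onnx_file, const std::string & precision)
{
  auto trt_common = std::make_unique<autoware::tensorrt_common::TrtCommon>(
    onnx_file, precision, nullptr, autoware::tensorrt_common::BatchConfig{1, 1, 1}, 1 << 30);
  trt_common->setup();
  if (!trt_common->isInitialized()) {
    // Hypothetical handling; the original node's branch is not shown in this hunk.
    return nullptr;
  }
  return trt_common;
}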
@@ -17,7 +17,7 @@

#include "NvInfer.h"
#include "autoware/lidar_centerpoint/centerpoint_config.hpp"
#include "tensorrt_common/tensorrt_common.hpp"
#include "autoware/tensorrt_common/tensorrt_common.hpp"

#include <iostream>
#include <memory>
@@ -36,15 +36,15 @@ class TensorRTWrapper
bool init(
const std::string & onnx_path, const std::string & engine_path, const std::string & precision);

tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext> context_{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext> context_{nullptr};

protected:
virtual bool setProfile(
nvinfer1::IBuilder & builder, nvinfer1::INetworkDefinition & network,
nvinfer1::IBuilderConfig & config) = 0;

CenterPointConfig config_;
tensorrt_common::Logger logger_;
autoware::tensorrt_common::Logger logger_;

private:
bool parseONNX(
@@ -57,9 +57,9 @@

bool createContext();

tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime> runtime_{nullptr};
tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory> plan_{nullptr};
tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine> engine_{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime> runtime_{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory> plan_{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine> engine_{nullptr};
};

} // namespace autoware::lidar_centerpoint
@@ -63,7 +63,7 @@ bool HeadTRT::setProfile(
if (
out_name == std::string("heatmap") &&
network.getOutput(ci)->getDimensions().d[1] != static_cast<int32_t>(out_channel_sizes_[ci])) {
tensorrt_common::LOG_ERROR(logger_)
autoware::tensorrt_common::LOG_ERROR(logger_)
<< "Expected and actual number of classes do not match" << std::endl;
return false;
}
@@ -37,10 +37,10 @@ TensorRTWrapper::~TensorRTWrapper()
bool TensorRTWrapper::init(
const std::string & onnx_path, const std::string & engine_path, const std::string & precision)
{
runtime_ =
tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime>(nvinfer1::createInferRuntime(logger_));
runtime_ = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime>(
nvinfer1::createInferRuntime(logger_));
if (!runtime_) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create runtime" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create runtime" << std::endl;
return false;
}

@@ -63,15 +63,15 @@ bool TensorRTWrapper::init(
bool TensorRTWrapper::createContext()
{
if (!engine_) {
tensorrt_common::LOG_ERROR(logger_)
autoware::tensorrt_common::LOG_ERROR(logger_)
<< "Failed to create context: Engine was not created" << std::endl;
return false;
}

context_ =
tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext>(engine_->createExecutionContext());
context_ = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext>(
engine_->createExecutionContext());
if (!context_) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create context" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create context" << std::endl;
return false;
}

@@ -82,17 +82,17 @@ bool TensorRTWrapper::parseONNX(
const std::string & onnx_path, const std::string & engine_path, const std::string & precision,
const std::size_t workspace_size)
{
auto builder =
tensorrt_common::TrtUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(logger_));
auto builder = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IBuilder>(
nvinfer1::createInferBuilder(logger_));
if (!builder) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create builder" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create builder" << std::endl;
return false;
}

auto config =
tensorrt_common::TrtUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
auto config = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IBuilderConfig>(
builder->createBuilderConfig());
if (!config) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create config" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create config" << std::endl;
return false;
}
#if (NV_TENSORRT_MAJOR * 1000) + (NV_TENSORRT_MINOR * 100) + NV_TENSOR_PATCH >= 8400
@@ -102,42 +102,43 @@
#endif
if (precision == "fp16") {
if (builder->platformHasFastFp16()) {
tensorrt_common::LOG_INFO(logger_) << "Using TensorRT FP16 Inference" << std::endl;
autoware::tensorrt_common::LOG_INFO(logger_) << "Using TensorRT FP16 Inference" << std::endl;
config->setFlag(nvinfer1::BuilderFlag::kFP16);
} else {
tensorrt_common::LOG_INFO(logger_)
autoware::tensorrt_common::LOG_INFO(logger_)
<< "TensorRT FP16 Inference isn't supported in this environment" << std::endl;
}
}

const auto flag =
1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
auto network =
tensorrt_common::TrtUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(flag));
auto network = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::INetworkDefinition>(
builder->createNetworkV2(flag));
if (!network) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create network" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create network" << std::endl;
return false;
}

auto parser = tensorrt_common::TrtUniquePtr<nvonnxparser::IParser>(
auto parser = autoware::tensorrt_common::TrtUniquePtr<nvonnxparser::IParser>(
nvonnxparser::createParser(*network, logger_));
parser->parseFromFile(onnx_path.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kERROR));

if (!setProfile(*builder, *network, *config)) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to set profile" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to set profile" << std::endl;
return false;
}

plan_ = tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory>(
plan_ = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory>(
builder->buildSerializedNetwork(*network, *config));
if (!plan_) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create serialized network" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_)
<< "Failed to create serialized network" << std::endl;
return false;
}
engine_ = tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(
engine_ = autoware::tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(
runtime_->deserializeCudaEngine(plan_->data(), plan_->size()));
if (!engine_) {
tensorrt_common::LOG_ERROR(logger_) << "Failed to create engine" << std::endl;
autoware::tensorrt_common::LOG_ERROR(logger_) << "Failed to create engine" << std::endl;
return false;
}

@@ -146,7 +147,7 @@

bool TensorRTWrapper::saveEngine(const std::string & engine_path)
{
tensorrt_common::LOG_INFO(logger_) << "Writing to " << engine_path << std::endl;
autoware::tensorrt_common::LOG_INFO(logger_) << "Writing to " << engine_path << std::endl;
std::ofstream file(engine_path, std::ios::out | std::ios::binary);
file.write(reinterpret_cast<const char *>(plan_->data()), plan_->size());
return true;
@@ -158,9 +159,10 @@ bool TensorRTWrapper::loadEngine(const std::string & engine_path)
std::stringstream engine_buffer;
engine_buffer << engine_file.rdbuf();
std::string engine_str = engine_buffer.str();
engine_ = tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(
reinterpret_cast<const void *>(engine_str.data()), engine_str.size()));
tensorrt_common::LOG_INFO(logger_) << "Loaded engine from " << engine_path << std::endl;
engine_ =
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine>(runtime_->deserializeCudaEngine(
reinterpret_cast<const void *>(engine_str.data()), engine_str.size()));
autoware::tensorrt_common::LOG_INFO(logger_) << "Loaded engine from " << engine_path << std::endl;
return true;
}
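A readability note on the wrapper changes above: every call site now repeats the longer autoware::tensorrt_common:: qualifier. Downstream code that finds this verbose could introduce a namespace alias; this is an optional style choice, not something the commit itself adds:

#include <autoware/tensorrt_common/tensorrt_common.hpp>

#include <NvInfer.h>

// Optional downstream alias to keep call sites short.
namespace trt = autoware::tensorrt_common;

// The qualified names from the hunk above then shrink to, for example:
//   trt::TrtUniquePtr<nvinfer1::IRuntime> runtime{nullptr};
//   trt::LOG_ERROR(logger) << "Failed to create runtime" << std::endl;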

2 changes: 1 addition & 1 deletion perception/autoware_lidar_centerpoint/package.xml
@@ -14,11 +14,11 @@

<depend>autoware_object_recognition_utils</depend>
<depend>autoware_perception_msgs</depend>
<depend>autoware_tensorrt_common</depend>
<depend>autoware_universe_utils</depend>
<depend>pcl_ros</depend>
<depend>rclcpp</depend>
<depend>rclcpp_components</depend>
<depend>tensorrt_common</depend>
<depend>tf2_eigen</depend>
<depend>tf2_geometry_msgs</depend>
<depend>tf2_sensor_msgs</depend>
@@ -18,7 +18,7 @@
#include "autoware/lidar_transfusion/transfusion_config.hpp"
#include "autoware/lidar_transfusion/utils.hpp"

#include <tensorrt_common/tensorrt_common.hpp>
#include <autoware/tensorrt_common/tensorrt_common.hpp>

#include <NvInfer.h>

@@ -57,8 +57,8 @@ class NetworkTRT
const std::string & onnx_path, const std::string & engine_path, const std::string & precision);
const char * getTensorName(NetworkIO name);

tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine> engine{nullptr};
tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext> context{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::ICudaEngine> engine{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IExecutionContext> context{nullptr};

private:
bool parseONNX(
@@ -73,9 +73,9 @@
bool validateNetworkIO();
nvinfer1::Dims validateTensorShape(NetworkIO name, const std::vector<int> shape);

tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime> runtime_{nullptr};
tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory> plan_{nullptr};
tensorrt_common::Logger logger_;
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IRuntime> runtime_{nullptr};
autoware::tensorrt_common::TrtUniquePtr<nvinfer1::IHostMemory> plan_{nullptr};
autoware::tensorrt_common::Logger logger_;
TransfusionConfig config_;
std::vector<const char *> tensors_names_;
