tensor sort in get_inputs/get_outputs (#19)
Co-authored-by: Gao Yue <[email protected]>
2 people authored and GitHub Enterprise committed Jun 21, 2023
1 parent 921181f commit 2215c84
Showing 2 changed files with 29 additions and 10 deletions.
29 changes: 21 additions & 8 deletions controller/src/common/dpucloud_xrtcontroller.cpp
@@ -741,7 +741,7 @@ std::vector<vart::TensorBuffer*> DpuXrtCloudController::create_tensorbuffer_for_
   if (isTensorsBatch) { // if create tensorbuffers in batch, tensor->get_shape()[0] is 1
     tbufs.reserve(tensors.size()*batch_size_);
   } else {
-    tbufs.reserve(tensors.size());
+    tbufs.resize(tensors.size());
   }
   while(iter !=xdpu_total_reg_map.end()) {
     int is_workspace=0;
@@ -780,6 +780,13 @@ std::vector<vart::TensorBuffer*> DpuXrtCloudController::create_tensorbuffer_for_
     }
     auto bufPhy = create_tensor_buffers_hbm(get_merged_io_tensors(iter->first),false, hbm,1);
     for (int i =0; i < tensor_batch; i++) {
+      //std::vector<const xir::Tensor*> tensors_xir;//
+      //if (isInputs){
+      //  tensors_xir = model_->get_graph_input_tensors();
+      //}
+      //else
+      //  tensors_xir = model_->get_graph_output_tensors();
+
       for (unsigned int ts=0;ts< tensors.size(); ts++) {
         if(tensors[ts]->get_attr<int32_t>("reg_id") != (int32_t)iter->first) {
           continue;
@@ -788,12 +795,11 @@ std::vector<vart::TensorBuffer*> DpuXrtCloudController::create_tensorbuffer_for_
         //dims[0] = dims[0]/tensor_batch;
         if (isTensorsBatch) { // if create tensorbuffers in batch, tensor->get_shape()[0] is 1
           xir::Tensor *tensor;//
-          if (isInputs){
-          tensor = const_cast<xir::Tensor*>(model_->get_graph_input_tensors()[ts]);
+          if (isInputs){
+            tensor = const_cast<xir::Tensor*>(model_->get_graph_input_tensors()[ts]);
           }
-          else
-          tensor = const_cast<xir::Tensor*>(model_->get_graph_output_tensors()[ts]);
-
+          else
+            tensor = const_cast<xir::Tensor*>(model_->get_graph_output_tensors()[ts]);
           std::vector<vart::TensorBuffer*> bufPhy_single;
           bufPhy_single.emplace_back(bufPhy[i]);
           //xir::Tensor *tensor = xir::Tensor::create(tensors[ts]->get_name(), dims, tensors[ts]->get_data_type()).release();
@@ -806,9 +812,16 @@ std::vector<vart::TensorBuffer*> DpuXrtCloudController::create_tensorbuffer_for_
             bufsView_.push_back(std::move(buf));
           }
         } else {
+          xir::Tensor *tensor = const_cast<xir::Tensor*>(tensors[ts]);
+          //for (unsigned int f=0; f< tensors_xir.size(); f++) {
+          //  if (tensors[f]->get_name() == tensors_xir[ts]->get_name()) {
+          //    tensor = const_cast<xir::Tensor*>(tensors[f]);
+          //    break;
+          //  }
+          //}
           std::unique_ptr<vart::TensorBufferExtImpView> buf(
-              new vart::TensorBufferExtImpView(tensors[ts], tensor_offset[ts], bufPhy));
-          tbufs.emplace_back(buf.get());
+              new vart::TensorBufferExtImpView(tensor, tensor_offset[ts], bufPhy));
+          tbufs[ts] = buf.get();
           {
             std::unique_lock<std::mutex> lock(hwbufio_mtx_);
             bufsView2Phys_.emplace(buf.get(), bufPhy);
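The substantive change in this file: tbufs is now pre-sized with resize() and filled by tensor index (tbufs[ts] = buf.get()) instead of reserve() plus emplace_back(). Because the surrounding while loop visits tensors grouped by register id (xdpu_total_reg_map), appending produced buffers in register order rather than in the model's tensor order; indexed assignment pins slot ts to tensors[ts]. A minimal self-contained sketch of the difference — the Tensor struct, names, and two-register layout are illustrative assumptions, not the VART types:

// Sketch: two tensors whose register ids invert their visit order.
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct Tensor { std::string name; int reg_id; };  // illustrative stand-in

int main() {
  // Model order: t0 lives in reg 1, t1 in reg 0.
  std::vector<Tensor> tensors = {{"t0", 1}, {"t1", 0}};

  // Old pattern: iterate registers and append -- result follows register order.
  std::vector<std::string> appended;
  appended.reserve(tensors.size());
  for (int reg = 0; reg < 2; ++reg)
    for (std::size_t ts = 0; ts < tensors.size(); ++ts)
      if (tensors[ts].reg_id == reg) appended.emplace_back(tensors[ts].name);
  assert(appended[0] == "t1");  // out of order w.r.t. the tensor list

  // New pattern: pre-size, assign by tensor index -- result follows tensor order.
  std::vector<std::string> indexed(tensors.size());
  for (int reg = 0; reg < 2; ++reg)
    for (std::size_t ts = 0; ts < tensors.size(); ++ts)
      if (tensors[ts].reg_id == reg) indexed[ts] = tensors[ts].name;
  assert(indexed[0] == "t0");  // matches the tensor list
  return 0;
}

With appending, the buffer for tensors[0] lands at position 1 whenever its register id sorts later; the indexed version is insensitive to visit order.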
10 changes: 8 additions & 2 deletions controller/src/common/graph.cpp
@@ -368,10 +368,12 @@ void DpuXmodel::init_graph(const xir::Subgraph* subgraph) {
   //xdpu_total_in_size=0;
   //xdpu_total_out_size=0;
   graph_intensors_.reserve(input_tensors.size());
+  xdpu_io_input_offset.resize(input_tensors.size());
+  int cnt = 0;
   for (auto &in_tensor : input_tensors) {
     auto out = find_tensor(in_tensor,subgraph_,true);
     auto ddr_addr = out->get_attr<std::int32_t>("ddr_addr");
-    xdpu_io_input_offset.emplace_back(ddr_addr);
+    xdpu_io_input_offset[cnt] = ddr_addr;
     //input_scales_.emplace(in_tensor->get_name(),pow(2,in_tensor->get_attr<std::int32_t>("fix_point")));
     input_scales_.emplace_back(pow(2,in_tensor->get_attr<std::int32_t>("fix_point")));
     auto dims = in_tensor->get_shape();
@@ -389,6 +391,7 @@ void DpuXmodel::init_graph(const xir::Subgraph* subgraph) {
     tensor->set_attrs(std::move(attrs));
     input_regid.emplace_back(out->get_attr<std::int32_t>("reg_id"));
     graph_intensors_.emplace_back(std::move(tensor));
+    cnt++;
     //tensors_.emplace_back(std::move(tensor));
     //xdpu_total_in_size += tensor->get_element_num();

@@ -397,10 +400,12 @@ void DpuXmodel::init_graph(const xir::Subgraph* subgraph) {

   // Get output offset
   graph_outtensors_.reserve(output_tensors.size());
+  xdpu_io_output_offset.resize(output_tensors.size());
+  cnt = 0;
   for(auto &out_tensor : output_tensors) {
     auto out = find_tensor(out_tensor,subgraph_,false);
     auto ddr_addr = out->get_attr<std::int32_t>("ddr_addr");
-    xdpu_io_output_offset.emplace_back(ddr_addr);
+    xdpu_io_output_offset[cnt] = ddr_addr;
     //output_scales_.emplace(out_tensor->get_name(),pow(2,(-1)*out_tensor->get_attr<std::int32_t>("fix_point")));
     output_scales_.emplace_back(pow(2,(-1)*out_tensor->get_attr<std::int32_t>("fix_point")));
     output_dims.emplace_back(out->get_shape());
@@ -417,6 +422,7 @@ void DpuXmodel::init_graph(const xir::Subgraph* subgraph) {
   //auto tensor = out;
   output_regid.emplace_back(out->get_attr<std::int32_t>("reg_id"));
   graph_outtensors_.emplace_back(std::move(tensor));
+  cnt++;
   //xdpu_total_out_size += tensor->get_element_num();

 }
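graph.cpp applies the same pattern to the offset vectors: pre-size with resize() and write through the cnt index rather than emplace_back(). This keeps xdpu_io_input_offset[i] and xdpu_io_output_offset[i] paired with tensor i, and as a side effect the fill is idempotent — re-running it cannot grow the vectors. A small sketch under assumed names (init_offsets is illustrative, not the controller's API):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative helper, not the controller's API: fill offsets so that
// offsets[i] always belongs to tensor i.
void init_offsets(std::vector<std::int32_t>& offsets,
                  const std::vector<std::int32_t>& ddr_addrs) {
  offsets.resize(ddr_addrs.size());   // pre-size instead of appending
  int cnt = 0;
  for (auto ddr_addr : ddr_addrs) {
    offsets[cnt] = ddr_addr;          // slot cnt stays paired with tensor cnt
    cnt++;
  }
}

int main() {
  std::vector<std::int32_t> offsets;
  init_offsets(offsets, {0x100, 0x200});
  init_offsets(offsets, {0x100, 0x200});  // re-running does not duplicate entries
  assert(offsets.size() == 2 && offsets[1] == 0x200);
  return 0;
}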
