Skip to content

Commit

Permalink
Error fixes in adding allBlocks to IT #20
Browse files Browse the repository at this point in the history
  • Loading branch information
pflynn157 authored and pthomadakis committed Sep 6, 2024
1 parent 0c19616 commit d055c0e
Show file tree
Hide file tree
Showing 11 changed files with 1,171 additions and 568 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
if (MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c- /GR-")
else ()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -fno-rtti")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -fno-rtti -g")
endif ()

#-------------------------------------------------------------------------------
Expand Down
1,616 changes: 1,074 additions & 542 deletions first.mlir

Large diffs are not rendered by default.

47 changes: 33 additions & 14 deletions first.ta
Original file line number Diff line number Diff line change
@@ -1,19 +1,38 @@
# Sparse matrix sparse matrix elementwise addition
# Sparse matrix is in CSR format. Currently workspace transformation on the IndexTree dialect works for only CSR format
# RUN: comet-opt --opt-comp-workspace --convert-ta-to-it --convert-to-loops --convert-to-llvm %s &> eltwise_add_CSRxCSR_oCSR.llvm
# RUN: export SPARSE_FILE_NAME0=%comet_integration_test_data_dir/test_rank2.mtx
# RUN: export SPARSE_FILE_NAME1=%comet_integration_test_data_dir/test_rank2_transpose.mtx
# RUN: mlir-cpu-runner eltwise_add_CSRxCSR_oCSR.llvm -O3 -e main -entry-point-result=void -shared-libs=%comet_utility_library_dir/libcomet_runner_utils%shlibext | FileCheck %s


def main() {
#IndexLabel Declarations
IndexLabel [a] = [?];
IndexLabel [b] = [?];
#IndexLabel Declarations
IndexLabel [i] = [?];
IndexLabel [j] = [?];

#Tensor Declarations
Tensor<double> A([i, j], {CSR});
Tensor<double> B([i, j], {CSR});
Tensor<double> C([i, j], {CSR});

Tensor<double> A([a, b], {CSR});
Tensor<double> B([b, a], {Dense});
Tensor<double> C([b, a], {Dense});

A[a, b] = comet_read(0);
B[b, a] = 1.0;
C[b, a] = A[a, b] * B[b, a];
#C[b, a] = 1.0;
#Tensor Readfile Operation
A[i, j] = comet_read(0);
B[i, j] = comet_read(1);

print(A);
print(B);
print(C);
#Tensor Contraction
C[i, j] = A[i, j] + B[i, j];
print(C);
}

# Print the result for verification.
# CHECK: data =
# CHECK-NEXT: 5,
# CHECK-NEXT: data =
# CHECK-NEXT: 0,
# CHECK-NEXT: data =
# CHECK-NEXT: 0,2,4,5,7,9,
# CHECK-NEXT: data =
# CHECK-NEXT: 0,3,1,4,2,0,3,1,4,
# CHECK-NEXT: data =
# CHECK-NEXT: 2,5.5,4,7.7,6,5.5,8,7.7,10,
1 change: 1 addition & 0 deletions include/comet/Dialect/Utils/Utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,7 @@ namespace mlir

void getFormatsPermsOfComputeOp(Value computeOp,
std::vector<std::vector<std::string>> &opFormats,
std::vector<std::vector<std::string>> &opBlocks,
std::vector<std::vector<int>> &opPerms,
std::vector<std::vector<bool>> &inputOutputMapping);

Expand Down
5 changes: 3 additions & 2 deletions lib/Dialect/IndexTree/Transforms/Fusion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,9 +185,10 @@ void IndexTreeKernelFusionPass::test(mlir::func::FuncOp &funcop)
comet_debug() << " is_comp_worksp_opt: " << is_comp_worksp_opt << " semiring: " << semiring << "\n";

std::vector<std::vector<std::string>> opFormats;
std::vector<std::vector<std::string>> opBlocks;
std::vector<std::vector<int>> opPerms;
std::vector<std::vector<bool> > inputOutputMapping;
getFormatsPermsOfComputeOp(computeOp, opFormats, opPerms, inputOutputMapping);
getFormatsPermsOfComputeOp(computeOp, opFormats, opBlocks, opPerms, inputOutputMapping);
/// opFormats
comet_debug() << "[";
for (auto strings: opFormats) {
Expand Down Expand Up @@ -537,7 +538,7 @@ mlir::Value IndexTreeKernelFusionPass::createReducedComputeRHS(
SmallVector<StringRef, 8> blocks;
if (b_i == tensor_id)
{ /// for the new reduced tensor
blocks.insert(blocks.end(), old_formats_strs[b_i].begin() + rank_base, old_blocks_strs[b_i].end());
blocks.insert(blocks.end(), old_blocks_strs[b_i].begin() + rank_base, old_blocks_strs[b_i].end());
}
else
{ /// for other remaining old operands
Expand Down
10 changes: 5 additions & 5 deletions lib/Dialect/IndexTree/Transforms/WorkspaceTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -669,7 +669,7 @@ std::vector<Value> CompressedWorkspaceOutput(std::vector<int> sparseDimsOutput,

/// Convert blocks string array into StrAttr
std::vector<std::string> c3_blocks_str_0 = {"UNK"};
std::vector<std::string> c3_blocks_str_1 = opFormats[2];
std::vector<std::string> c3_blocks_str_1 = opBlocks[2];
std::vector<std::vector<std::string>> c3_blocks_str = {c3_blocks_str_0, c3_blocks_str_1};

std::vector<mlir::Value> c3_rhs = workspaceTensors;
Expand Down Expand Up @@ -777,7 +777,7 @@ void CompressedWorkspaceInput(std::vector<Value> computeOps, OpBuilder &builder,
std::vector<std::vector<std::string>> opBlocks;
std::vector<std::vector<int>> opPerms;
std::vector<std::vector<bool>> inputOutputMapping;
getFormatsPermsOfComputeOp(computeOp, opFormats, opPerms, inputOutputMapping);
getFormatsPermsOfComputeOp(computeOp, opFormats, opBlocks, opPerms, inputOutputMapping);
comet_debug() << " \n";
for (auto n : opFormats)
{
Expand Down Expand Up @@ -901,8 +901,8 @@ void CompressedWorkspaceInput(std::vector<Value> computeOps, OpBuilder &builder,
std::vector<std::string> c2_formats_str_1 = {"D"};
std::vector<std::vector<std::string>> c2_formats_str = {c2_formats_str_0, c2_formats_str_1};

std::vector<std::string> c2_blocks_str_0 = opFormats[sparseDimsInput[0].tensorId];
std::vector<std::string> c2_blocks_str_1 = {"D"};
std::vector<std::string> c2_blocks_str_0 = opBlocks[sparseDimsInput[0].tensorId];
std::vector<std::string> c2_blocks_str_1 = {"UNK"};
std::vector<std::vector<std::string>> c2_blocks_str = {c2_blocks_str_0, c2_blocks_str_1};

std::vector<mlir::Value> c2_rhs = {tensors_rhs[sparseDimsInput[0].tensorId]};
Expand Down Expand Up @@ -1079,7 +1079,7 @@ void IndexTreeWorkspaceTransformationsPass::CompressedWorkspaceTransforms(mlir::
std::vector<std::vector<std::string>> opBlocks;
std::vector<std::vector<int>> opPerms;
std::vector<std::vector<bool>> inputOutputMapping;
getFormatsPermsOfComputeOp(computeOp, opFormats, opPerms, inputOutputMapping);
getFormatsPermsOfComputeOp(computeOp, opFormats, opBlocks, opPerms, inputOutputMapping);

#ifdef DEBUG_MODE_WorkspaceTransformsPass
comet_debug() << "Print opFormats:\n";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ using namespace mlir::indexTree;
#define DEBUG_TYPE "tensor-decl-lowering"

// *********** For debug purpose *********//
#define COMET_DEBUG_MODE
// #define COMET_DEBUG_MODE
#include "comet/Utils/debug.h"
#undef COMET_DEBUG_MODE
// *********** For debug purpose *********//
Expand Down
15 changes: 14 additions & 1 deletion lib/Dialect/Utils/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1476,15 +1476,19 @@ namespace mlir
/// Get the perms and formats of the itCompute op
void getFormatsPermsOfComputeOp(Value computeOp,
std::vector<std::vector<std::string>> &opFormats,
std::vector<std::vector<std::string>> &opBlocks,
std::vector<std::vector<int>> &opPerms,
std::vector<std::vector<bool>> &inputOutputMapping)
{
indexTree::IndexTreeComputeRHSOp itComputeOp_rhs = dyn_cast<indexTree::IndexTreeComputeRHSOp>(computeOp.getDefiningOp()->getOperand(0).getDefiningOp());
ArrayAttr opFormatsArrayAttr_rhs = itComputeOp_rhs.getAllFormats();
ArrayAttr opBlocksArrayAttr_rhs = itComputeOp_rhs.getAllBlocks();
ArrayAttr opPermsArrayAttr_rhs = itComputeOp_rhs.getAllPerms();
indexTree::IndexTreeComputeLHSOp itComputeOp_lhs = dyn_cast<indexTree::IndexTreeComputeLHSOp>(computeOp.getDefiningOp()->getOperand(1).getDefiningOp());
ArrayAttr opFormatsArrayAttr_lhs = itComputeOp_lhs.getAllFormats();
ArrayAttr opBlocksArrayAttr_lhs = itComputeOp_lhs.getAllBlocks();
ArrayAttr opPermsArrayAttr_lhs = itComputeOp_lhs.getAllPerms();
///TODO(patrick) We should probably verify the block sizes
assert(opFormatsArrayAttr_rhs.size() == opPermsArrayAttr_rhs.size() && "not equal RHS formats size with perms size\n");
assert(opFormatsArrayAttr_lhs.size() == opPermsArrayAttr_lhs.size() && "not equal LHS formats size with perms size\n");

Expand All @@ -1493,17 +1497,25 @@ namespace mlir
comet_debug() << "Start printing opFormats_rhs\n";
std::vector<std::vector<std::string>> opFormats_rhs = convertArrayAttrStrTo2DVector(opFormatsArrayAttr_rhs);
comet_debug() << "End printing opFormats_rhs\n";
comet_debug() << "Start printing opBlocks_rhs\n";
std::vector<std::vector<std::string>> opBlocks_rhs = convertArrayAttrStrTo2DVector(opBlocksArrayAttr_rhs);
comet_debug() << "End printing opBlocks_rhs\n";
std::vector<std::vector<int>> opPerms_rhs = convertArrayAttrIntTo2DVector(opPermsArrayAttr_rhs);
std::vector<std::vector<bool>> inputMapping = createInputOutputMapping(opPermsArrayAttr_rhs, true);

comet_debug() << "Start printing opFormats_lhs\n";
std::vector<std::vector<std::string>> opFormats_lhs = convertArrayAttrStrTo2DVector(opFormatsArrayAttr_lhs);
comet_debug() << "End printing opFormats_lhs\n";
comet_debug() << "Start printing opBlocks_lhs\n";
std::vector<std::vector<std::string>> opBlocks_lhs = convertArrayAttrStrTo2DVector(opBlocksArrayAttr_lhs);
comet_debug() << "End printing opBlocks_lhs\n";
std::vector<std::vector<int>> opPerms_lhs = convertArrayAttrIntTo2DVector(opPermsArrayAttr_lhs);
std::vector<std::vector<bool>> outputMapping = createInputOutputMapping(opPermsArrayAttr_lhs, false);

opFormats = opFormats_rhs;
opFormats.insert(opFormats.end(), opFormats_lhs.begin(), opFormats_lhs.end());
opBlocks = opBlocks_rhs;
opBlocks.insert(opBlocks.end(), opBlocks_lhs.begin(), opBlocks_lhs.end());
opPerms = opPerms_rhs;
opPerms.insert(opPerms.end(), opPerms_lhs.begin(), opPerms_lhs.end());
inputOutputMapping = inputMapping;
Expand Down Expand Up @@ -1616,10 +1628,11 @@ namespace mlir
{
comet_debug() << " getFormatsInfo:leafs[" << j << "] is computeOp\n";
std::vector<std::vector<std::string>> allFormats;
std::vector<std::vector<std::string>> allBlocks;
std::vector<std::vector<int>> allPerms;
std::vector<std::vector<bool>> inputOutputMapping;
OpBuilder builder(leafop);
getFormatsPermsOfComputeOp(leafop, allFormats, allPerms, inputOutputMapping);
getFormatsPermsOfComputeOp(leafop, allFormats, allBlocks, allPerms, inputOutputMapping);

comet_debug() << " getFormatsInfo:Allformats allFormats.size(): " << allFormats.size() << "\n";
for (auto m : allFormats)
Expand Down
8 changes: 6 additions & 2 deletions run.sh
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
#!/bin/bash

export LD_LIBRARY_PATH="/home/patrick/Work/PNNL/COMET/install/lib"
export SPARSE_FILE_NAME0=first.mtx

build/bin/comet-opt --convert-ta-to-it --convert-to-loops --convert-to-llvm first.ta &> first.mlir
#export SPARSE_FILE_NAME0=first.mtx
#build/bin/comet-opt --convert-ta-to-it --convert-to-loops --convert-to-llvm first.ta &> first.mlir

export SPARSE_FILE_NAME0=integration_test/data/test_rank2.mtx
export SPARSE_FILE_NAME1=integration_test/data/test_rank2_transpose.mtx
build/bin/comet-opt --opt-comp-workspace --convert-ta-to-it --convert-to-loops --convert-to-llvm first.ta &> first.mlir

llvm/build/bin/mlir-cpu-runner first.mlir -O3 -e main -entry-point-result=void \
-shared-libs=build/lib/libcomet_runner_utils.so
Expand Down
13 changes: 13 additions & 0 deletions test.mtx
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
%%MatrixMarket matrix coordinate real general
%
% This is a test sparse matrix in Matrix Market Exchange Format.
% see https://math.nist.gov/MatrixMarket
%
4 6 7
1 1 5.0
1 2 1.0
2 1 7.0
2 2 3.0
4 1 8.0
4 4 4.0
4 5 9.0
20 changes: 20 additions & 0 deletions test.ta
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Test program: dense = dense * sparse (CSR) matrix multiplication, C[a,c] = A[a,b] * B[b,c].
# B is read from a MatrixMarket file via comet_read(0) — presumably the file named by
# the SPARSE_FILE_NAME0 environment variable (see run.sh in this commit); TODO confirm.
def main() {
#IndexLabel Declarations
# [a] has a fixed size of 4; [b] and [c] are dynamic ([?]) and are resolved from the input matrix.
IndexLabel [a] = [4];
IndexLabel [b] = [?];
IndexLabel [c] = [?];

#Tensor Declarations
Tensor<double> B([b, c], {CSR}); #sparse tensor declarations should be before dense tensor declarations
Tensor<double> A([a, b], {Dense});
Tensor<double> C([a, c], {Dense});

#Tensor Fill Operation
# A is filled with the constant 1.0; B is loaded from sparse input file 0; C is zero-initialized.
A[a, b] = 1.0;
B[b, c] = comet_read(0);
C[a, c] = 0.0;

# Matrix multiply (contraction over index b), then print the dense result.
C[a, c] = A[a, b] * B[b, c];
print(C);
#print(B);
}

0 comments on commit d055c0e

Please sign in to comment.