[examples] LeNet E2E pipeline uses batchmatmul-optimize pass.
zhanghb97 committed Aug 16, 2024
1 parent 1585e2a commit 0424893
Showing 3 changed files with 57 additions and 22 deletions.
3 changes: 2 additions & 1 deletion examples/BuddyLeNet/CMakeLists.txt
@@ -23,7 +23,8 @@ add_custom_command(
-pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" |
${BUDDY_BINARY_DIR}/buddy-opt
-eliminate-empty-tensors
-convert-tensor-to-linalg
-convert-tensor-to-linalg
-batchmatmul-optimize
-linalg-bufferize
-convert-linalg-to-affine-loops
-lower-affine
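
For context: -batchmatmul-optimize is a buddy-opt pass that, as its name suggests, is presumed to rewrite linalg.batch_matmul ops (the form the TOSA matmuls take after tosa-to-linalg-named) into a more efficient loop form before the rest of the lowering runs. A minimal, hypothetical input of the kind it targets, not part of this commit and with illustrative shapes only:

// Hypothetical standalone example (not part of this commit): a single
// linalg.batch_matmul on memrefs, the op that -batchmatmul-optimize is
// presumed to rewrite; the 120x84 / 84x10 shapes are illustrative only.
module {
  func.func @bmm(%a: memref<1x120x84xf32>, %b: memref<1x84x10xf32>,
                 %c: memref<1x120x10xf32>) {
    linalg.batch_matmul
      ins(%a, %b : memref<1x120x84xf32>, memref<1x84x10xf32>)
      outs(%c : memref<1x120x10xf32>)
    return
  }
}

The commit wires the pass into both the CMake pipeline above and the makefile targets below, in each case ahead of -convert-linalg-to-affine-loops.
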
7 changes: 7 additions & 0 deletions examples/BuddyLeNet/fake-lenet.mlir
@@ -1,5 +1,6 @@
module {
func.func private @printMemrefF32(%ptr : tensor<*xf32>)
func.func private @rtclock() -> f64

func.func @forward(%arg0: tensor<44426xf32>, %arg1: tensor<1x1x28x28xf32>) -> tensor<1x10xf32> {
%extracted_slice = tensor.extract_slice %arg0[0] [150] [1] : tensor<44426xf32> to tensor<150xf32>
@@ -81,10 +82,16 @@ module {
%fake_params = arith.constant dense<1.0> : tensor<44426xf32>
%fake_input = arith.constant dense<2.0> : tensor<1x1x28x28xf32>

%t_start = call @rtclock() : () -> f64
%fake_output = call @forward(%fake_params, %fake_input) : (tensor<44426xf32>, tensor<1x1x28x28xf32>) -> tensor<1x10xf32>
%t_end = call @rtclock() : () -> f64

%tensor_unranked = tensor.cast %fake_output : tensor<1x10xf32> to tensor<*xf32>
call @printMemrefF32(%tensor_unranked) : (tensor<*xf32>) -> ()

%time = arith.subf %t_end, %t_start : f64
vector.print %time : f64

return
}
}
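
The fake-lenet.mlir change above measures wall-clock time around the forward call with rtclock (resolved at runtime from the shared runner-utility libraries passed via -shared-libs) and prints the result with vector.print. A minimal sketch of the same idiom in isolation, assuming only that those libraries are linked in; the workload placeholder is hypothetical:

// Sketch of the timing idiom added in this commit, applied to an
// arbitrary workload (placeholder comment below is hypothetical).
module {
  func.func private @rtclock() -> f64

  func.func @main() {
    %t_start = call @rtclock() : () -> f64
    // ... kernel under test goes here ...
    %t_end = call @rtclock() : () -> f64
    %time = arith.subf %t_end, %t_start : f64
    vector.print %time : f64
    return
  }
}

Lowered through the same pipeline and run with mlir-cpu-runner and the -shared-libs flags from the makefile, this should print a single f64 elapsed time, matching what the updated main in fake-lenet.mlir now does.
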
69 changes: 48 additions & 21 deletions examples/BuddyLeNet/makefile
@@ -1,30 +1,33 @@
#!/bin/bash
BUDDY_OPT := ../../build/bin/buddy-opt
MLIR_OPT := ../../llvm/build/bin/mlir-opt
MLIR_TRANSLATE := ../../llvm/build/bin/mlir-translate
MLIR_CPU_RUNNER := ../../llvm/build/bin/mlir-cpu-runner
LLC := ../../llvm/build/bin/llc
OPT_FLAG := -O0
BUDDY_BUILD_DIR := ../../build/
LLVM_BUILD_DIR := ../../llvm/build/
BUDDY_OPT := ${BUDDY_BUILD_DIR}/bin/buddy-opt
MLIR_OPT := ${LLVM_BUILD_DIR}/bin/mlir-opt
MLIR_TRANSLATE := ${LLVM_BUILD_DIR}/bin/mlir-translate
MLIR_CPU_RUNNER := ${LLVM_BUILD_DIR}/bin/mlir-cpu-runner
LLC := ${LLVM_BUILD_DIR}/bin/llc
OPT_FLAG := -O3

ifeq ($(shell uname),Linux)
MLIR_RUNNER_UTILS := ../../llvm/build/lib/libmlir_runner_utils.so
MLIR_C_RUNNER_UTILS := ../../llvm/build/lib/libmlir_c_runner_utils.so
MLIR_ASYNC_RUNTIME := ../../llvm/build/lib/libmlir_async_runtime.so
MLIR_RUNNER_UTILS := ${LLVM_BUILD_DIR}/lib/libmlir_runner_utils.so
MLIR_C_RUNNER_UTILS := ${LLVM_BUILD_DIR}/lib/libmlir_c_runner_utils.so
MLIR_ASYNC_RUNTIME := ${LLVM_BUILD_DIR}/lib/libmlir_async_runtime.so
MTRIPLE := x86_64-unknown-linux-gnu
else ifeq ($(shell uname),Darwin)
MLIR_RUNNER_UTILS := ../../llvm/build/lib/libmlir_runner_utils.dylib
MLIR_C_RUNNER_UTILS := ../../llvm/build/lib/libmlir_c_runner_utils.dylib
MLIR_ASYNC_RUNTIME := ./../llvm/build/lib/libmlir_async_runtime.dylib
MLIR_RUNNER_UTILS := ${LLVM_BUILD_DIR}/lib/libmlir_runner_utils.dylib
MLIR_C_RUNNER_UTILS := ${LLVM_BUILD_DIR}/lib/libmlir_c_runner_utils.dylib
MLIR_ASYNC_RUNTIME := ${LLVM_BUILD_DIR}/lib/libmlir_async_runtime.dylib
MTRIPLE := x86_64-apple-darwin
endif

buddy-lenet-lower:
@${MLIR_OPT} ./fake-lenet.mlir \
@${BUDDY_OPT} ./fake-lenet.mlir \
-pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" | \
${MLIR_OPT} \
${BUDDY_OPT} \
-eliminate-empty-tensors \
-convert-tensor-to-linalg \
-linalg-bufferize \
-batchmatmul-optimize \
-convert-linalg-to-affine-loops \
-lower-affine \
-func-bufferize \
@@ -38,16 +41,15 @@ buddy-lenet-lower:
-convert-arith-to-llvm \
-finalize-memref-to-llvm \
-convert-scf-to-cf \
-llvm-request-c-wrappers \
-convert-arith-to-llvm \
-convert-func-to-llvm \
-reconcile-unrealized-casts \
-o ./log.mlir

buddy-lenet-translate:
@${MLIR_OPT} ./fake-lenet.mlir \
@${BUDDY_OPT} ./fake-lenet.mlir \
-pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" | \
${MLIR_OPT} \
${BUDDY_OPT} \
-eliminate-empty-tensors \
-convert-tensor-to-linalg \
-linalg-bufferize \
@@ -64,17 +66,16 @@ buddy-lenet-translate:
-convert-arith-to-llvm \
-finalize-memref-to-llvm \
-convert-scf-to-cf \
-llvm-request-c-wrappers \
-convert-arith-to-llvm \
-convert-func-to-llvm \
-reconcile-unrealized-casts | \
${MLIR_TRANSLATE} -mlir-to-llvmir -o log.ll


buddy-lenet-run:
@${MLIR_OPT} ./fake-lenet.mlir \
@${BUDDY_OPT} ./fake-lenet.mlir \
-pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" | \
${MLIR_OPT} \
${BUDDY_OPT} \
-eliminate-empty-tensors \
-convert-tensor-to-linalg \
-linalg-bufferize \
@@ -91,7 +92,33 @@ buddy-lenet-run:
-convert-arith-to-llvm \
-finalize-memref-to-llvm \
-convert-scf-to-cf \
-llvm-request-c-wrappers \
-convert-arith-to-llvm \
-convert-func-to-llvm \
-reconcile-unrealized-casts | \
${MLIR_CPU_RUNNER} ${OPT_FLAG} -e main -entry-point-result=void \
-shared-libs=${MLIR_RUNNER_UTILS} -shared-libs=${MLIR_C_RUNNER_UTILS}

buddy-lenet-opt-run:
@${BUDDY_OPT} ./fake-lenet.mlir \
-pass-pipeline "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith))" | \
${BUDDY_OPT} \
-eliminate-empty-tensors \
-convert-tensor-to-linalg \
-linalg-bufferize \
-batchmatmul-optimize \
-convert-linalg-to-affine-loops \
-lower-affine \
-func-bufferize \
-arith-bufferize \
-tensor-bufferize \
-buffer-deallocation \
-finalizing-bufferize \
-convert-vector-to-scf \
-expand-strided-metadata \
-convert-vector-to-llvm \
-convert-arith-to-llvm \
-finalize-memref-to-llvm \
-convert-scf-to-cf \
-convert-arith-to-llvm \
-convert-func-to-llvm \
-reconcile-unrealized-casts | \
