
[frontend] Add conv1d to convolution2d_op.
breezejh committed Apr 28, 2024
1 parent ee5c0ed commit dc788f8
Showing 1 changed file with 218 additions and 118 deletions.
336 changes: 218 additions & 118 deletions frontend/Python/ops/tosa.py
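
For context: the Conv2dOp graph node lowered below follows the torch.ops.aten.convolution argument order (input, weight, bias, stride, padding, dilation, transposed, output_padding, groups), which is why the op asserts exactly nine arguments. A minimal sketch of a model that would now take the rank-3 weight path added by this commit (the module and shapes are illustrative, not part of the commit):

    import torch

    # nn.Conv1d carries a rank-3 weight (out_channels, in_channels, width),
    # so its aten.convolution node reaches the len(t) == 3 branch below.
    conv1d = torch.nn.Conv1d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    x = torch.randn(1, 3, 16)  # NCW layout
    y = conv1d(x)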
@@ -23,7 +23,7 @@
import numpy

import mlir.ir as ir
from mlir.dialects import tensor, tosa, arith, linalg
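# `arith` and `linalg` are used by the rank-3 (conv1d) lowering path below.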

from ..graph import TensorDType
from ..graph import (
@@ -967,129 +967,229 @@ def convolution2d_op(node: Conv2dOp, symbol_table):
    Import the convolution operation.
    From Buddy Conv2dOp to the MLIR TOSA `conv2d` operation for rank-4
    inputs, or to a `linalg` 1-D convolution for rank-3 inputs.
    """
    assert len(node.args) == 9
    input1 = symbol_table.get((str(node.args[0]), 0))
    weight = symbol_table.get((str(node.args[1]), 0))
    t = ir.RankedTensorType(weight.type).shape

    if len(t) == 4:
        is_kernel_transposed = node.args[6]
        dtype = node.tensor_meta["dtype"]
        result_element_type = mlir_element_type_get(dtype)
        if node._layout.find("NCHW") != -1:
            perm_list = [0, 2, 3, 1]
            perm_const_op = tosa.ConstOp(
                ir.DenseElementsAttr.get(memoryview(array.array("i", perm_list)))
            )
            out_shape = list(ir.RankedTensorType(input1.type).shape)
            perm_shape = [out_shape[0], out_shape[2], out_shape[3], out_shape[1]]
            permute_result_type = ir.RankedTensorType.get(
                perm_shape, result_element_type
            )
            input1 = tosa.TransposeOp(
                permute_result_type, input1, perm_const_op.results[0]
            ).result

        if node._layout.find("FCHW") != -1:
            perm_list = [0, 2, 3, 1]
            perm_const_op = tosa.ConstOp(
                ir.DenseElementsAttr.get(memoryview(array.array("i", perm_list)))
            )
            out_shape = list(ir.RankedTensorType(weight.type).shape)
            perm_shape = [out_shape[0], out_shape[2], out_shape[3], out_shape[1]]
            permute_result_type = ir.RankedTensorType.get(
                perm_shape, result_element_type
            )
            weight = tosa.TransposeOp(
                permute_result_type, weight, perm_const_op.results[0]
            ).result
        if is_kernel_transposed:
            in_channels = list(ir.RankedTensorType(weight.type).shape)[0]
            out_channels = list(ir.RankedTensorType(weight.type).shape)[1]
        else:
            in_channels = list(ir.RankedTensorType(weight.type).shape)[1]
            out_channels = list(ir.RankedTensorType(weight.type).shape)[0]
        if len(node._parents) == 2:
            new_size_tensor_type = ir.RankedTensorType.get(
                [out_channels], result_element_type
            )
            element = mlir_element_attr_get(dtype, 0)
            new_size_attr = ir.DenseElementsAttr.get_splat(
                new_size_tensor_type, element
            )
            bias_tensor = tosa.ConstOp(new_size_attr).results[0]
        else:
            bias_tensor = symbol_table.get((str(node.args[2]), 0))
        assert input1 is not None and weight is not None and bias_tensor is not None
        stride = node.args[3]
        input_padding = node.args[4]
        if len(input_padding) == 1:
            input_padding = [input_padding[0]] * 4
        elif len(input_padding) == 2:
            input_padding = [input_padding[0]] * 2 + [input_padding[1]] * 2
        dilation = node.args[5]
        groups = node.args[8]
        out_shape = node.tensor_meta["shape"]
        if node._layout.find("NCHW") != -1:
            out_shape = [out_shape[0], out_shape[2], out_shape[3], out_shape[1]]
        output = ir.RankedTensorType.get(out_shape, result_element_type)
        stride_attr = ir._denseI64ArrayAttr(stride, None)
        assert groups == 1, "tosa.conv2d only supports one group"
        if is_kernel_transposed:
            if sum(input_padding) > 0 or sum(dilation) > len(dilation):
                raise NotImplementedError
            out_padding = node.args[7]
            for i in range(len(out_padding), 4):
                out_padding = [0] + out_padding
            out_padding_attr = ir._denseI64ArrayAttr(out_padding, None)
            out_shape_attr = ir._denseI64ArrayAttr(out_shape, None)
            op = tosa.TransposeConv2DOp(
                output,
                input1,
                weight,
                bias_tensor,
                out_padding_attr,
                stride_attr,
                out_shape_attr,
            )
        else:
            input_padding_attr = ir._denseI64ArrayAttr(input_padding, None)
            dilation_attr = ir._denseI64ArrayAttr(dilation, None)
            op = tosa.Conv2DOp(
                output,
                input1,
                weight,
                bias_tensor,
                input_padding_attr,
                stride_attr,
                dilation_attr,
            )
        if node._layout.find("NCHW") != -1:
            perm_list = [0, 3, 1, 2]
            perm_const_op = tosa.ConstOp(
                ir.DenseElementsAttr.get(memoryview(array.array("i", perm_list)))
            )
            perm_shape = [out_shape[0], out_shape[3], out_shape[1], out_shape[2]]
            permute_result_type = ir.RankedTensorType.get(
                perm_shape, result_element_type
            )
            op = tosa.TransposeOp(
                permute_result_type, op.result, perm_const_op.results[0]
            )
        return op

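    # Rank-3 (NCW) convolutions cannot use tosa.conv2d, which expects 4-D
    # tensors, so this branch pads the input explicitly with tosa.pad,
    # convolves with linalg.conv_1d_ncw_fcw, and adds the bias through a
    # linalg.generic.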
    if len(t) == 3:
        is_kernel_transposed = node.args[6]
        dtype = node.tensor_meta["dtype"]
        result_element_type = mlir_element_type_get(dtype)
        if is_kernel_transposed:
            in_channels = list(ir.RankedTensorType(weight.type).shape)[0]
            out_channels = list(ir.RankedTensorType(weight.type).shape)[1]
        else:
            in_channels = list(ir.RankedTensorType(weight.type).shape)[1]
            out_channels = list(ir.RankedTensorType(weight.type).shape)[0]
        if len(node._parents) == 2:
            new_size_tensor_type = ir.RankedTensorType.get(
                [out_channels], result_element_type
            )
            element = mlir_element_attr_get(dtype, 0)
            new_size_attr = ir.DenseElementsAttr.get_splat(
                new_size_tensor_type, element
            )
            bias_tensor = tosa.ConstOp(new_size_attr).results[0]
        else:
            bias_tensor = symbol_table.get((str(node.args[2]), 0))
        assert input1 is not None and weight is not None and bias_tensor is not None
        stride = node.args[3]
        input_padding = node.args[4]
        dilation = node.args[5]
        groups = node.args[8]

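        # tosa.pad takes a [rank, 2] tensor of (low, high) pad amounts per
        # dimension; only the trailing W dimension is padded here.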
        if input_padding[0] != 0:
            input_shape = list(ir.RankedTensorType(input1.type).shape)
            padded_type = ir.RankedTensorType.get(
                [input_shape[0], input_shape[1], input_shape[2] + 2 * input_padding[0]],
                result_element_type,
            )
            pad_values_type = ir.RankedTensorType.get([3, 2], ir.IntegerType.get_signless(32))
            pad_values = ir.DenseElementsAttr.get(
                numpy.array([[0, 0], [0, 0], [input_padding[0], input_padding[0]]], dtype=numpy.int32),
                type=pad_values_type,
            )
            pad_constant = arith.ConstantOp(pad_values_type, pad_values).result
            input1 = tosa.PadOp(padded_type, input1, pad_constant)

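        # Convolve with the linalg named op, then broadcast-add the bias
        # over dimension 1 (output channels) with a linalg.generic.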
        out_shape = node.tensor_meta["shape"]
        output = ir.RankedTensorType.get(out_shape, result_element_type)
        output1 = tensor.EmptyOp(list(out_shape), result_element_type)
        stride_attr = ir._denseI64ArrayAttr(stride, None)
        assert groups == 1, "conv1d only supports one group"
        dilation_attr = ir._denseI64ArrayAttr(dilation, None)
        conv_op = linalg.conv_1d_ncw_fcw(
            input1,
            weight,
            outs=[output1],
            strides=stride_attr,
            dilations=dilation_attr,
        )
        output2 = tensor.EmptyOp(list(out_shape), result_element_type)
        generic_map = ir.AffineMap.get_permutation(
            [i for i in range(len(list(out_shape)))]
        )
        loop_type = [
            ir.Attribute.parse("#linalg.iterator_type<parallel>")
        ] * len(list(out_shape))
        op = linalg.GenericOp(
            [output],
            [conv_op, bias_tensor],
            [output2],
            ir.ArrayAttr.get(
                [
                    ir.AffineMapAttr.get(
                        generic_map.get_submap(
                            [i for i in range(len(list(out_shape)))]
                        )
                    ),
                    ir.AffineMapAttr.get(generic_map.get_submap([1])),
                    ir.AffineMapAttr.get(
                        generic_map.get_submap(
                            [i for i in range(len(list(out_shape)))]
                        )
                    ),
                ]
            ),
            ir.ArrayAttr.get(loop_type),
        )
        block = ir.Block.create_at_start(
            op.region,
            [
                result_element_type,
                ir.RankedTensorType(bias_tensor.type).element_type,
                result_element_type,
            ],
        )
        add_op = arith.AddFOp(block.arguments[1], block.arguments[0])
        block.append(add_op)
        block.append(linalg.YieldOp([add_op.result]))

        return op

def relu_op(node: ReluOp, symbol_table):
"""
