Skip to content

Commit

Permalink
[BuddyWhisper]Add BuddyWhisper and Conv1d to buddy-mlir.
Browse files Browse the repository at this point in the history
  • Loading branch information
breezejh committed Jun 16, 2024
1 parent 2ad7ce7 commit 1efed88
Showing 1 changed file with 83 additions and 7 deletions.
90 changes: 83 additions & 7 deletions frontend/Python/ops/tosa.py
Original file line number Diff line number Diff line change
Expand Up @@ -1025,20 +1025,18 @@ def convolution2d_op(node: Conv2dOp, symbol_table):
else:
bias_tensor = symbol_table.get((str(bias), 0))

# Prepare input padding.
if len(input_padding) == 1:
input_padding = [input_padding[0]] * 4
elif len(input_padding) == 2:
input_padding = [input_padding[0]] * 2 + [input_padding[1]] * 2

# Prepare attributes.
input_padding_attr = ir._denseI64ArrayAttr(input_padding, None)
dilation_attr = ir._denseI64ArrayAttr(dilation, None)
stride_attr = ir._denseI64ArrayAttr(stride, None)

# TODO: Convolution 1D
# Convolution 2D
if len(weight_shape) == 4:
# Prepare input padding.
if len(input_padding) == 1:
input_padding = [input_padding[0]] * 4
elif len(input_padding) == 2:
input_padding = [input_padding[0]] * 2 + [input_padding[1]] * 2
# If the input layout is NCHW, then convert to NHWC.
if node._layout.find("NCHW") != -1:
perm_list = [0, 2, 3, 1]
Expand Down Expand Up @@ -1166,6 +1164,84 @@ def convolution2d_op(node: Conv2dOp, symbol_table):
op = tosa.TransposeOp(
permute_result_type, op.result, perm_const_op.results[0]
)
# Convolution 1D
elif len(weight_shape) == 3:
# Prepare input with padding.
if input_padding[0] != 0:
input_shape = list(ir.RankedTensorType(input_val.type).shape)
padded_type = ir.RankedTensorType.get(
[
input_shape[0],
input_shape[1],
input_shape[2] + 2 * input_padding[0],
],
result_element_type,
)
pad_values_type = ir.RankedTensorType.get(
[3, 2], ir.IntegerType.get_signless(32)
)
pad_values = ir.DenseElementsAttr.get(
numpy.array(
[[0, 0], [0, 0], [input_padding[0], input_padding[0]]],
dtype=numpy.int32,
),
type=pad_values_type,
)
pad_constant = arith.ConstantOp(pad_values_type, pad_values).result
input_val = tosa.PadOp(padded_type, input_val, pad_constant)
output_type = ir.RankedTensorType.get(out_shape, result_element_type)
output_conv = tensor.EmptyOp(list(out_shape), result_element_type)
assert groups == 1, "only support one group"
# Conv1D Operation Without Bias
conv_op = linalg.conv_1d_ncw_fcw(
input_val,
weight_val,
outs=[output_conv],
strides=stride_attr,
dilations=dilation_attr,
)
output = tensor.EmptyOp(list(out_shape), result_element_type)
generic_map = ir.AffineMap.get_permutation(
[i for i in range(len(list(out_shape)))]
)
loop_type = [
ir.Attribute.parse("#linalg.iterator_type<parallel>")
] * len(list(out_shape))
loop_type[1] = ir.Attribute.parse("#linalg.iterator_type<reduction>")
# Add Bias To Conv1d.
op = linalg.GenericOp(
[output_type],
[conv_op, bias_tensor],
[output],
ir.ArrayAttr.get(
[
ir.AffineMapAttr.get(
generic_map.get_submap(
[i for i in range(len(list(out_shape)))]
)
),
ir.AffineMapAttr.get(generic_map.get_submap([1])),
ir.AffineMapAttr.get(
generic_map.get_submap(
[i for i in range(len(list(out_shape)))]
)
),
]
),
ir.ArrayAttr.get(loop_type),
)
block = ir.Block.create_at_start(
op.region,
[
result_element_type,
ir.RankedTensorType(bias_tensor.type).element_type,
result_element_type,
],
)
add_op = arith.AddFOp(block.arguments[1], block.arguments[0])
block.append(add_op)
block.append(linalg.YieldOp([add_op.result]))

return op


Expand Down

0 comments on commit 1efed88

Please sign in to comment.