[quantizer] fix broadcast bug (#236)
zk1998 authored Jul 19, 2023
1 parent 4be57e2 commit 83ada42
Showing 2 changed files with 14 additions and 4 deletions.
tests/quantizer_test.py (10 additions, 0 deletions)

@@ -1293,6 +1293,16 @@ def forward(self, x):

         check_quantize_rewrite(model, inputs)

+    def test_quantized_mul_different_shape_complex(self):
+        class Model(nn.Module):
+            def forward(self, x):
+                return x.transpose(0, 1) * x
+
+        model = Model()
+        inputs = torch.randn(1, 3, 224, 224)
+
+        check_quantize_rewrite(model, inputs)
+
     def test_quantized_add_relu_different_shape(self):
         class Model(nn.Module):
             def forward(self, x):
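For context on what the new test exercises (a sketch, not part of the commit): transposing the first two axes of the (1, 3, 224, 224) input gives (3, 1, 224, 224), so the multiply broadcasts in both directions and neither operand is a one-sided broadcast of the other.

import torch

x = torch.randn(1, 3, 224, 224)
lhs = x.transpose(0, 1)  # shape (3, 1, 224, 224)

# dim 0 expands on the right operand and dim 1 on the left operand,
# so the result shape matches neither input's shape.
out = lhs * x
print(out.shape)  # torch.Size([3, 3, 224, 224])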
tinynn/graph/quantization/quantizer.py (4 additions, 4 deletions)

@@ -2821,17 +2821,17 @@ def _is_broadcastable_binary_quantized_op_node(node: TraceNode, custom_data) ->

     for l_dim, r_dim in zip(l_shape, r_shape):
         if l_dim > r_dim:
-            if ref_index in (None, 0):
+            if ref_index in (None, 0) and r_dim == 1:
                 ref_index = 0
             else:
                 ref_index = -1
-            break
+                break
         elif l_dim < r_dim:
-            if ref_index in (None, 1):
+            if ref_index in (None, 1) and l_dim == 1:
                 ref_index = 1
             else:
                 ref_index = -1
-            break
+                break

     if ref_index >= 0:
         src_index = 1 - ref_index
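To see how the two changes interact, here is a minimal standalone sketch of the fixed scan (the function name and the driver calls are invented for illustration; the quantizer plumbing around the loop is omitted). The added `and r_dim == 1` / `and l_dim == 1` guards reject dimension pairs that differ without the smaller side being 1, i.e. pairs that are not broadcastable at all. Reading the diff, the pre-fix loop also appears to stop at the first differing dimension; keeping the break inside the else lets the scan continue past a valid broadcast dimension, so a later conflicting dimension can still demote ref_index to -1, which is exactly what x.transpose(0, 1) * x needs.

def broadcast_ref_index(l_shape, r_shape):
    # 0: r_shape broadcasts onto l_shape; 1: the reverse;
    # -1: mixed/invalid; None: shapes are identical.
    ref_index = None
    for l_dim, r_dim in zip(l_shape, r_shape):
        if l_dim > r_dim:
            if ref_index in (None, 0) and r_dim == 1:
                ref_index = 0
            else:
                ref_index = -1
                break
        elif l_dim < r_dim:
            if ref_index in (None, 1) and l_dim == 1:
                ref_index = 1
            else:
                ref_index = -1
                break
    return ref_index

# The case from the new test: dim 0 picks the left side as reference,
# dim 1 picks the right side, so no single reference exists.
print(broadcast_ref_index((3, 1, 224, 224), (1, 3, 224, 224)))  # -1
# A plain one-sided broadcast still resolves to a reference index.
print(broadcast_ref_index((1, 3, 224, 224), (1, 1, 224, 224)))  # 0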
