This repository has been archived by the owner on Oct 13, 2021. It is now read-only.

Set axes=[0] for apply_squeeze rather than leaving unset #668

Merged 5 commits on Dec 17, 2020.

The changes shown below are from 3 of the 5 commits.
36 changes: 30 additions & 6 deletions keras2onnx/_builtin.py
@@ -553,6 +553,10 @@ def convert_tf_depth_to_space(scope, operator, container):
     node = operator.raw_operator
     block_size = node.get_attr('block_size')
     oopb = OnnxOperatorBuilder(container, scope)
+    if container.target_opset < 13:
+        op_version = 11
+    else:
+        op_version = 13
     if _is_nhwc(node):
         adjusted_input_name = oopb.apply_transpose(operator.input_full_names,
                                                    name=operator.full_name + '_pre_transpose',
@@ -562,7 +566,7 @@ def convert_tf_depth_to_space(scope, operator, container):
                                                   name=operator.full_name,
                                                   blocksize=node.get_attr('block_size'),
                                                   mode="DCR",
-                                                  op_version=11)
+                                                  op_version=op_version)
         oopb.apply_op_with_output("apply_transpose",
                                   depth_to_space_result,
                                   operator.output_full_names,
@@ -575,7 +579,7 @@ def convert_tf_depth_to_space(scope, operator, container):
                                      name=operator.full_name,
                                      blocksize=block_size,
                                      mode="DCR",
-                                     op_version=11)
+                                     op_version=op_version)


 @converter_func(TYPES.DepthwiseConv2dNative)
@@ -1269,13 +1273,17 @@ def convert_reshape_timedistributed(scope, operator, container):
     else:
         oopb = OnnxOperatorBuilder(container, scope)
         shape0 = oopb.apply_shape(input_name, name=operator.full_name + '_shape')
+        if container.target_opset < 13:
+            op_version = 11
+        else:
+            op_version = 13
         cropped_tensor_name = oopb.add_node('Slice',
                                             [shape0[0],
                                              ('_start', oopb.int64, np.array([0], dtype=np.int64)),
                                              ('_end', oopb.int64, np.array([2], dtype=np.int64))
                                              ],
                                             operator.inputs[0].full_name + '_cropping',
-                                            op_version=11)
+                                            op_version=op_version)
         concat = oopb.apply_concat([cropped_tensor_name,
                                     ('_start', oopb.int64, np.array(target_shape, dtype=np.int64)),
                                     ], name=operator.full_name + '_concat')
@@ -1419,20 +1427,28 @@ def _convert_tf_reduce_op(scope, operator, container, onnx_op):
         axes = _cal_tensor_value(node.inputs[1]).tolist()
         axes = [axes] if np.isscalar(axes) else axes

-    if operator.target_opset < 11:
+    if container.target_opset < 11:
+        op_version = 1
         input_shape = _cal_tensor_shape(node.inputs[0])
         if input_shape is None:
             if any([val < 0 for val in axes]):
                 raise ValueError("reduce_op: cannot have negative axis because we don't know input rank")
         else:
             input_rank = len(input_shape)
             axes = [val + input_rank if val < 0 else val for val in axes]
+    elif container.target_opset < 13:
+        op_version = 11
+        if onnx_op in ['ReduceMax', 'ReduceMin'] and container.target_opset == 12:
+            op_version = 12
+    else:
+        op_version = 13

     keepdims = node.get_attr("keep_dims")
     oopb.add_node_with_output(onnx_op,
                               operator.inputs[0].full_name,
                               operator.outputs[0].full_name,
                               name=operator.full_name + '_reduce_min',
+                              op_version=op_version,
                               axes=axes, keepdims=keepdims)

@@ -2253,6 +2269,10 @@ def convert_tf_tensor_scatter_update(scope, operator, container):
     if operator.target_opset < 11:
         raise ValueError("TensorScatterUpdate op is not supported for opset = " + str(operator.target_opset))
     else:
+        if operator.target_opset < 13:
+            op_version = 11
+        else:
+            op_version = 13
         oopb = OnnxOperatorBuilder(container, scope)
         node = operator.raw_operator

@@ -2276,7 +2296,7 @@ def convert_tf_tensor_scatter_update(scope, operator, container):
                                   [operator.inputs[0].full_name, cast_indices[0], updates_name],
                                   operator.outputs[0].full_name,
                                   name=operator.full_name + '_tensor_scatter_nd',
-                                  op_version=11)
+                                  op_version=op_version)


 @converter_func(TYPES.Unpack)
@@ -2360,10 +2380,14 @@ def convert_tf_variable_v2(scope, operator, container):
 def convert_tf_where(scope, operator, container):
     oopb = OnnxOperatorBuilder(container, scope)
     node = operator.raw_operator
+    if container.target_opset < 13:
+        op_version = 9
+    else:
+        op_version = 13
     where_node = oopb.add_node('NonZero',
                                operator.inputs[0].full_name,
                                operator.inputs[0].full_name + '_non_zero',
-                               op_version=9)
+                               op_version=op_version)
     oopb.apply_op_with_output("apply_transpose",
                               where_node,
                               operator.output_full_names,
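Every hunk in this file applies the same rule: instead of hard-coding op_version=11 (or 9), the converter picks the newest ONNX op version that the container's target opset can legally use. A minimal sketch of that selection rule, condensed into a hypothetical choose_op_version helper (keras2onnx keeps these checks inline; the helper name and standalone form are illustrative only):

# Hypothetical helper, not part of keras2onnx: it condenses the
# inline `if container.target_opset < 13: ...` checks above.
def choose_op_version(target_opset, candidates):
    """Return the newest op version usable at `target_opset`.

    `candidates` are the opset versions at which the ONNX op's schema
    changed, e.g. (11, 13) for DepthToSpace and Slice above, (9, 13)
    for NonZero, and (1, 11, 12, 13) for ReduceMax/ReduceMin.
    """
    eligible = [v for v in candidates if v <= target_opset]
    if not eligible:
        raise ValueError("op not supported at opset %d" % target_opset)
    return max(eligible)

# Mirrors the inline checks in the diff:
assert choose_op_version(12, (11, 13)) == 11            # Slice, DepthToSpace
assert choose_op_version(13, (9, 13)) == 13             # NonZero
assert choose_op_version(12, (1, 11, 12, 13)) == 12     # ReduceMax/ReduceMin at opset 12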
6 changes: 3 additions & 3 deletions keras2onnx/ke2onnx/lstm.py
@@ -224,7 +224,7 @@ def build_output_states(scope, operator, container, output_names, bidirectional=
         squeeze_names.extend(list(zip(split_names, outputs)))

         for split_name, output_name in squeeze_names:
-            apply_squeeze(scope, split_name, output_name, container)
+            apply_squeeze(scope, split_name, output_name, container, axes=[0])

     else:
         output_state = op.return_state
@@ -234,8 +234,8 @@ def build_output_states(scope, operator, container, output_names, bidirectional=

         output_h = operator.outputs[1].full_name
         output_c = operator.outputs[2].full_name
-        apply_squeeze(scope, lstm_h, output_h, container)
-        apply_squeeze(scope, lstm_c, output_c, container)
+        apply_squeeze(scope, lstm_h, output_h, container, axes=[0])
+        apply_squeeze(scope, lstm_c, output_c, container, axes=[0])


 def _calculate_keras_lstm_output_shapes(operator):
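The axes=[0] argument is the substance of this PR. The ONNX LSTM op returns its final states with shape [num_directions, batch_size, hidden_size], and only the leading num_directions axis should be removed; a Squeeze with no axes drops every size-1 dimension, so it also eats the batch axis whenever batch_size is 1. A small numpy sketch of the hazard (numpy squeeze used as a stand-in for ONNX Squeeze semantics):

import numpy as np

# ONNX LSTM state output: [num_directions, batch_size, hidden_size]
y_h = np.zeros((1, 1, 4), dtype=np.float32)   # unidirectional, batch of 1

print(np.squeeze(y_h).shape)           # (4,)   -- axes unset: batch axis lost
print(np.squeeze(y_h, axis=0).shape)   # (1, 4) -- axes=[0]: [batch, hidden] kept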
4 changes: 2 additions & 2 deletions keras2onnx/ke2onnx/simplernn.py
@@ -391,14 +391,14 @@ def build_output_states(scope, operator, container, output_names, bidirectional=
         apply_split(scope, rnn_h, split_names, container)

         for split_name, output_name in zip(split_names, output_names):
-            apply_squeeze(scope, split_name, output_name, container)
+            apply_squeeze(scope, split_name, output_name, container, axes=[0])

     else:
         output_state = op.return_state

         if output_state:
             output_h = operator.outputs[1].full_name
-            apply_squeeze(scope, rnn_h, output_h, container)
+            apply_squeeze(scope, rnn_h, output_h, container, axes=[0])


 def is_time_major(op, bidirectional):
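A second reason to pass axes explicitly: from opset 13 onward, ONNX Squeeze takes axes as a tensor input rather than a node attribute, so a converter needs a concrete axes value either way. A sketch of what the emitted node looks like under each opset, written with plain onnx.helper (illustrative only; this is not the onnxconverter-common apply_squeeze implementation):

import numpy as np
from onnx import helper, numpy_helper

def make_squeeze(input_name, output_name, axes, target_opset, initializers):
    if target_opset < 13:
        # Opset < 13: axes is an attribute on the Squeeze node.
        return helper.make_node('Squeeze', [input_name], [output_name], axes=axes)
    # Opset >= 13: axes is a second input, supplied as an int64 initializer.
    axes_name = output_name + '_axes'
    initializers.append(
        numpy_helper.from_array(np.array(axes, dtype=np.int64), axes_name))
    return helper.make_node('Squeeze', [input_name, axes_name], [output_name])

# Example: squeeze the num_directions axis of an LSTM state at opset 13.
inits = []
node = make_squeeze('lstm_h', 'output_h', [0], target_opset=13, initializers=inits)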