diff --git a/mmdnn/conversion/darknet/darknet_graph.py b/mmdnn/conversion/darknet/darknet_graph.py
index 9c782a1a..d480d869 100644
--- a/mmdnn/conversion/darknet/darknet_graph.py
+++ b/mmdnn/conversion/darknet/darknet_graph.py
@@ -68,14 +68,14 @@ def conv_output_height(height, padding, kernel_size, stride):
         return (height + 2*padding - kernel_size)/stride + 1
 
 
-    def activation(self, block, pre_node_name, input_shape):
+    def activation(self, block, pre_node_name, input_shape, id):
         if block['activation'] != 'linear':
             relu_layer = OrderedDict()
             relu_layer['input'] = [pre_node_name]
             if 'name' in block.keys():
                 relu_layer['name'] = '%s-act' % block['name']
             else:
-                relu_layer['name'] = 'layer%d-act' % i
+                relu_layer['name'] = 'layer%d-act' % id
             relu_layer['type'] = 'ReLU'
             relu_param = OrderedDict()
             if block['activation'] == 'leaky':
@@ -180,7 +180,7 @@ def build(self):
 
                     pre_node_name = bn_layer['name']
 
-                pre_node_name = self.activation(block, pre_node_name, input_shape)
+                pre_node_name = self.activation(block, pre_node_name, input_shape, i)
 
             elif block['type'] == 'maxpool':
 
@@ -301,7 +301,7 @@ def build(self):
                 self.layer_num_map[i] = shortcut_layer['name']
                 pre_node_name = shortcut_layer['name']
 
-                pre_node_name = self.activation(block, pre_node_name, input_shape)
+                pre_node_name = self.activation(block, pre_node_name, input_shape, i)
 
             elif block['type'] == 'connected':
                 fc_layer = OrderedDict()
@@ -321,7 +321,7 @@ def build(self):
                 self.layer_num_map[i] = fc_layer['name']
                 pre_node_name = fc_layer['name']
 
-                pre_node_name = self.activation(block, pre_node_name, input_shape)
+                pre_node_name = self.activation(block, pre_node_name, input_shape, i)
 
             elif block['type'] == 'softmax':
                 sm_layer = OrderedDict()
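
For reference, a minimal standalone sketch of the code path this patch fixes (the sample block dict and the call at the bottom are hypothetical; the OrderedDict layout and the 'layer%d-act' naming scheme come from the patch). Before the change, an unnamed block reached 'layer%d-act' % i inside activation(), where i is the loop variable of build() and is not in scope in the method, so the lookup fails at runtime; the layer index is now passed in explicitly as id:

from collections import OrderedDict

def activation(block, pre_node_name, id):
    # Mirrors the patched helper: `id` is the layer index supplied by the
    # caller (the loop variable `i` in build()).
    if block['activation'] != 'linear':
        relu_layer = OrderedDict()
        relu_layer['input'] = [pre_node_name]
        if 'name' in block.keys():
            relu_layer['name'] = '%s-act' % block['name']
        else:
            relu_layer['name'] = 'layer%d-act' % id  # formerly `% i` -> NameError
        relu_layer['type'] = 'ReLU'
        return relu_layer['name']
    return pre_node_name

# Hypothetical unnamed block, as produced by parsing a darknet .cfg section
# without a 'name' key; exercises the fixed branch.
print(activation({'activation': 'leaky'}, 'layer5-conv', 5))  # layer5-act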