diff --git a/main.py b/main.py
index d1a2e79..f9c4bef 100644
--- a/main.py
+++ b/main.py
@@ -4,17 +4,17 @@
 """
 from __future__ import print_function
 
-import keras
-from keras.layers import Dense, Conv2D, BatchNormalization, Activation
-from keras.layers import AveragePooling2D, Input, Flatten
-from keras.optimizers import Adam
-from keras.callbacks import ModelCheckpoint, LearningRateScheduler
-from keras.callbacks import ReduceLROnPlateau
-from keras.preprocessing.image import ImageDataGenerator
-from keras.regularizers import l2
-from keras import backend as K
-from keras.models import Model
-from keras.datasets import cifar10
+import tensorflow.keras
+from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
+from tensorflow.keras.callbacks import ReduceLROnPlateau
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.regularizers import l2
+from tensorflow.keras import backend as K
+from tensorflow.keras.models import Model
+from tensorflow.keras.datasets import cifar10
 from models import resnext, resnet_v1, resnet_v2, mobilenets, inception_v3, inception_resnet_v2, densenet
 from utils import lr_schedule
 import numpy as np
@@ -53,17 +53,17 @@
 print('y_train shape:', y_train.shape)
 
 # Convert class vectors to binary class matrices.
-y_train = keras.utils.to_categorical(y_train, num_classes)
-y_test = keras.utils.to_categorical(y_test, num_classes)
+y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
+y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
 
 depth = 20 # For ResNet, specify the depth (e.g. ResNet50: depth=50)
-model = resnet_v1.resnet_v1(input_shape=input_shape, depth=depth, attention_module=attention_module)
+# model = resnet_v1.resnet_v1(input_shape=input_shape, depth=depth, attention_module=attention_module)
 # model = resnet_v2.resnet_v2(input_shape=input_shape, depth=depth, attention_module=attention_module)
 # model = resnext.ResNext(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
 # model = mobilenets.MobileNet(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
 # model = inception_v3.InceptionV3(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
 # model = inception_resnet_v2.InceptionResNetV2(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
-# model = densenet.DenseNet(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
+model = densenet.DenseNet(input_shape=input_shape, classes=num_classes, attention_module=attention_module)
 
 model.compile(loss='categorical_crossentropy',
               optimizer=Adam(lr=lr_schedule(0)),
diff --git a/models/__pycache__/attention_module.cpython-36.pyc b/models/__pycache__/attention_module.cpython-36.pyc
deleted file mode 100644
index 08fd786..0000000
Binary files a/models/__pycache__/attention_module.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/densenet.cpython-36.pyc b/models/__pycache__/densenet.cpython-36.pyc
deleted file mode 100644
index da13176..0000000
Binary files a/models/__pycache__/densenet.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/inception_resnet_v2.cpython-36.pyc b/models/__pycache__/inception_resnet_v2.cpython-36.pyc
deleted file mode 100644
index c9474ae..0000000
Binary files a/models/__pycache__/inception_resnet_v2.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/inception_v3.cpython-36.pyc b/models/__pycache__/inception_v3.cpython-36.pyc
deleted file mode 100644
index cdb5707..0000000
Binary files a/models/__pycache__/inception_v3.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/mobilenets.cpython-36.pyc b/models/__pycache__/mobilenets.cpython-36.pyc
deleted file mode 100644
index 8fdac7e..0000000
Binary files a/models/__pycache__/mobilenets.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/resnet_v1.cpython-36.pyc b/models/__pycache__/resnet_v1.cpython-36.pyc
deleted file mode 100644
index bad1d3c..0000000
Binary files a/models/__pycache__/resnet_v1.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/resnet_v2.cpython-36.pyc b/models/__pycache__/resnet_v2.cpython-36.pyc
deleted file mode 100644
index 0685267..0000000
Binary files a/models/__pycache__/resnet_v2.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/resnext.cpython-36.pyc b/models/__pycache__/resnext.cpython-36.pyc
deleted file mode 100644
index b7b2ee7..0000000
Binary files a/models/__pycache__/resnext.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/se.cpython-36.pyc b/models/__pycache__/se.cpython-36.pyc
deleted file mode 100644
index 2a4f2df..0000000
Binary files a/models/__pycache__/se.cpython-36.pyc and /dev/null differ
diff --git a/models/__pycache__/se_resnext.cpython-36.pyc b/models/__pycache__/se_resnext.cpython-36.pyc
deleted file mode 100644
index 4324ebb..0000000
Binary files a/models/__pycache__/se_resnext.cpython-36.pyc and /dev/null differ
diff --git a/models/attention_module.py b/models/attention_module.py
index ff13e34..164b1ae 100644
--- a/models/attention_module.py
+++ b/models/attention_module.py
@@ -1,6 +1,6 @@
-from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda
-from keras import backend as K
-from keras.activations import sigmoid
+from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda
+from tensorflow.keras import backend as K
+from tensorflow.keras.activations import sigmoid
 
 def attach_attention_module(net, attention_module):
     if attention_module == 'se_block': # SE_block
@@ -18,23 +18,23 @@ def se_block(input_feature, ratio=8):
     """
 
     channel_axis = 1 if K.image_data_format() == "channels_first" else -1
-    channel = input_feature._keras_shape[channel_axis]
+    channel = input_feature.shape[channel_axis]
 
     se_feature = GlobalAveragePooling2D()(input_feature)
     se_feature = Reshape((1, 1, channel))(se_feature)
-    assert se_feature._keras_shape[1:] == (1,1,channel)
+    assert se_feature.shape[1:] == (1,1,channel)
     se_feature = Dense(channel // ratio,
                        activation='relu',
                        kernel_initializer='he_normal',
                        use_bias=True,
                        bias_initializer='zeros')(se_feature)
-    assert se_feature._keras_shape[1:] == (1,1,channel//ratio)
+    assert se_feature.shape[1:] == (1,1,channel//ratio)
     se_feature = Dense(channel,
                        activation='sigmoid',
                        kernel_initializer='he_normal',
                        use_bias=True,
                        bias_initializer='zeros')(se_feature)
-    assert se_feature._keras_shape[1:] == (1,1,channel)
+    assert se_feature.shape[1:] == (1,1,channel)
 
     if K.image_data_format() == 'channels_first':
         se_feature = Permute((3, 1, 2))(se_feature)
@@ -53,7 +53,7 @@ def cbam_block(cbam_feature, ratio=8):
 
 def channel_attention(input_feature, ratio=8):
     channel_axis = 1 if K.image_data_format() == "channels_first" else -1
-    channel = input_feature._keras_shape[channel_axis]
+    channel = input_feature.shape[channel_axis]
 
     shared_layer_one = Dense(channel//ratio,
                              activation='relu',
@@ -67,19 +67,19 @@ def channel_attention(input_feature, ratio=8):
 
     avg_pool = GlobalAveragePooling2D()(input_feature)
     avg_pool = Reshape((1,1,channel))(avg_pool)
-    assert avg_pool._keras_shape[1:] == (1,1,channel)
+    assert avg_pool.shape[1:] == (1,1,channel)
     avg_pool = shared_layer_one(avg_pool)
-    assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
+    assert avg_pool.shape[1:] == (1,1,channel//ratio)
     avg_pool = shared_layer_two(avg_pool)
-    assert avg_pool._keras_shape[1:] == (1,1,channel)
+    assert avg_pool.shape[1:] == (1,1,channel)
 
     max_pool = GlobalMaxPooling2D()(input_feature)
     max_pool = Reshape((1,1,channel))(max_pool)
-    assert max_pool._keras_shape[1:] == (1,1,channel)
+    assert max_pool.shape[1:] == (1,1,channel)
     max_pool = shared_layer_one(max_pool)
-    assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
+    assert max_pool.shape[1:] == (1,1,channel//ratio)
     max_pool = shared_layer_two(max_pool)
-    assert max_pool._keras_shape[1:] == (1,1,channel)
+    assert max_pool.shape[1:] == (1,1,channel)
 
     cbam_feature = Add()([avg_pool,max_pool])
     cbam_feature = Activation('sigmoid')(cbam_feature)
@@ -93,18 +93,18 @@ def spatial_attention(input_feature):
     kernel_size = 7
 
     if K.image_data_format() == "channels_first":
-        channel = input_feature._keras_shape[1]
+        channel = input_feature.shape[1]
         cbam_feature = Permute((2,3,1))(input_feature)
     else:
-        channel = input_feature._keras_shape[-1]
+        channel = input_feature.shape[-1]
         cbam_feature = input_feature
 
     avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
-    assert avg_pool._keras_shape[-1] == 1
+    assert avg_pool.shape[-1] == 1
     max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
-    assert max_pool._keras_shape[-1] == 1
+    assert max_pool.shape[-1] == 1
     concat = Concatenate(axis=3)([avg_pool, max_pool])
-    assert concat._keras_shape[-1] == 2
+    assert concat.shape[-1] == 2
     cbam_feature = Conv2D(filters = 1,
                           kernel_size=kernel_size,
                           strides=1,
@@ -112,7 +112,7 @@ def spatial_attention(input_feature):
                           activation='sigmoid',
                           kernel_initializer='he_normal',
                           use_bias=False)(concat)
-    assert cbam_feature._keras_shape[-1] == 1
+    assert cbam_feature.shape[-1] == 1
 
     if K.image_data_format() == "channels_first":
         cbam_feature = Permute((3, 1, 2))(cbam_feature)
diff --git a/models/densenet.py b/models/densenet.py
index 1213000..f6330dc 100644
--- a/models/densenet.py
+++ b/models/densenet.py
@@ -11,21 +11,18 @@
 
 import warnings
 
-from keras.models import Model
-from keras.layers.core import Dense, Dropout, Activation, Reshape
-from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
-from keras.layers.pooling import AveragePooling2D, MaxPooling2D
-from keras.layers.pooling import GlobalAveragePooling2D
-from keras.layers import Input
-from keras.layers.merge import concatenate
-from keras.layers.normalization import BatchNormalization
-from keras.regularizers import l2
-from keras.utils.layer_utils import convert_all_kernels_in_model, convert_dense_weights_data_format
-from keras.utils.data_utils import get_file
-from keras.engine.topology import get_source_inputs
-from keras.applications.imagenet_utils import _obtain_input_shape
-from keras.applications.imagenet_utils import decode_predictions
-import keras.backend as K
+from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Dense, Dropout, Activation, Reshape
+from tensorflow.keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
+from tensorflow.keras.layers import AveragePooling2D, MaxPooling2D
+from tensorflow.keras.layers import GlobalAveragePooling2D
+from tensorflow.keras.layers import Input
+from tensorflow.keras.layers import concatenate
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.regularizers import l2
+from tensorflow.keras.utils import get_source_inputs
+from keras_applications.imagenet_utils import _obtain_input_shape
+import tensorflow.keras.backend as K
 
 from models.attention_module import attach_attention_module
diff --git a/models/inception_resnet_v2.py b/models/inception_resnet_v2.py
index 1d3b44a..9b2b484 100644
--- a/models/inception_resnet_v2.py
+++ b/models/inception_resnet_v2.py
@@ -21,24 +21,23 @@
 
 import warnings
 
-from keras.models import Model
-from keras.layers import Activation
-from keras.layers import AveragePooling2D
-from keras.layers import BatchNormalization
-from keras.layers import Concatenate
-from keras.layers import Conv2D
-from keras.layers import Dense
-from keras.layers import GlobalAveragePooling2D
-from keras.layers import GlobalMaxPooling2D
-from keras.layers import Input
-from keras.layers import Lambda
-from keras.layers import MaxPooling2D
-from keras.utils.data_utils import get_file
-from keras.engine.topology import get_source_inputs
-from keras.applications import imagenet_utils
-from keras.applications.imagenet_utils import _obtain_input_shape
-from keras.applications.imagenet_utils import decode_predictions
-from keras import backend as K
+from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Activation
+from tensorflow.keras.layers import AveragePooling2D
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.layers import Concatenate
+from tensorflow.keras.layers import Conv2D
+from tensorflow.keras.layers import Dense
+from tensorflow.keras.layers import GlobalAveragePooling2D
+from tensorflow.keras.layers import GlobalMaxPooling2D
+from tensorflow.keras.layers import Input
+from tensorflow.keras.layers import Lambda
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.utils import get_file
+from tensorflow.keras.utils import get_source_inputs
+from tensorflow.keras.applications import imagenet_utils
+from keras_applications.imagenet_utils import _obtain_input_shape
+from tensorflow.keras import backend as K
 
 from models.attention_module import attach_attention_module
diff --git a/models/inception_v3.py b/models/inception_v3.py
index 82c1f4b..b651d90 100644
--- a/models/inception_v3.py
+++ b/models/inception_v3.py
@@ -18,23 +18,22 @@
 
 import warnings
 
-from keras.models import Model
-from keras import layers
-from keras.layers import Activation
-from keras.layers import Dense
-from keras.layers import Reshape
-from keras.layers import Input
-from keras.layers import BatchNormalization
-from keras.layers import Conv2D
-from keras.layers import MaxPooling2D
-from keras.layers import AveragePooling2D
-from keras.layers import GlobalAveragePooling2D
-from keras.layers import GlobalMaxPooling2D
-from keras.engine.topology import get_source_inputs
-from keras.utils.data_utils import get_file
-from keras import backend as K
-from keras.applications.imagenet_utils import decode_predictions
-from keras.applications.imagenet_utils import _obtain_input_shape
+from tensorflow.keras.models import Model
+from tensorflow.keras import layers
+from tensorflow.keras.layers import Activation
+from tensorflow.keras.layers import Dense
+from tensorflow.keras.layers import Reshape
+from tensorflow.keras.layers import Input
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.layers import Conv2D
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.layers import AveragePooling2D
+from tensorflow.keras.layers import GlobalAveragePooling2D
+from tensorflow.keras.layers import GlobalMaxPooling2D
+from tensorflow.keras.utils import get_source_inputs
+from tensorflow.keras.utils import get_file
+from tensorflow.keras import backend as K
+from keras_applications.imagenet_utils import _obtain_input_shape
 
 from models.attention_module import attach_attention_module
diff --git a/models/mobilenets.py b/models/mobilenets.py
index bb953ea..5b39a0d 100644
--- a/models/mobilenets.py
+++ b/models/mobilenets.py
@@ -11,26 +11,25 @@
 
 import warnings
 
-from keras.models import Model
-from keras.layers import Input
-from keras.layers import Activation
-from keras.layers import Dropout
-from keras.layers import Reshape
-from keras.layers import BatchNormalization
-from keras.layers import GlobalAveragePooling2D
-from keras.layers import GlobalMaxPooling2D
-from keras.layers import Conv2D
-from keras import initializers
-from keras import regularizers
-from keras import constraints
-from keras.utils import conv_utils
-from keras.utils.data_utils import get_file
-from keras.engine.topology import get_source_inputs
-from keras.engine import InputSpec
-from keras.applications import imagenet_utils
-from keras.applications.imagenet_utils import _obtain_input_shape
-from keras.applications.imagenet_utils import decode_predictions
-from keras import backend as K
+from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Input
+from tensorflow.keras.layers import Activation
+from tensorflow.keras.layers import Dropout
+from tensorflow.keras.layers import Reshape
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.layers import GlobalAveragePooling2D
+from tensorflow.keras.layers import GlobalMaxPooling2D
+from tensorflow.keras.layers import Conv2D
+from tensorflow.keras import initializers
+from tensorflow.keras import regularizers
+from tensorflow.keras import constraints
+from tensorflow.python.keras.utils import conv_utils  # tf.keras.utils does not re-export conv_utils helpers (normalize_tuple etc.)
+from tensorflow.keras.utils import get_file
+from tensorflow.keras.utils import get_source_inputs
+from tensorflow.keras.layers import InputSpec
+from tensorflow.keras.applications import imagenet_utils
+from keras_applications.imagenet_utils import _obtain_input_shape
+from tensorflow.keras import backend as K
 
 from models.attention_module import attach_attention_module
diff --git a/models/resnet_v1.py b/models/resnet_v1.py
index 1494a4d..bd0aa3b 100644
--- a/models/resnet_v1.py
+++ b/models/resnet_v1.py
@@ -7,16 +7,16 @@
 """
 from __future__ import print_function
 
-import keras
-from keras.layers import Dense, Conv2D, BatchNormalization, Activation
-from keras.layers import AveragePooling2D, Input, Flatten
-from keras.optimizers import Adam
-from keras.callbacks import ModelCheckpoint, LearningRateScheduler
-from keras.callbacks import ReduceLROnPlateau
-from keras.preprocessing.image import ImageDataGenerator
-from keras.regularizers import l2
-from keras import backend as K
-from keras.models import Model
+import tensorflow.keras
+from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
+from tensorflow.keras.callbacks import ReduceLROnPlateau
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.regularizers import l2
+from tensorflow.keras import backend as K
+from tensorflow.keras.models import Model
 from models.attention_module import attach_attention_module
 
 def resnet_layer(inputs,
@@ -124,7 +124,7 @@ def resnet_v1(input_shape, depth, num_classes=10, attention_module=None):
             # attention_module
             if attention_module is not None:
                 y = attach_attention_module(y, attention_module)
 
-            x = keras.layers.add([x, y])
+            x = tensorflow.keras.layers.add([x, y])
             x = Activation('relu')(x)
         num_filters *= 2
diff --git a/models/resnet_v2.py b/models/resnet_v2.py
index db4654d..50e219f 100644
--- a/models/resnet_v2.py
+++ b/models/resnet_v2.py
@@ -7,16 +7,16 @@
 """
 from __future__ import print_function
 
-import keras
-from keras.layers import Dense, Conv2D, BatchNormalization, Activation
-from keras.layers import AveragePooling2D, Input, Flatten
-from keras.optimizers import Adam
-from keras.callbacks import ModelCheckpoint, LearningRateScheduler
-from keras.callbacks import ReduceLROnPlateau
-from keras.preprocessing.image import ImageDataGenerator
-from keras.regularizers import l2
-from keras import backend as K
-from keras.models import Model
+import tensorflow.keras
+from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
+from tensorflow.keras.callbacks import ReduceLROnPlateau
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.regularizers import l2
+from tensorflow.keras import backend as K
+from tensorflow.keras.models import Model
 from models.attention_module import attach_attention_module
 
 def resnet_layer(inputs,
@@ -144,7 +144,7 @@ def resnet_v2(input_shape, depth, num_classes=10, attention_module=None):
 
             if attention_module is not None:
                 y = attach_attention_module(y, attention_module)
 
-            x = keras.layers.add([x, y])
+            x = tensorflow.keras.layers.add([x, y])
 
         num_filters_in = num_filters_out
diff --git a/models/resnext.py b/models/resnext.py
index e51370e..3b4f0e2 100644
--- a/models/resnext.py
+++ b/models/resnext.py
@@ -10,20 +10,18 @@
 
 import warnings
 
-from keras.models import Model
-from keras.layers.core import Dense, Lambda
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import Conv2D
-from keras.layers.pooling import GlobalAveragePooling2D, GlobalMaxPooling2D, MaxPooling2D
-from keras.layers import Input
-from keras.layers.merge import concatenate, add
-from keras.layers.normalization import BatchNormalization
-from keras.regularizers import l2
-from keras.utils.layer_utils import convert_all_kernels_in_model
-from keras.utils.data_utils import get_file
-from keras.engine.topology import get_source_inputs
-from keras.applications.imagenet_utils import _obtain_input_shape
-import keras.backend as K
+from tensorflow.keras.models import Model
+from tensorflow.keras.layers import Dense, Lambda
+from tensorflow.keras.layers import Activation, LeakyReLU
+from tensorflow.keras.layers import Conv2D
+from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, MaxPooling2D
+from tensorflow.keras.layers import Input
+from tensorflow.keras.layers import concatenate, add
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.regularizers import l2
+from tensorflow.keras.utils import get_source_inputs
+from keras_applications.imagenet_utils import _obtain_input_shape
+import tensorflow.keras.backend as K
 
 from models.attention_module import attach_attention_module
@@ -249,7 +247,7 @@ def __initial_conv_block(input, weight_decay=5e-4):
 
     x = Conv2D(64, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal',
                kernel_regularizer=l2(weight_decay))(input)
     x = BatchNormalization(axis=channel_axis)(x)
-    x = LeakyReLU()(x)
+    x = LeakyReLU(alpha=0.1)(x)
 
     return x
@@ -266,7 +264,7 @@ def __initial_conv_block_inception(input, weight_decay=5e-4):
 
     x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
                kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
     x = BatchNormalization(axis=channel_axis)(x)
-    x = LeakyReLU()(x)
+    x = LeakyReLU(alpha=0.1)(x)
 
     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
@@ -293,7 +291,7 @@ def __grouped_convolution_block(input, grouped_channels, cardinality, strides, w
         x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                    kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
         x = BatchNormalization(axis=channel_axis)(x)
-        x = LeakyReLU()(x)
+        x = LeakyReLU(alpha=0.1)(x)
 
         return x
 
     for c in range(cardinality):
@@ -308,7 +306,7 @@ def __grouped_convolution_block(input, grouped_channels, cardinality, strides, w
 
     group_merge = concatenate(group_list, axis=channel_axis)
     x = BatchNormalization(axis=channel_axis)(group_merge)
-    x = LeakyReLU()(x)
+    x = LeakyReLU(alpha=0.1)(x)
 
     return x
@@ -331,12 +329,12 @@ def __bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay
 
     # Check if input number of filters is same as 16 * k, else create convolution2d for this input
     if K.image_data_format() == 'channels_first':
-        if init._keras_shape[1] != 2 * filters:
+        if init.shape[1] != 2 * filters:
             init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                           use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
             init = BatchNormalization(axis=channel_axis)(init)
     else:
-        if init._keras_shape[-1] != 2 * filters:
+        if init.shape[-1] != 2 * filters:
             init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                           use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
             init = BatchNormalization(axis=channel_axis)(init)
@@ -344,7 +342,7 @@ def __bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay
 
     x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
     x = BatchNormalization(axis=channel_axis)(x)
-    x = LeakyReLU()(x)
+    x = LeakyReLU(alpha=0.1)(x)
 
     x = __grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)
@@ -357,7 +355,7 @@ def __bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay
         x = attach_attention_module(x, attention_module)
 
     x = add([init, x])
-    x = LeakyReLU()(x)
+    x = LeakyReLU(alpha=0.1)(x)
 
     return x
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..ea44f69
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,10 @@
+h5py==2.10.0
+idna==2.10
+Keras-Applications==1.0.8
+Keras-Preprocessing==1.1.2
+numpy==1.19.5
+tensorboard==2.4.1
+tensorboard-plugin-wit==1.8.0
+tensorflow==2.4.1
+tensorflow-estimator==2.4.0
+
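Reviewer note: every file above applies the same migration pattern: keras.* imports become tensorflow.keras.*, the private ._keras_shape attribute becomes the public .shape, and _obtain_input_shape is taken from the standalone keras_applications package pinned in requirements.txt. The snippet below is a minimal, hypothetical smoke test for the .shape rewrite; it is a sketch assuming tensorflow==2.4.1 as pinned, and the tensor names are illustrative rather than taken from this repository.

# Hypothetical smoke test, not part of the diff (assumes tensorflow==2.4.1).
from tensorflow.keras.layers import Conv2D, Input

inputs = Input(shape=(32, 32, 3))
x = Conv2D(16, (3, 3), padding='same')(inputs)

# tf.keras removed the private ._keras_shape; the public .shape returns a
# TensorShape, and slicing one still compares equal to a plain tuple, so the
# asserts rewritten in attention_module.py and resnext.py behave the same.
assert x.shape[-1] == 16            # channel count with the default channels_last layout
assert x.shape[1:] == (32, 32, 16)  # batch dimension excluded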