import sys
import itertools
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv1D, MaxPooling1D, LSTM, Dense, Flatten, Activation, BatchNormalization
from tensorflow.keras.metrics import binary_accuracy, categorical_accuracy, mean_squared_error
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.activations import softmax
def compile(model, loss):
    # Shared compile helper: Adam optimizer, tracking both categorical and
    # binary accuracy as metrics. Note this shadows the built-in compile().
    model.compile(loss=loss,
                  optimizer=Adam(),
                  metrics=[categorical_accuracy, binary_accuracy])
def double_models(classes, len_byte_vector):
    # Two output heads on one shared convolutional trunk: a softmax head
    # compiled with categorical cross-entropy and a sigmoid head compiled with MSE.
    name = 'C64_16_2pr_C32_4_2pr_C64_32_2pr_F_D' + str(classes)
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(64, (16,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(64, (32,), strides=2, padding='same', activation='relu')(last)
    last = Flatten()(last)
    last = Dense(classes)(last)
    lastcat = Activation('softmax')(last)
    modelcat = Model(l0, lastcat, name=name + '_cat')
    compile(modelcat, 'categorical_crossentropy')
    lastmse = Activation('sigmoid')(last)
    modelmse = Model(l0, lastmse, name=name + '_mse')
    compile(modelmse, 'mse')
    return modelcat, modelmse
def C64_16_2pr_C32_4_2pr_C64_32_2pr_F_D(classes, len_byte_vector, activation, loss):
    myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(64, (16,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(64, (32,), strides=2, padding='same', activation='relu')(last)
    last = Flatten()(last)
    last = Dense(classes)(last)
    last = Activation(activation)(last)
    model = Model(l0, last, name=myfuncname)
    compile(model, loss)
    return model
def C64_16_2pBA_C32_4_2pBA_C64_32_2pBA_F_D(classes, len_byte_vector, activation, loss):
    myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(64, (16,), strides=2, padding='same')(last)
    last = BatchNormalization()(last)
    last = Activation('relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same')(last)
    last = BatchNormalization()(last)
    last = Activation('relu')(last)
    last = Conv1D(64, (32,), strides=2, padding='same')(last)
    last = BatchNormalization()(last)
    last = Activation('relu')(last)
    last = Flatten()(last)
    last = Dense(classes)(last)
    last = Activation(activation)(last)
    model = Model(l0, last, name=myfuncname)
    compile(model, loss)
    return model
def C256_16_16_L128_D(classes, len_byte_vector, activation, loss):
    myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(256, (16,), strides=16)(last)
    last = LSTM(128)(last)
    last = Dense(classes)(last)
    last = Activation(activation)(last)
    model = Model(l0, last, name=myfuncname)
    compile(model, loss)
    return model
def C64_16_2pr_5C32_4_2pr_C64_32_2pr_F_D(classes, len_byte_vector, activation, loss):
    myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(64, (16,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(64, (32,), strides=2, padding='same', activation='relu')(last)
    last = Flatten()(last)
    last = Dense(classes)(last)
    last = Activation(activation)(last)
    model = Model(l0, last, name=myfuncname)
    compile(model, loss)
    return model
# def C64_16_1pr_5C32_4_1pr_C64_32_1pr_L64_D(classes, len_byte_vector, activation, loss):
#     myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
#     last = l0 = Input(shape=(512, len_byte_vector))
#     last = Conv1D(64, (16,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=1, padding='same', activation='relu')(last)
#     last = Conv1D(64, (32,), strides=1, padding='same', activation='relu')(last)
#     last = LSTM(64)(last)
#     last = Dense(classes)(last)
#     last = Activation(activation)(last)
#     model = Model(l0, last, name=myfuncname)
#     compile(model, loss)
#     return model
def C32_4_2PR_C64_32_2PR_F_D(classes, len_byte_vector, activation, loss):
    myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
    last = l0 = Input(shape=(512, len_byte_vector))
    last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
    last = Conv1D(64, (32,), strides=2, padding='same', activation='relu')(last)
    last = Flatten()(last)
    last = Dense(classes)(last)
    last = Activation(activation)(last)
    model = Model(l0, last, name=myfuncname)
    compile(model, loss)
    return model
# def C64_64_8pr_C64_16_16pr_L64_D(classes, len_byte_vector, activation, loss):
#     myfuncname = sys._getframe().f_code.co_name + str(classes) + '_cat'
#     last = l0 = Input(shape=(512, len_byte_vector))
#     last = Conv1D(64, (16,), strides=2, padding='same', activation='relu')(last)
#     last = Conv1D(32, (4,), strides=2, padding='same', activation='relu')(last)
#     last = LSTM(64)(last)
#     last = Dense(classes)(last)
#     last = Activation(activation)(last)
#     model = Model(l0, last, name=myfuncname)
#     compile(model, loss)
#     return model
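

# Usage sketch (illustrative only): a minimal example of building and
# inspecting the models defined above. The class count and byte-vector width
# below are assumptions for demonstration, not values taken from this project.
if __name__ == '__main__':
    classes = 10            # assumed number of output classes
    len_byte_vector = 256   # assumed one-hot byte encoding width

    # Paired softmax/categorical and sigmoid/MSE models on a shared trunk.
    cat_model, mse_model = double_models(classes, len_byte_vector)
    cat_model.summary()
    mse_model.summary()

    # A single standalone model with caller-chosen activation and loss.
    lstm_model = C256_16_16_L128_D(classes, len_byte_vector,
                                   'softmax', 'categorical_crossentropy')
    lstm_model.summary()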