# bcdunet.py — 330 lines (270 loc); extracted from a GitHub page.
# (Web UI chrome and the line-number gutter have been removed.)
# /~https://github.com/rezazad68/BCDU-Net/blob/master/Skin%20Lesion%20Segmentation/models.py
from __future__ import division
import numpy as np
from tensorflow.keras.models import Model
# from tensorflow.keras.layers import Input, Activation, BatchNormalization, concatenate, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D, Reshape, Dropout
from tensorflow.keras.layers import *
from keras import backend as K
from keras.utils.vis_utils import plot_model as plot
from keras.optimizers import SGD
from keras.optimizers import *
from keras.applications.vgg16 import VGG16
import keras
def BCDU_net_D3(input_size=(256, 256, 1), afunc='relu'):
    """Build BCDU-Net with a three-block densely connected bottleneck (D3).

    U-Net-style encoder/decoder in which each decoder stage fuses the skip
    connection and the upsampled features with a ConvLSTM2D (run backwards
    over a length-2 "time" axis) instead of a plain concatenation.

    Args:
        input_size: (H, W, C) input shape. Assumes H == W (the skip-path
            reshapes use input_size[0] for both spatial axes) and that H is
            divisible by 8 (three 2x2 poolings).
        afunc: activation function name used by the convolutional layers.

    Returns:
        An uncompiled Keras Model mapping the input image to a
        single-channel sigmoid segmentation map of the same spatial size.
    """
    N = input_size[0]
    inputs = Input(input_size)

    # --- Encoder: three conv blocks, each followed by 2x2 max pooling ---
    conv1 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv3)
    # Dropout is applied only on the skip path; the pooled path uses conv3.
    drop3 = Dropout(0.5)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # --- Densely connected bottleneck ---
    # D1
    conv4 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool3)
    conv4_1 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv4)
    drop4_1 = Dropout(0.5)(conv4_1)
    # D2
    conv4_2 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(drop4_1)
    conv4_2 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv4_2)
    conv4_2 = Dropout(0.5)(conv4_2)
    # D3: dense connection — concatenate the D2 output with the D1 output.
    merge_dense = concatenate([conv4_2, drop4_1], axis=3)
    conv4_3 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge_dense)
    conv4_3 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv4_3)
    drop4_3 = Dropout(0.5)(conv4_3)

    # --- Decoder stage 1 (N/8 -> N/4) ---
    up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(drop4_3)
    up6 = BatchNormalization(axis=3)(up6)
    up6 = Activation(afunc)(up6)
    # Stack skip and upsampled features on a length-2 "time" axis for ConvLSTM.
    x1 = Reshape(target_shape=(1, N // 4, N // 4, 256))(drop3)
    x2 = Reshape(target_shape=(1, N // 4, N // 4, 256))(up6)
    merge6 = concatenate([x1, x2], axis=1)
    merge6 = ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv6)

    # --- Decoder stage 2 (N/4 -> N/2) ---
    up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv6)
    up7 = BatchNormalization(axis=3)(up7)
    up7 = Activation(afunc)(up7)
    x1 = Reshape(target_shape=(1, N // 2, N // 2, 128))(conv2)
    x2 = Reshape(target_shape=(1, N // 2, N // 2, 128))(up7)
    merge7 = concatenate([x1, x2], axis=1)
    merge7 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv7)

    # --- Decoder stage 3 (N/2 -> N) ---
    up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv7)
    up8 = BatchNormalization(axis=3)(up8)
    up8 = Activation(afunc)(up8)
    x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
    x2 = Reshape(target_shape=(1, N, N, 64))(up8)
    merge8 = concatenate([x1, x2], axis=1)
    merge8 = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = Conv2D(2, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv8)

    # 1x1 sigmoid head -> single-channel segmentation mask.
    conv9 = Conv2D(1, 1, activation='sigmoid')(conv8)
    model = Model(inputs=[inputs], outputs=[conv9])
    # Compilation is left to the caller, e.g.:
    # model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def BCDU_net_D1(input_size=(256, 256, 1), afunc='relu'):
    """Build BCDU-Net with a single-block bottleneck (D1).

    Same encoder/decoder as BCDU_net_D3 but with only one conv block at the
    bottleneck (no dense connections). Decoder stages fuse skip connections
    with ConvLSTM2D layers.

    Args:
        input_size: (H, W, C) input shape. Assumes H == W and H divisible
            by 8 (three 2x2 poolings).
        afunc: activation function name used by the convolutional layers.

    Returns:
        An uncompiled Keras Model producing a single-channel sigmoid mask.
    """
    N = input_size[0]
    inputs = Input(input_size)

    # --- Encoder ---
    conv1 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv3)
    # Dropout is applied only on the skip path; the pooled path uses conv3.
    drop3 = Dropout(0.5)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # NOTE(review): the original code built an extra dconv4/ddrop4/pool4
    # branch here that (a) mistakenly applied Dropout and MaxPooling to
    # conv3 instead of dconv4 and (b) was never connected to the output.
    # Removed as dead, buggy code.

    # --- Bottleneck (D1: one conv block) ---
    conv4 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(pool3)
    conv4_1 = Conv2D(512, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv4)
    drop4_1 = Dropout(0.5)(conv4_1)

    # --- Decoder stage 1 (N/8 -> N/4) ---
    up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(drop4_1)
    up6 = BatchNormalization(axis=3)(up6)
    up6 = Activation(afunc)(up6)
    # Stack skip and upsampled features on a length-2 "time" axis for ConvLSTM.
    x1 = Reshape(target_shape=(1, N // 4, N // 4, 256))(drop3)
    x2 = Reshape(target_shape=(1, N // 4, N // 4, 256))(up6)
    merge6 = concatenate([x1, x2], axis=1)
    merge6 = ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv6)

    # --- Decoder stage 2 (N/4 -> N/2) ---
    up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv6)
    up7 = BatchNormalization(axis=3)(up7)
    up7 = Activation(afunc)(up7)
    x1 = Reshape(target_shape=(1, N // 2, N // 2, 128))(conv2)
    x2 = Reshape(target_shape=(1, N // 2, N // 2, 128))(up7)
    merge7 = concatenate([x1, x2], axis=1)
    merge7 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv7)

    # --- Decoder stage 3 (N/2 -> N) ---
    up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv7)
    up8 = BatchNormalization(axis=3)(up8)
    up8 = Activation(afunc)(up8)
    x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
    x2 = Reshape(target_shape=(1, N, N, 64))(up8)
    merge8 = concatenate([x1, x2], axis=1)
    merge8 = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = Conv2D(2, 3, activation=afunc, padding='same', kernel_initializer='he_normal')(conv8)

    # 1x1 sigmoid head -> single-channel segmentation mask.
    conv9 = Conv2D(1, 1, activation='sigmoid')(conv8)
    model = Model(inputs=inputs, outputs=conv9)
    # Compilation is left to the caller, e.g.:
    # model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def unet(input_size=(256, 256, 1)):
    """Build a standard U-Net for binary segmentation.

    Classic encoder/decoder with concatenation skip connections and
    UpSampling2D + Conv2D upsampling (no ConvLSTM fusion, unlike the
    BCDU variants in this file).

    Args:
        input_size: (H, W, C) input shape; H and W must be divisible by 8.

    Returns:
        A compiled Keras Model (Adam, binary cross-entropy, accuracy)
        producing a single-channel sigmoid mask.
    """
    inputs = Input(input_size)

    # --- Encoder ---
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    # Dropout only on the skip path; the pooled path uses conv3.
    drop3 = Dropout(0.5)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    # --- Bottleneck ---
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)

    # --- Decoder with concatenation skips ---
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop4))
    merge6 = concatenate([drop3, up6], axis=3)
    conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv2, up7], axis=3)
    conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    up8 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
    merge8 = concatenate([conv1, up8], axis=3)
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    # 1x1 sigmoid head -> single-channel segmentation mask.
    conv9 = Conv2D(1, 1, activation='sigmoid')(conv8)
    # Fixed: Keras-1 `Model(input=, output=)` kwargs were removed in Keras 2;
    # use `inputs=`/`outputs=` (consistent with the other builders here).
    model = Model(inputs=inputs, outputs=conv9)
    # Fixed: the `lr` alias was deprecated/removed; use `learning_rate`.
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def SqueezeExcite(x, ratio=16):
    """Apply a squeeze-and-excitation gate to feature map ``x``.

    Global-average-pools ``x`` to per-channel statistics, passes them
    through a bottleneck MLP (reduction factor ``ratio``), and rescales
    the channels of ``x`` by the resulting sigmoid weights.

    Args:
        x: 4-D feature tensor (batch, H, W, channels).
        ratio: channel reduction factor for the bottleneck Dense layer.

    Returns:
        A tensor of the same shape as ``x`` with channel-wise reweighting.
    """
    channels = K.int_shape(x)[-1]
    squeezed = GlobalAveragePooling2D()(x)
    excited = Dense(channels // ratio, activation='relu')(squeezed)
    gate = Dense(channels, activation='sigmoid')(excited)
    return Multiply()([x, gate])
# URL of the ImageNet-pretrained VGG16 weights (convolutional layers only,
# no fully connected "top"); downloaded by SEDU_Net_D3 via keras.utils.get_file.
WEIGHTS_PATH_NO_TOP = ('/~https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
def SEDU_Net_D3(input_size=(256, 256, 1), learning_rate=1e-4):
    """Build SEDU-Net: a VGG16-style encoder with a BCDU-style decoder that
    adds squeeze-and-excitation blocks after each upsampling / conv stage.

    Encoder layers use VGG16 layer names (block*_conv*) so ImageNet-pretrained
    VGG16 weights can be loaded by name; the weights file is downloaded and
    loaded before the model is returned.

    NOTE(review): with the default 1-channel input, the block1_conv1 kernel
    shape differs from the pretrained 3-channel VGG16 weights — confirm the
    intended input channel count before relying on the by-name weight load.

    Args:
        input_size: (H, W, C) input shape. Assumes H == W and H divisible by 8.
        learning_rate: Adam learning rate used when compiling the model.

    Returns:
        A compiled Keras Model (Adam, binary cross-entropy, accuracy)
        producing a single-channel sigmoid mask.
    """
    img_input = Input(input_size)
    N = input_size[0]

    # Fixed: the original referenced `layers.Conv2D` / `layers.MaxPooling2D`,
    # but no `layers` module is imported anywhere in this file (NameError at
    # call time); the classes themselves are in scope via the star import.
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(conv1)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(pool1)
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(conv2)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(pool2)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(conv3)
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(pool3)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)

    # --- Densely connected bottleneck ---
    # D1
    drop4_1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    # D2
    conv4_2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(drop4_1)
    conv4_2 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4_2)
    conv4_2 = Dropout(0.5)(conv4_2)
    # D3: dense connection — concatenate the D2 output with the D1 output.
    merge_dense = concatenate([conv4_2, drop4_1], axis=3)
    conv4_3 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge_dense)
    conv4_3 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4_3)
    drop4_3 = Dropout(0.5)(conv4_3)

    # --- Decoder stage 1 (N/8 -> N/4), with SE gating ---
    up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(drop4_3)
    up6 = SqueezeExcite(up6, ratio=16)
    up6 = BatchNormalization(axis=3)(up6)
    up6 = Activation('relu')(up6)
    # Stack skip and upsampled features on a length-2 "time" axis for ConvLSTM.
    x1 = Reshape(target_shape=(1, N // 4, N // 4, 256))(conv3)
    x2 = Reshape(target_shape=(1, N // 4, N // 4, 256))(up6)
    merge6 = concatenate([x1, x2], axis=1)
    merge6 = ConvLSTM2D(filters=256, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    conv6 = SqueezeExcite(conv6, ratio=16)

    # --- Decoder stage 2 (N/4 -> N/2) ---
    up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv6)
    up7 = SqueezeExcite(up7, ratio=16)
    up7 = BatchNormalization(axis=3)(up7)
    up7 = Activation('relu')(up7)
    x1 = Reshape(target_shape=(1, N // 2, N // 2, 128))(conv2)
    x2 = Reshape(target_shape=(1, N // 2, N // 2, 128))(up7)
    merge7 = concatenate([x1, x2], axis=1)
    merge7 = ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv7 = SqueezeExcite(conv7, ratio=16)

    # --- Decoder stage 3 (N/2 -> N) ---
    up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal')(conv7)
    up8 = SqueezeExcite(up8, ratio=16)
    up8 = BatchNormalization(axis=3)(up8)
    up8 = Activation('relu')(up8)
    x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
    x2 = Reshape(target_shape=(1, N, N, 64))(up8)
    merge8 = concatenate([x1, x2], axis=1)
    merge8 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', return_sequences=False, go_backwards=True, kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    conv8 = SqueezeExcite(conv8, ratio=16)
    conv8 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    # 1x1 sigmoid head -> single-channel segmentation mask.
    conv9 = Conv2D(1, 1, activation='sigmoid')(conv8)

    # Download and load ImageNet-pretrained VGG16 weights (by layer name, so
    # only the block*_conv* encoder layers pick up pretrained kernels).
    weights_path = keras.utils.get_file(
        'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
        WEIGHTS_PATH_NO_TOP,
        cache_subdir='models',
        file_hash='6d6bbae143d832006294945121d1f1fc')
    # Fixed: Keras-1 `Model(input=, output=)` kwargs were removed in Keras 2.
    model = Model(inputs=img_input, outputs=conv9)
    model.load_weights(weights_path, by_name=True)
    # Fixed: the `lr` alias was deprecated/removed; use `learning_rate`.
    model.compile(optimizer=Adam(learning_rate=learning_rate), loss='binary_crossentropy', metrics=['accuracy'])
    return model