added new model, with different hyperparameter from dd3d git

Nicolasticot, 5 years ago
Parent commit: bb9a7f549e
2 changed files with 82 additions and 9 deletions

  1. DeepDrug.ipynb  (+42, -4)
  2. DeepDrug.py  (+40, -5)

DeepDrug.ipynb  (+42, -4)

@@ -16,7 +16,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -28,8 +28,9 @@
     "from keras.layers import Dense, Flatten, TimeDistributed, Dropout\n",
     "from keras import Input, Model\n",
     "from keras.layers import add, Activation\n",
+    "from keras.layers.advanced_activations import LeakyReLU\n",
     "#from keras.utils import plot_model  # Needs pydot.\n",
-    "from keras.layers import Conv3D, MaxPooling3D"
+    "from keras.layers import Convolution3D, MaxPooling3D"
    ]
   },
   {
@@ -162,6 +163,43 @@
     "    return model"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def model_new(): # créer un objet modèle\n",
+    "    \"\"\"\n",
+    "    Return a simple sequentiel model\n",
+    "    \n",
+    "    Returns :\n",
+    "        - model : keras.Model\n",
+    "    \"\"\"\n",
+    "    inputs = Input(shape=(14,32,32,32))\n",
+    "    conv_1 = Convolution3D(filters=64, kernel_size=5, padding=\"valid\", data_format='channels_first')(inputs)\n",
+    "    activation_1 = LeakyReLU(alpha = 0.1)(conv_1)\n",
+    "    drop_1 = Dropout(0.2)(activation_1)\n",
+    "    conv_2 = Convolution3D(filters=64, kernel_size=3, padding=\"valid\", data_format='channels_first')(drop_1)\n",
+    "    activation_2 = LeakyReLU(alpha = 0.1)(conv_2)\n",
+    "    maxpool = MaxPooling3D(pool_size=(2,2,2),\n",
+    "                            strides=None,\n",
+    "                            padding='valid',\n",
+    "                            data_format='channels_first')(activation_2)\n",
+    "    drop_2 = Dropout(0.4)(maxpool)\n",
+    "    flatters = Flatten()(drop_2)\n",
+    "    dense = Dense(128)(flatters)\n",
+    "    activation_3 = LeakyReLU(alpha = 0.1)(dense)\n",
+    "    drop_3 = Dropout(0.4)(activation_3)\n",
+    "    output = Dense(3, activation='softmax')(drop_3)\n",
+    "    model = Model(inputs=inputs, outputs=output)\n",
+    "    my_opt = optimizers.Adam(learning_rate=0.000001, beta_1=0.9, beta_2=0.999, amsgrad=False)\n",
+    "    print(model.summary)\n",
+    "    model.compile(optimizer=my_opt, loss=\"categorical_crossentropy\",\n",
+    "                  metrics=[\"accuracy\"])\n",
+    "    return model"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -250,7 +288,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "my_model = model_light()"
+    "my_model = model_new()"
    ]
   },
   {
@@ -270,7 +308,7 @@
    "outputs": [],
    "source": [
     "history_mild_2mp = mild_model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=30, batch_size=32)\n",
-    "my_model.save('light_model_2mp_e30_b32.h5')"
+    "my_model.save('new_model_e30_b32_t1000.h5')"
    ]
   },
   {
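
The imports this commit switches to (keras.layers.advanced_activations and Convolution3D) only resolve on older standalone Keras releases; on a current TensorFlow 2.x install the same layers live in keras.layers as LeakyReLU and Conv3D. Below is a minimal sketch of the model_new() architecture from the diff rewritten against that API; the function name model_new_sketch is ours, while the hyperparameters (kernel sizes, dropout rates, LeakyReLU alpha, Adam learning rate 1e-6) are copied from the commit.

# Minimal sketch of the commit's model_new() on the tf.keras API
# (assumes TensorFlow 2.x). Conv3D and LeakyReLU replace the legacy
# Convolution3D / advanced_activations imports used in the diff.
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (Conv3D, Dense, Dropout, Flatten,
                                     LeakyReLU, MaxPooling3D)


def model_new_sketch():
    """Return the 3D-CNN classifier added by this commit (3-class softmax)."""
    inputs = Input(shape=(14, 32, 32, 32))          # 14 channels, 32^3 voxel grid
    x = Conv3D(filters=64, kernel_size=5, padding="valid",
               data_format="channels_first")(inputs)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.2)(x)
    x = Conv3D(filters=64, kernel_size=3, padding="valid",
               data_format="channels_first")(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding="valid",
                     data_format="channels_first")(x)
    x = Dropout(0.4)(x)
    x = Flatten()(x)
    x = Dense(128)(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Dropout(0.4)(x)
    outputs = Dense(3, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=outputs)
    my_opt = optimizers.Adam(learning_rate=0.000001, beta_1=0.9,
                             beta_2=0.999, amsgrad=False)
    model.compile(optimizer=my_opt, loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()  # the diff prints the bound method (model.summary) instead of calling it
    return model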

DeepDrug.py  (+40, -5)

@@ -5,7 +5,7 @@
 
 # ## Importing library
 
-# In[ ]:
+# In[6]:
 
 
 import numpy as np
@@ -16,8 +16,9 @@ from keras import optimizers, callbacks
 from keras.layers import Dense, Flatten, TimeDistributed, Dropout
 from keras import Input, Model
 from keras.layers import add, Activation
+from keras.layers.advanced_activations import LeakyReLU
 #from keras.utils import plot_model  # Needs pydot.
-from keras.layers import Conv3D, MaxPooling3D
+from keras.layers import Convolution3D, MaxPooling3D
 
 
 # ### used to store model prediction in order to plot roc curve
@@ -126,6 +127,40 @@ def model_heavy(): # créer un objet modèle
     return model
 
 
+# In[8]:
+
+
+def model_new(): # créer un objet modèle
+    """
+    Return a simple sequentiel model
+    
+    Returns :
+        - model : keras.Model
+    """
+    inputs = Input(shape=(14,32,32,32))
+    conv_1 = Convolution3D(filters=64, kernel_size=5, padding="valid", data_format='channels_first')(inputs)
+    activation_1 = LeakyReLU(alpha = 0.1)(conv_1)
+    drop_1 = Dropout(0.2)(activation_1)
+    conv_2 = Convolution3D(filters=64, kernel_size=3, padding="valid", data_format='channels_first')(drop_1)
+    activation_2 = LeakyReLU(alpha = 0.1)(conv_2)
+    maxpool = MaxPooling3D(pool_size=(2,2,2),
+                            strides=None,
+                            padding='valid',
+                            data_format='channels_first')(activation_2)
+    drop_2 = Dropout(0.4)(maxpool)
+    flatters = Flatten()(drop_2)
+    dense = Dense(128)(flatters)
+    activation_3 = LeakyReLU(alpha = 0.1)(dense)
+    drop_3 = Dropout(0.4)(activation_3)
+    output = Dense(3, activation='softmax')(drop_3)
+    model = Model(inputs=inputs, outputs=output)
+    my_opt = optimizers.Adam(learning_rate=0.000001, beta_1=0.9, beta_2=0.999, amsgrad=False)
+    print(model.summary)
+    model.compile(optimizer=my_opt, loss="categorical_crossentropy",
+                  metrics=["accuracy"])
+    return model
+
+
 # In[ ]:
 
 
@@ -191,7 +226,7 @@ Y_test = output[1000:,]
 # In[ ]:
 
 
-my_model = model_light()
+my_model = model_new()
 
 
 # In[ ]:
@@ -204,8 +239,8 @@ tf.test.is_gpu_available()
 # In[ ]:
 
 
-history_mild_2mp = my_model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=30, batch_size=32)
-my_model.save('light_model_2mp_e30_b32.h5')
+history_mild_2mp = mild_model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=30, batch_size=32)
+my_model.save('new_model_e30_b32_t1000.h5')
 
 
 # In[ ]:
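
One caveat when running either file as committed: the updated training line calls mild_model.fit(...), but no mild_model is defined in this diff, so the freshly built my_model = model_new() is presumably what was meant. Below is a hypothetical end-to-end sketch under that assumption, reusing model_new_sketch() from the earlier sketch (or the commit's own model_new() on legacy Keras) and using random placeholder arrays in place of the real featurised data, which is loaded and split outside this diff.

# Hypothetical usage sketch; the real X/Y arrays come from code not shown in
# this diff, so random placeholders with the expected shapes are used here.
import numpy as np

# Channels-first 14x32x32x32 grids and one-hot labels over 3 classes (assumed shapes).
X_train = np.random.rand(8, 14, 32, 32, 32).astype("float32")
Y_train = np.eye(3, dtype="float32")[np.random.randint(0, 3, size=8)]
X_test = np.random.rand(2, 14, 32, 32, 32).astype("float32")
Y_test = np.eye(3, dtype="float32")[np.random.randint(0, 3, size=2)]

my_model = model_new_sketch()  # or the commit's model_new(), on legacy Keras

# The diff fits mild_model; my_model appears to be the intended target.
# channels_first 3D convolutions generally require a GPU, hence the
# tf.test.is_gpu_available() check earlier in DeepDrug.py.
history = my_model.fit(X_train, Y_train,
                       validation_data=(X_test, Y_test),
                       epochs=30, batch_size=32)
my_model.save('new_model_e30_b32_t1000.h5')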