|
29 | 29 | "<hr>"
|
30 | 30 | ]
|
31 | 31 | },
|
| 32 | + { |
| 33 | + "cell_type": "markdown", |
| 34 | + "metadata": {}, |
| 35 | + "source": [ |
| 36 | + "<hr>\n", |
| 37 | + "<font face=\"Calibri\" size=\"5\" color=\"red\"> <b>Important Note about JupyterHub</b> </font>\n", |
| 38 | + "<br><br>\n", |
| 39 | + "<font face=\"Calibri\" size=\"3\"> <b>Your JupyterHub server will automatically shutdown when left idle for more than 1 hour. Your notebooks will not be lost but you will have to restart their kernels and re-run them from the beginning. You will not be able to seamlessly continue running a partially run notebook.</b> </font>" |
| 40 | + ] |
| 41 | + }, |
| 42 | + { |
| 43 | + "cell_type": "markdown", |
| 44 | + "metadata": {}, |
| 45 | + "source": [ |
| 46 | + "<hr>\n", |
| 47 | + "<font face=\"Calibri\" size=\"5\"> <b>0. Importing Relevant Python Packages </b> </font>\n", |
| 48 | + "\n", |
| 49 | + "<font size=\"3\">Our first step is to <b>import the necessary python libraries into your Jupyter Notebook.</b></font>" |
| 50 | + ] |
| 51 | + }, |
32 | 52 | {
|
33 | 53 | "cell_type": "code",
|
34 | 54 | "execution_count": null,
|
35 | 55 | "metadata": {},
|
36 | 56 | "outputs": [],
|
37 | 57 | "source": [
|
| 58 | + "import os\n", |
| 59 | + "\n", |
38 | 60 | "import scipy.io as sio\n",
|
39 | 61 | "import numpy as np\n",
|
| 62 | + "import matplotlib.pyplot as plt\n", |
40 | 63 | "from keras.optimizers import Nadam\n",
|
41 |
| - "import matplotlib.pyplot as plt" |
| 64 | + "from keras.models import Model\n", |
| 65 | + "from keras.engine.input_layer import Input\n", |
| 66 | + "from keras.layers import Conv2D, Reshape, Activation, Concatenate, GRU, Dense, LSTM, SimpleRNN\n", |
| 67 | + "\n", |
| 68 | + "from asf_notebook import new_directory" |
42 | 69 | ]
|
43 | 70 | },
|
44 | 71 | {
|
45 | 72 | "cell_type": "markdown",
|
46 | 73 | "metadata": {},
|
47 | 74 | "source": [
|
48 |
| - "# Step 1: Data Preparation - loading T1 and T2 images, training map, and test map" |
| 75 | + "<hr>\n", |
| 76 | + "<font face=\"Calibri\" size=\"5\"><b>1. Create a working directory for the analysis and change into it:</b></font>" |
| 77 | + ] |
| 78 | + }, |
| 79 | + { |
| 80 | + "cell_type": "code", |
| 81 | + "execution_count": null, |
| 82 | + "metadata": {}, |
| 83 | + "outputs": [], |
| 84 | + "source": [ |
| 85 | + "base_path = \"/home/jovyan/notebooks/ASF/GEOS_657_Labs/lab_7_taizhou_data\"\n", |
| 86 | + "new_directory(base_path)\n", |
| 87 | + "print(f\"Current working directory: {os.getcwd()}\")" |
| 88 | + ] |
| 89 | + }, |
| 90 | + { |
| 91 | + "cell_type": "markdown", |
| 92 | + "metadata": {}, |
| 93 | + "source": [ |
| 94 | + "<hr>\n", |
| 95 | + "<font face=\"Calibri\" size=\"5\"><b>2. Data Preparation</b></font> \n", |
| 96 | + "\n", |
| 97 | + "<font face=\"Calibri\" size=\"3\"><b>load T1 and T2 images, training map, and test map. Save the images (T1.png and T2.png):</b></font> " |
49 | 98 | ]
|
50 | 99 | },
|
51 | 100 | {
|
|
71 | 120 | "print('the shape of T2 image is: {}'.format(imgT2.shape))\n",
|
72 | 121 | "\n",
|
73 | 122 | "plt.imshow(imgT1[:, :, [3, 2, 1]])\n",
|
| 123 | + "plt.savefig(f\"{base_path}/T1.png\", dpi=300)\n", |
74 | 124 | "plt.show()\n",
|
75 | 125 | "\n",
|
76 | 126 | "plt.imshow(imgT2[:, :, [3, 2, 1]])\n",
|
| 127 | + "plt.savefig(f\"{base_path}/T2.png\", dpi=300)\n", |
77 | 128 | "plt.show()\n",
|
78 | 129 | "\n",
|
79 | 130 | "[rows, cols] = np.nonzero(tra_map)\n",
|
80 | 131 | "num_samples = len(rows)\n",
|
81 | 132 | "rows = np.reshape(rows, (num_samples, 1))\n",
|
82 | 133 | "cols = np.reshape(cols, (num_samples, 1))\n",
|
83 |
| - "temp = np.concatenate((rows, cols), axis = 1)\n", |
| 134 | + "temp = np.concatenate((rows, cols), axis=1)\n", |
84 | 135 | "np.random.shuffle(temp)\n",
|
85 | 136 | "rows = temp[:, 0].reshape((num_samples,))\n",
|
86 |
| - "cols = temp[:, 1].reshape((num_samples,))\n", |
| 137 | + "cols = temp[:, 1].reshape((num_samples,))" |
| 138 | + ] |
| 139 | + }, |
| 140 | + { |
| 141 | + "cell_type": "markdown", |
| 142 | + "metadata": {}, |
| 143 | + "source": [ |
| 144 | + "<font face=\"Calibri\" size=\"3\">Create 3x3 patches as training samples according to the training map</font> \n", |
| 145 | + "<br><br>\n", |
| 146 | + "<font face=\"Calibri\" size=\"3\"><b>Create numpy arrays temporarily filled with zeros to hold our 3x3 patches:</b></font> " |
| 147 | + ] |
| 148 | + }, |
| 149 | + { |
| 150 | + "cell_type": "code", |
| 151 | + "execution_count": null, |
| 152 | + "metadata": {}, |
| 153 | + "outputs": [], |
| 154 | + "source": [ |
| 155 | + "x_tra_t1 = np.float32(\n", |
| 156 | + " np.zeros([num_samples, patch_size, patch_size, num_bands]))\n", |
| 157 | + "x_tra_t2 = np.float32(\n", |
| 158 | + " np.zeros([num_samples, patch_size, patch_size, num_bands]))\n", |
87 | 159 | "\n",
|
88 |
| - "# sampling 3x3 patches as training samples according to the training map\n", |
89 |
| - "x_tra_t1 = np.float32(np.zeros([num_samples, patch_size, patch_size, num_bands])) # training samples from T1 image\n", |
90 |
| - "x_tra_t2 = np.float32(np.zeros([num_samples, patch_size, patch_size, num_bands])) # training samples from T2 image\n", |
91 |
| - "y_tra = np.uint8(np.zeros([num_samples,])) # ground truths for training samples\n", |
| 160 | + "y_tra = np.uint8(np.zeros([num_samples, ])) # ground truths for training samples" |
| 161 | + ] |
| 162 | + }, |
| 163 | + { |
| 164 | + "cell_type": "markdown", |
| 165 | + "metadata": {}, |
| 166 | + "source": [ |
| 167 | + "<font face=\"Calibri\" size=\"3\"><b>Populate the zero-filled arrays with appropriate values:</b></font> " |
| 168 | + ] |
| 169 | + }, |
| 170 | + { |
| 171 | + "cell_type": "code", |
| 172 | + "execution_count": null, |
| 173 | + "metadata": {}, |
| 174 | + "outputs": [], |
| 175 | + "source": [ |
92 | 176 | "for i in range(num_samples):\n",
|
93 |
| - " patch = imgT1[rows[i]-int((patch_size-1)/2) : rows[i]+int((patch_size-1)/2)+1, cols[i]-int((patch_size-1)/2) : cols[i]+int((patch_size-1)/2)+1, :]\n", |
| 177 | + " patch = imgT1[rows[i]-int((patch_size-1)/2): rows[i]+int((patch_size-1)/2)+1,\n", |
| 178 | + " cols[i]-int((patch_size-1)/2): cols[i]+int((patch_size-1)/2)+1, :]\n", |
94 | 179 | " x_tra_t1[i, :, :, :] = patch\n",
|
95 |
| - " patch = imgT2[rows[i]-int((patch_size-1)/2) : rows[i]+int((patch_size-1)/2)+1, cols[i]-int((patch_size-1)/2) : cols[i]+int((patch_size-1)/2)+1, :]\n", |
| 180 | + " patch = imgT2[rows[i]-int((patch_size-1)/2): rows[i]+int((patch_size-1)/2)+1,\n", |
| 181 | + " cols[i]-int((patch_size-1)/2): cols[i]+int((patch_size-1)/2)+1, :]\n", |
96 | 182 | " x_tra_t2[i, :, :, :] = patch\n",
|
97 | 183 | " y_tra[i] = tra_map[rows[i], cols[i]]-1\n",
|
98 | 184 | "\n",
|
99 | 185 | "[rows, cols] = np.nonzero(test_map)\n",
|
100 | 186 | "num_samples = len(rows)\n",
|
101 | 187 | "rows = np.reshape(rows, (num_samples, 1))\n",
|
102 | 188 | "cols = np.reshape(cols, (num_samples, 1))\n",
|
103 |
| - "temp = np.concatenate((rows, cols), axis = 1)\n", |
| 189 | + "temp = np.concatenate((rows, cols), axis=1)\n", |
104 | 190 | "np.random.shuffle(temp)\n",
|
105 | 191 | "rows = temp[:, 0].reshape((num_samples,))\n",
|
106 |
| - "cols = temp[:, 1].reshape((num_samples,))\n", |
107 |
| - "\n", |
108 |
| - "# sampling 3x3 patches as test samples according to the test map\n", |
109 |
| - "x_test_t1 = np.float32(np.zeros([num_samples, patch_size, patch_size, num_bands])) # test samples from T1 image\n", |
110 |
| - "x_test_t2 = np.float32(np.zeros([num_samples, patch_size, patch_size, num_bands])) # test samples from T2 image\n", |
111 |
| - "y_test = np.uint8(np.zeros([num_samples,])) # ground truths for test samples\n", |
| 192 | + "cols = temp[:, 1].reshape((num_samples,))" |
| 193 | + ] |
| 194 | + }, |
| 195 | + { |
| 196 | + "cell_type": "markdown", |
| 197 | + "metadata": {}, |
| 198 | + "source": [ |
| 199 | + "<font face=\"Calibri\" size=\"3\"><b>Sample 3x3 patches as test samples according to the test map:</b></font> " |
| 200 | + ] |
| 201 | + }, |
| 202 | + { |
| 203 | + "cell_type": "code", |
| 204 | + "execution_count": null, |
| 205 | + "metadata": {}, |
| 206 | + "outputs": [], |
| 207 | + "source": [ |
| 208 | + "# test samples from T1 image\n", |
| 209 | + "x_test_t1 = np.float32(\n", |
| 210 | + " np.zeros([num_samples, patch_size, patch_size, num_bands]))\n", |
| 211 | + "# test samples from T2 image\n", |
| 212 | + "x_test_t2 = np.float32(\n", |
| 213 | + " np.zeros([num_samples, patch_size, patch_size, num_bands]))\n", |
| 214 | + "# ground truths for test samples\n", |
| 215 | + "y_test = np.uint8(np.zeros([num_samples, ])) \n", |
112 | 216 | "for i in range(num_samples):\n",
|
113 |
| - " patch = imgT1[rows[i]-int((patch_size-1)/2) : rows[i]+int((patch_size-1)/2)+1, cols[i]-int((patch_size-1)/2) : cols[i]+int((patch_size-1)/2)+1, :]\n", |
| 217 | + " patch = imgT1[rows[i]-int((patch_size-1)/2): rows[i]+int((patch_size-1)/2)+1,\n", |
| 218 | + " cols[i]-int((patch_size-1)/2): cols[i]+int((patch_size-1)/2)+1, :]\n", |
114 | 219 | " x_test_t1[i, :, :, :] = patch\n",
|
115 |
| - " patch = imgT2[rows[i]-int((patch_size-1)/2) : rows[i]+int((patch_size-1)/2)+1, cols[i]-int((patch_size-1)/2) : cols[i]+int((patch_size-1)/2)+1, :]\n", |
| 220 | + " patch = imgT2[rows[i]-int((patch_size-1)/2): rows[i]+int((patch_size-1)/2)+1,\n", |
| 221 | + " cols[i]-int((patch_size-1)/2): cols[i]+int((patch_size-1)/2)+1, :]\n", |
116 | 222 | " x_test_t2[i, :, :, :] = patch\n",
|
117 | 223 | " y_test[i] = test_map[rows[i], cols[i]]-1\n",
|
118 | 224 | "\n",
|
119 | 225 | "print('the shape of input tensors on training set is: {}'.format(x_tra_t1.shape))\n",
|
120 | 226 | "print('the shape of target tensor on training set is: {}'.format(y_tra.shape))\n",
|
121 | 227 | "print('the shape of input tensors on training set is: {}'.format(x_test_t1.shape))\n",
|
122 |
| - "print('the shape of target tensor on training set is: {}'.format(y_test.shape))\n", |
123 |
| - "print('##################################')" |
| 228 | + "print('the shape of target tensor on training set is: {}'.format(y_test.shape))" |
124 | 229 | ]
|
125 | 230 | },
|
126 | 231 | {
|
127 | 232 | "cell_type": "markdown",
|
128 | 233 | "metadata": {},
|
129 | 234 | "source": [
|
130 |
| - "# Step 2: Building up the recurrent convolutional network" |
| 235 | + "<hr>\n", |
| 236 | + "<font face=\"Calibri\" size=\"5\"> <b>3. Building up the recurrent convolutional network </b> </font> \n", |
| 237 | + "\n", |
| 238 | + "<font face=\"Calibri\" size=\"3\"><b>Write a function to build the network:</b></font> " |
131 | 239 | ]
|
132 | 240 | },
|
133 | 241 | {
|
|
136 | 244 | "metadata": {},
|
137 | 245 | "outputs": [],
|
138 | 246 | "source": [
|
139 |
| - "from keras.models import Model\n", |
140 |
| - "from keras.engine.input_layer import Input\n", |
141 |
| - "from keras.layers import Conv2D, Reshape, Activation, Concatenate, GRU, Dense, LSTM, SimpleRNN\n", |
142 |
| - "\n", |
143 | 247 | "def build_network():\n",
|
144 | 248 | " # the T1 branch of the convolutional sub-network\n",
|
145 |
| - " input1 = Input(shape = (3, 3, 6))\n", |
146 |
| - " x1 = Conv2D(filters = 32, kernel_size = 3, strides = 1, padding = 'valid')(input1)\n", |
| 249 | + " input1 = Input(shape=(3, 3, 6))\n", |
| 250 | + " x1 = Conv2D(filters=32, kernel_size=3, strides=1, padding='valid')(input1)\n", |
147 | 251 | " x1 = Activation('relu')(x1)\n",
|
148 |
| - " x1 = Reshape(target_shape = (1, 32))(x1)\n", |
149 |
| - " \n", |
| 252 | + " x1 = Reshape(target_shape=(1, 32))(x1)\n", |
| 253 | + "\n", |
150 | 254 | " # the T2 branch of the convolutional sub-network\n",
|
151 |
| - " input2 = Input(shape = (3, 3, 6))\n", |
152 |
| - " x2 = Conv2D(filters = 32, kernel_size = 3, strides = 1, padding = 'valid')(input2)\n", |
| 255 | + " input2 = Input(shape=(3, 3, 6))\n", |
| 256 | + " x2 = Conv2D(filters=32, kernel_size=3, strides=1, padding='valid')(input2)\n", |
153 | 257 | " x2 = Activation('relu')(x2)\n",
|
154 |
| - " x2 = Reshape(target_shape = (1, 32))(x2)\n", |
155 |
| - " \n", |
| 258 | + " x2 = Reshape(target_shape=(1, 32))(x2)\n", |
| 259 | + "\n", |
156 | 260 | " # the recurrent sub-network\n",
|
157 |
| - " x = Concatenate(axis = 1)([x1, x2])\n", |
| 261 | + " x = Concatenate(axis=1)([x1, x2])\n", |
158 | 262 | " #x = SimpleRNN(units = 128)(x)\n",
|
159 |
| - " x = LSTM(units = 128)(x)\n", |
| 263 | + " x = LSTM(units=128)(x)\n", |
160 | 264 | " #x = GRU(units = 128)(x)\n",
|
161 |
| - " x = Dense(units = 32, activation = 'relu')(x)\n", |
162 |
| - " y = Dense(units = 1, activation = 'sigmoid')(x)\n", |
163 |
| - " \n", |
164 |
| - " net = Model(inputs = [input1, input2], outputs = y)\n", |
| 265 | + " x = Dense(units=32, activation='relu')(x)\n", |
| 266 | + " y = Dense(units=1, activation='sigmoid')(x)\n", |
| 267 | + "\n", |
| 268 | + " net = Model(inputs=[input1, input2], outputs=y)\n", |
165 | 269 | "\n",
|
166 | 270 | " net.summary()\n",
|
167 |
| - " \n", |
168 |
| - " return net" |
| 271 | + "\n", |
| 272 | + " return net\n" |
169 | 273 | ]
|
170 | 274 | },
|
171 | 275 | {
|
172 | 276 | "cell_type": "markdown",
|
173 | 277 | "metadata": {},
|
174 | 278 | "source": [
|
175 |
| - "# Step 3: Network training" |
| 279 | + "<hr>\n", |
| 280 | + "<font face=\"Calibri\" size=\"5\"> <b>4. Network training </b> </font> \n", |
| 281 | + "\n", |
| 282 | + "<font face=\"Calibri\" size=\"3\"><b>Build the network:</b></font> " |
176 | 283 | ]
|
177 | 284 | },
|
178 | 285 | {
|
|
187 | 294 | "net = build_network()"
|
188 | 295 | ]
|
189 | 296 | },
|
| 297 | + { |
| 298 | + "cell_type": "markdown", |
| 299 | + "metadata": {}, |
| 300 | + "source": [ |
| 301 | + "<font face=\"Calibri\" size=\"3\"><b>Train the network:</b></font> " |
| 302 | + ] |
| 303 | + }, |
190 | 304 | {
|
191 | 305 | "cell_type": "code",
|
192 | 306 | "execution_count": null,
|
193 | 307 | "metadata": {},
|
194 | 308 | "outputs": [],
|
195 | 309 | "source": [
|
196 |
| - "nadam = Nadam(lr = 0.00002)\n", |
197 |
| - "net.compile(optimizer = nadam, loss = 'binary_crossentropy', metrics = ['accuracy'])\n", |
198 |
| - "net_info = net.fit([x_tra_t1, x_tra_t2], y_tra, batch_size = batch_size, validation_split = 0.1, epochs = nb_epoch)\n", |
| 310 | + "nadam = Nadam(lr=0.00002)\n", |
| 311 | + "net.compile(optimizer=nadam, loss='binary_crossentropy', metrics=['accuracy'])\n", |
| 312 | + "net_info = net.fit([x_tra_t1, x_tra_t2], y_tra,\n", |
| 313 | + " batch_size=batch_size, validation_split=0.1, epochs=nb_epoch)\n", |
199 | 314 | "\n",
|
200 | 315 | "loss = net_info.history['loss']\n",
|
201 | 316 | "loss_val = net_info.history['val_loss']\n",
|
202 | 317 | "plt.rcParams.update({'font.size': 18})\n",
|
203 |
| - "fig = plt.figure(figsize=(8,7))\n", |
204 |
| - "ax = fig.add_subplot(1,1,1)\n", |
| 318 | + "fig = plt.figure(figsize=(8, 7))\n", |
| 319 | + "ax = fig.add_subplot(1, 1, 1)" |
| 320 | + ] |
| 321 | + }, |
| 322 | + { |
| 323 | + "cell_type": "markdown", |
| 324 | + "metadata": {}, |
| 325 | + "source": [ |
| 326 | + "<font face=\"Calibri\" size=\"3\"><b>Plot and save the results (loss.png):</b></font> " |
| 327 | + ] |
| 328 | + }, |
| 329 | + { |
| 330 | + "cell_type": "code", |
| 331 | + "execution_count": null, |
| 332 | + "metadata": {}, |
| 333 | + "outputs": [], |
| 334 | + "source": [ |
205 | 335 | "plt.plot(loss)\n",
|
206 | 336 | "plt.plot(loss_val)\n",
|
207 | 337 | "plt.ylabel('loss')\n",
|
208 | 338 | "plt.xlabel('epoch')\n",
|
209 | 339 | "plt.legend(['train', 'val'], loc='upper right')\n",
|
| 340 | + "plt.savefig(f\"{base_path}/loss.png\", bbox_inches='tight', dpi=200)\n", |
210 | 341 | "plt.show()\n",
|
211 |
| - "\n", |
212 |
| - "#sio.savemat('loss_curves.mat', {'loss': loss, 'loss_val': loss_val})\n", |
213 |
| - "print('##########################################')" |
| 342 | + "#sio.savemat('loss_curves.mat', {'loss': loss, 'loss_val': loss_val})" |
214 | 343 | ]
|
215 | 344 | },
|
216 | 345 | {
|
217 | 346 | "cell_type": "markdown",
|
218 | 347 | "metadata": {},
|
219 | 348 | "source": [
|
220 |
| - "# Step 4: Test" |
| 349 | + "<hr>\n", |
| 350 | + "<font face=\"Calibri\" size=\"5\"><b>5. Test</b></font> \n", |
| 351 | + "\n", |
| 352 | + "<font face=\"Calibri\" size=\"3\"><b>Run the network on the test dataset. Save the change map probability and the change map binary (change_map_probability.png and change_map_binary.png):</b></font> " |
221 | 353 | ]
|
222 | 354 | },
|
223 | 355 | {
|
|
238 | 370 | "print('sampling patches...')\n",
|
239 | 371 | "for i in range(1, imgT1.shape[0]-1, 1):\n",
|
240 | 372 | " for j in range(1, imgT1.shape[1]-1, 1):\n",
|
241 |
| - " patch = imgT1[i-int((patch_size-1)/2) : i+int((patch_size-1)/2)+1, j-int((patch_size-1)/2) : j+int((patch_size-1)/2)+1, :]\n", |
| 373 | + " patch = imgT1[i-int((patch_size-1)/2): i+int((patch_size-1)/2)+1,\n", |
| 374 | + " j-int((patch_size-1)/2): j+int((patch_size-1)/2)+1, :]\n", |
242 | 375 | " x_t1[cnt, :, :, :] = patch\n",
|
243 |
| - " patch = imgT2[i-int((patch_size-1)/2) : i+int((patch_size-1)/2)+1, j-int((patch_size-1)/2) : j+int((patch_size-1)/2)+1, :]\n", |
| 376 | + " patch = imgT2[i-int((patch_size-1)/2): i+int((patch_size-1)/2)+1,\n", |
| 377 | + " j-int((patch_size-1)/2): j+int((patch_size-1)/2)+1, :]\n", |
244 | 378 | " x_t2[cnt, :, :, :] = patch\n",
|
245 | 379 | " cnt = cnt + 1\n",
|
246 | 380 | "print('sampling done.')\n",
|
247 | 381 | "pred = net.predict([x_t1, x_t2])\n",
|
248 | 382 | "change_map_prob = np.reshape(pred, (400, 400))\n",
|
249 | 383 | "plt.imshow(change_map_prob)\n",
|
| 384 | + "plt.savefig(f\"{base_path}/change_map_probability.png\", dpi=200)\n", |
250 | 385 | "plt.show()\n",
|
251 | 386 | "\n",
|
252 |
| - "change_map_binary = np.where(change_map_prob<0.5,0,1)\n", |
| 387 | + "change_map_binary = np.where(change_map_prob < 0.5, 0, 1)\n", |
253 | 388 | "plt.imshow(change_map_binary)\n",
|
| 389 | + "plt.savefig(f\"{base_path}/change_map_binary.png\", dpi=200)\n", |
254 | 390 | "plt.show()"
|
255 | 391 | ]
|
256 |
| - }, |
257 |
| - { |
258 |
| - "cell_type": "code", |
259 |
| - "execution_count": null, |
260 |
| - "metadata": {}, |
261 |
| - "outputs": [], |
262 |
| - "source": [] |
263 | 392 | }
|
264 | 393 | ],
|
265 | 394 | "metadata": {
|
|