import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
- import tensorflow as tf
+ # import tensorflow as tf
import keras.backend as K
-
+ import keras

from keras.models import Model, load_model
- from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout, Maximum
- from keras.layers.core import Lambda, RepeatVector, Reshape
- from keras.layers.convolutional import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose
- from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D, MaxPooling3D
- from keras.layers.merge import concatenate, add
- from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
+ # from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout, Maximum
+ # from keras.layers.core import Lambda, RepeatVector, Reshape
+ # from keras.layers.convolutional import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose
+ # from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D, MaxPooling3D
+ # from keras.layers.merge import concatenate, add
+ # from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
- from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
+ # from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

- from skimage.io import imread, imshow, concatenate_images
- from skimage.transform import resize
+ # from skimage.io import imread, imshow, concatenate_images
+ # from skimage.transform import resize

import os
- from skimage.io import imread, imshow, concatenate_images
- from skimage.transform import resize
+ # from skimage.io import imread, imshow, concatenate_images
+ # from skimage.transform import resize
from medpy.io import load
from medpy.io import save
import numpy as np
+ #import time
+ #import sys
+ #sys.path.insert(1, '~/Brain_Segmentation/utils.py')
#import cv2

- from ../utils import f1_score, dice_coef, dice_coef_loss
+ from utils import f1_score, dice_coef, dice_coef_loss, standardize, compute_class_sens_spec, get_sens_spec_df, one_hot_encode
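# reverse_encode: collapse a one-hot / per-class probability volume back to integer class labels via argmax over the last axis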
def reverse_encode(a):
    return np.argmax(a, axis=-1)



- model_to_predict1 = load_model('../Models/survival_pred_240_240.h5', custom_objects={'dice_coef_loss': dice_coef_loss})
- model_to_predict2 = load_model('../Models/survival_pred_240_155_1.h5')
- model_to_predict3 = load_model('../Models/survival_pred_240_155_2.h5')
- path = '../TestData'
+
+ model_to_predict1 = load_model('../first_240_155.h5', custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
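# custom loss/metric callables (dice_coef_loss, dice_coef) must be passed through custom_objects, otherwise load_model cannot deserialize a model that was saved with them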
+ #model_to_predict2 = load_model('../Models/survival_pred_240_155_1.h5', custom_objects={'dice_coef_loss': dice_coef_loss, 'f1_score': f1_score})
+ #model_to_predict3 = load_model('../Models/survival_pred_240_155_2.h5', custom_objects={'dice_coef_loss': dice_coef_loss, 'f1_score': f1_score})
+ path = '../../Brats17TrainingData/LGG'
all_images = os.listdir(path)
#print(len(all_images))
all_images.sort()

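# pre-allocate one multi-modal input volume: 240 x 240 x 155 voxels with 4 channels (presumably the four BraTS modalities: FLAIR, T1, T1ce, T2)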
data = np.zeros((240,240,155,4))

- for i in range(100, 101):
-     new_image = np.zeros((240,240,155,5))
+ for i in range(40, 42):
+     new_image = np.zeros((240,240,155,4))
    print(i)
    x_to = []
    y_to = []
@@ -64,24 +68,42 @@ def reverse_encode(a):
        print("Entered ground truth")
    else:
        image_data, image_header = load(image_path);
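        # standardize() comes from utils; presumably it normalizes each modality's intensities (e.g. zero mean / unit variance) before prediction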
+         image_data = standardize(image_data)
        data[:,:,:,w] = image_data
        print("Entered modality")
        w = w + 1

print(data.shape)
-
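# run the loaded model over the assembled volume; shaped (240, 240, 155, 4), the first axis presumably acts as a batch of 240 slices of size 240x155x4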
+ Y_hat = model_to_predict1.predict(data)
+ #print(Y_hat.shape)
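# BraTS ground truth uses labels {0, 1, 2, 4}; remapping 4 -> 3 makes the labels contiguous for one-hot encoding with num_classes=4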
+ image_data2[image_data2==4] = 3
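# binarize the per-class probabilities with a hard 0.6 cutoff (a voxel may end up with zero or several positive classes under this scheme)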
+ Y_hat[Y_hat > 0.6] = 1.0
+ Y_hat[Y_hat <= 0.6] = 0.0
+ #print(Y_hat[0,100,100])
+ #print(len(Y_hat[:,:,:,0]==1))
+ #print(len(Y_hat[:,:,:,1]==1))
+ #print(len(Y_hat[:,:,:,2]==1))
+ #print(len(Y_hat[:,:,:,3]==1))
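# one-hot encode the ground truth to match the 4-channel prediction; keras.utils.to_categorical adds a trailing class axis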
+ image_data2 = keras.utils.to_categorical(image_data2, num_classes=4)
+ #image_data2 = one_hot_encode(image_data2)
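# per-class sensitivity = TP/(TP+FN) and specificity = TN/(TN+FP); get_sens_spec_df from utils presumably tabulates these for each tumor class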
+ print(get_sens_spec_df(Y_hat, image_data2))
+ print(model_to_predict1.evaluate(x=data, y=image_data2))
+ print(model_to_predict1.metrics_names)
#Combining results from all 3 dimensions
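# the blocks below, disabled in this commit, fused per-axis predictions: three models scored slices along the three orthogonal axes, accumulated the probabilities into new_image, and previously averaged them over the three directions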
-
+ '''
for slice_no in range(0,240):
    a = slice_no
    X = data[slice_no,:,:,:]
    X = X.reshape(1,240,155,4)
    Y_hat = model_to_predict3.predict(X)
    new_image[a,:,:,:] = Y_hat[0,:,:,:]
+ '''
+

+ '''
for slice_no in range(0,155):
    a = slice_no
-     X = data[:,:slice_no,:]
+     X = data[:,:, slice_no,:]
    X = X.reshape(1,240,240,4)
    Y_hat = model_to_predict1.predict(X)
    new_image[:,:,slice_no,:] += Y_hat[0,:,:,:]
@@ -92,10 +114,39 @@ def reverse_encode(a):
    X = X.reshape(1,240,155,4)
    Y_hat = model_to_predict2.predict(X)
    new_image[:,a,:,:] += Y_hat[0,:,:,:]
-
+ '''

- new_image = new_image/3  #average of probabilities from 3 directions
+
+ #new_image = new_image/3.0
+ #print(new_image[100,100,100])
+ #pred = pred.reshape(-1,5)
+ #pred1 = np.argmax(new_image[:,:,:,1:],axis=3)
+ #new_image = np.argmax(new_image,axis=3)
+ #pred1[new_image[:,:,:,0] > 0.56] = 0  #average of probabilities from 3 directions
+ #pred1 = pred1.astype('int64')
+ #image_data2 = image_data2.astype('int64')
+
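# the two loops below (also disabled) step through axial slices to visually compare the fused prediction (pred1) with the ground truth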
+ '''
+ for slice_no in range(0,155):
+     print(slice_no)
+     img = pred1[:,:,slice_no]
+     imgplot = plt.imshow(img)
+     plt.show(block=False)
+     #time.sleep(1)
+     plt.pause(0.1)
+     plt.close()
+
+
+ for slice_no in range(0,155):
+     print(slice_no)
+     img = image_data2[:,:,slice_no]
+     imgplot = plt.imshow(img)
+     plt.show(block=False)
+     #time.sleep(1)
+     plt.pause(0.1)
+     plt.close()
+ '''


- name = '../all_images/VSD.Seg_001.' + image_id + '.mha'
- save(new_image,name)
+ # name = '../all_images/VSD.Seg_001.' + image_id + '.mha'
+ # save(new_image,name)