
Commit ff537ef

Author: SIDDHARTA
Commit message: Fourth Commit
Parent: 341dc80

19 files changed: +2974 −1424 lines

.ipynb_checkpoints/Exploratory_Data_Analysis-checkpoint.ipynb

+124 −197
Large diffs are not rendered by default.

.ipynb_checkpoints/Inference_Script-checkpoint.ipynb

+977
Large diffs are not rendered by default.

.ipynb_checkpoints/Inference_Script_Real_Time-checkpoint.ipynb

-311
This file was deleted.

.ipynb_checkpoints/Train_Segmentation_Model-checkpoint.ipynb

+315 −188
Large diffs are not rendered by default.

Exploratory_Data_Analysis.ipynb

+124 −197
Large diffs are not rendered by default.

Inference_Script.ipynb

+977
Large diffs are not rendered by default.

Inference_Script_Real_Time.ipynb

-311
This file was deleted.

Models/model.pth

+2 −2

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ab6fafee16ec4daca6cf62a57314ee6886165987afee8e04d7f9f559617a3f6
-size 126339797
+oid sha256:9e76358176631bed1f29a87bdf5d1b9c0a7da1d25c461b69c0e6c88e0c100ed5
+size 126373167
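
Note that Models/model.pth is tracked with Git LFS, so this diff touches only the pointer file (object hash and byte size), not the weights themselves; the actual checkpoint grew from 126,339,797 to 126,373,167 bytes (~126 MB) and lives in LFS storage. Fetching it after a clone requires git lfs pull.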

ONNX_models/Unet.onnx

67.9 MB
Binary file not shown.

Train_Segmentation_Model.ipynb

+315 −188
Large diffs are not rendered by default.

Utilities/Data_Retriever_Seg.py

+2 −2

@@ -46,8 +46,8 @@ def __getitem__(self, index):
 
     def make_mask(self, row_id):
         name = self.df.iloc[row_id].Name
-        annot = "\\".join([self.annot_folder, name])
-        fname = "\\".join([self.image_folder, (name[:-4]+'.jpg')])
+        annot = "\\".join([self.annot_folder, name+'.xml'])
+        fname = "\\".join([self.image_folder, (name+'.jpg')])
         labels = self.df.iloc[row_id][1:7].to_dict()
 
         tree = ET.parse(annot)
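
For context: this fix tracks the companion change in Utilities/Extract_masks.py, where create_filepaths now stores Name without its file extension (filename[:-4]), so make_mask must append '.xml' and '.jpg' itself. A minimal sketch of the resulting path construction; the folder layout matches the repo, but the file name crazing_1 is hypothetical:

    # Hypothetical example values; real names come from the annotations DataFrame.
    annot_folder, image_folder = '.\\ANNOTATIONS', '.\\IMAGES'
    name = 'crazing_1'  # stored without extension since the Extract_masks.py change

    annot = "\\".join([annot_folder, name + '.xml'])  # -> .\ANNOTATIONS\crazing_1.xml
    fname = "\\".join([image_folder, name + '.jpg'])  # -> .\IMAGES\crazing_1.jpg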

Utilities/Extract_masks.py

+75 (new file)

@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[2]:
+
+import xml.etree.ElementTree as ET
+import os
+import cv2
+import pandas as pd
+import numpy as np
+from itertools import product
+
+
+# In[ ]:
+
+def create_filepaths(path):
+    df = pd.DataFrame()
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        for filename in filenames:
+            temp_path = "\\".join([path, filename])
+            tree = ET.parse(temp_path)
+            root = tree.getroot()
+            dict1 = dict()
+            ls = []
+            for description in root.iter('name'):
+                ls.append(description.text)
+            res = np.array(ls)
+            res = np.unique(res)
+            ls = res.tolist()
+            dict1['Name'] = filename[:-4]  # file name stored without its extension
+            for ele in ls:
+                dict1[ele] = 1
+            df = df.append(dict1, ignore_index=True)
+    df = df.replace(np.nan, 0)
+    df = df[['Name', 'crazing', 'patches', 'inclusion', 'pitted_surface', 'rolled-in_scale', 'scratches']]
+    df['Number_of_Defects'] = df.drop('Name', axis=1).sum(axis=1)
+
+    return df
+
+def make_mask(annot, labels):
+    tree = ET.parse(annot)
+    root = tree.getroot()
+    # extract image dimensions
+    width = int(root.find('.//size/width').text)
+    height = int(root.find('.//size/height').text)
+    masks = np.zeros((height, width, len(labels)), dtype=np.uint8)  # one binary channel per defect class
+
+    for idx, label in enumerate(labels):
+        if labels.get(label) == 1:
+            boxes = []
+            for obj in root.findall('.//object'):
+                if obj.find('name').text == label:
+                    for box in obj.findall('.//bndbox'):
+                        xmin = int(box.find('xmin').text)
+                        ymin = int(box.find('ymin').text)
+                        xmax = int(box.find('xmax').text)
+                        ymax = int(box.find('ymax').text)
+                        coors = [xmin, ymin, xmax, ymax]
+                        boxes.append(coors)
+
+            mask = np.zeros((height, width), dtype=np.uint8)
+            for i in range(len(boxes)):
+                box = boxes[i]
+                row_s, row_e = box[1], box[3]
+                col_s, col_e = box[0], box[2]
+                row_corr = [*range(row_s, row_e)]
+                col_corr = [*range(col_s, col_e)]
+                coords = np.array(list(product(row_corr, col_corr)))
+                mask[coords[:, 0], coords[:, 1]] = 1
+
+            masks[:, :, idx] = mask.reshape(height, width, order='F')
+
+    return masks
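
A quick usage sketch of the two new helpers, assuming the .\ANNOTATIONS folder of Pascal-VOC-style XML files that Trainer.py points at (the first-row indexing is illustrative only):

    from Utilities.Extract_masks import create_filepaths, make_mask

    df = create_filepaths('.\\ANNOTATIONS')  # one row per image: Name plus six 0/1 defect columns
    row = df.iloc[0]
    labels = row[1:7].to_dict()              # the same slice Data_Retriever_Seg.py uses
    masks = make_mask('.\\ANNOTATIONS\\' + row.Name + '.xml', labels)
    print(masks.shape)                       # (height, width, 6): one binary mask per defect class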

Utilities/ONNX_converter.py

+58 (new file)

@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[2]:
+
+from Resnet_UNet import ResNetUNet
+import torch.nn as nn
+from torchvision import models
+import torch
+from collections import OrderedDict
+import argparse
+
+
+# In[2]:
+
+def main():
+    my_parser = argparse.ArgumentParser(description='Convert pytorch model to ONNX format.')
+
+    my_parser.add_argument('Model_Path',
+                           metavar='model_path',
+                           type=str,
+                           help='The path to the saved pytorch model.')
+
+    args = my_parser.parse_args()
+
+    ckpt_path = args.Model_Path
+    model = ResNetUNet(n_class=6, base_model=models.resnet18(pretrained=True))
+    state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
+    new_state_dict = OrderedDict()
+    for k, v in state["state_dict"].items():
+        name = k[7:]  # strip the 'module.' prefix that nn.DataParallel adds to parameter names
+        new_state_dict[name] = v
+    model.load_state_dict(new_state_dict)
+    model.eval()
+
+    input_var = torch.rand(1, 3, 128, 128)  # use half of the original resolution
+    batch_size = 5
+    # Export the model
+    torch.onnx.export(model,                      # model being run
+                      input_var,                  # model input (or a tuple for multiple inputs)
+                      "./ONNX_models/Unet.onnx",  # where to save the model (can be a file or file-like object)
+                      export_params=True,         # store the trained parameter weights inside the model file
+                      opset_version=11,           # the ONNX version to export the model to
+                      do_constant_folding=True,   # whether to execute constant folding for optimization
+                      input_names=['input'],      # the model's input names
+                      output_names=['output'])    # the model's output names
+
+    print("Successfully converted the model to onnx format.")
+
+
+# In[ ]:
+
+if __name__ == "__main__":
+    main()
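
To sanity-check the exported graph, a minimal sketch with onnxruntime — an assumption, since the diff does not show it as a dependency — after running python Utilities/ONNX_converter.py Models/model.pth:

    import numpy as np
    import onnxruntime as ort  # assumed installed; not confirmed by this commit

    sess = ort.InferenceSession("./ONNX_models/Unet.onnx")
    dummy = np.random.rand(1, 3, 128, 128).astype(np.float32)  # matches the export-time input shape
    (out,) = sess.run(['output'], {'input': dummy})
    print(out.shape)  # presumably (1, 6, 128, 128): one logit map per defect class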

Utilities/Trainer.py

+5 −28

@@ -6,8 +6,6 @@
 
 import pandas as pd
 import numpy as np
-from Utilities.Data_Retriever_Seg import DataRetriever
-from Utilities.Meter import Meter
 import torch
 import torch.nn as nn
 from sklearn.model_selection import train_test_split
@@ -19,7 +17,9 @@
 import gc
 from tqdm import tqdm
 from os import path, walk
-import xml.etree.ElementTree as ET
+from Utilities.Extract_masks import create_filepaths
+from Utilities.Data_Retriever_Seg import DataRetriever
+from Utilities.Meter import Meter
 
 
 # In[2]:
@@ -145,8 +145,8 @@ def provider(self,
         '''Returns dataloader for the model training'''
         image_folder = '.\IMAGES'
         annot_folder = '.\ANNOTATIONS'
-        df = self.create_filepaths(annot_folder)
-        train_df, val_df = train_test_split(df, test_size=0.3, stratify=df["defects"], random_state=69)
+        df = create_filepaths(annot_folder)
+        train_df, val_df = train_test_split(df, test_size=0.3, stratify=df["Number_of_Defects"], random_state=69)
         df = train_df if phase == "train" else val_df
         image_dataset = DataRetriever(df, image_folder, annot_folder, mean, std, phase)
         dataloader = DataLoader(
@@ -159,26 +159,3 @@ def provider(self,
 
         return dataloader
 
-    def create_filepaths(self, path):
-        df = pd.DataFrame()
-        for (dirpath, dirnames, filenames) in walk(path):
-            for filename in filenames:
-                temp_path = "\\".join([path, filename])
-                tree = ET.parse(temp_path)
-                root = tree.getroot()
-                dict1 = dict()
-                ls = []
-                for description in root.iter('name'):
-                    ls.append(description.text)
-                res = np.array(ls)
-                res = np.unique(res)
-                ls = res.tolist()
-                dict1['Name'] = filename
-                for ele in ls:
-                    dict1[ele] = 1
-                df = df.append(dict1, ignore_index=True)
-        df = df.replace(np.nan, 0)
-        df['defects'] = df.drop('Name', axis=1).sum(axis=1)
-
-        return df
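
The provider() hunk swaps the stratification key from the deleted method's 'defects' column to 'Number_of_Defects', the name the shared create_filepaths produces. A toy sketch of what that split does (made-up DataFrame; column names from the diff):

    import pandas as pd
    from sklearn.model_selection import train_test_split

    df = pd.DataFrame({'Name': [f'img_{i}' for i in range(20)],
                       'Number_of_Defects': [1, 1, 2, 2] * 5})
    train_df, val_df = train_test_split(df, test_size=0.3,
                                        stratify=df['Number_of_Defects'], random_state=69)
    # Both splits keep the original 50/50 mix of one- and two-defect images.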
(4 further binary files changed, not shown: one +2.29 KB, one −678 Bytes)

requirements.txt

-3.72 KB
Binary file not shown.
