-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathprune_vgg.py
143 lines (134 loc) · 5.68 KB
/
prune_vgg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
#!/workspace/alexsun/reconv_compression/mtlc_python/bin/env
from parameter import *
from train import train_network
from evaluate import test_network
from utils import *
from reconv import *
from config import *
from prune import *
# Fix a global random seed so runs with different saliency metrics are
# directly comparable. Seeds used in earlier experiment repetitions are
# kept below for reference:
# fix_random_seed(8)
# fix_random_seed(88)
fix_random_seed(888)
from train import retrain
def get_log_file_name(base_file_name):
    """Return an unused log-file path derived from *base_file_name*.

    The first candidate is ``"<base>.txt"``; while the current candidate
    already exists on disk, retry with ``"<base><n>th_run.txt"`` for
    n = 2, 3, ... and return the first path that does not exist yet.
    """
    candidate = base_file_name
    attempt = 1
    while os.path.isfile(candidate + ".txt"):
        attempt += 1
        # Rebuild from the base each time so suffixes never accumulate.
        candidate = f"{base_file_name}{attempt}th_run"
    return candidate + ".txt"
# Indices (into the NUM_LAYER per-layer saliency lists) of the conv layers
# being pruned; must stay in step with `prune_layers` inside main().
_PRUNE_INDICES = [0, 7, 8, 9, 10, 11, 12]

# metric flag -> (saliency module/variable suffix, human-readable norm name)
_METRICS = {
    'spec': ('eigvs', 'Spectral Norm'),
    'nuc': ('nucs', 'Nuclear Norm'),
    'fro': ('fros', 'Frobenius Norm'),
}


def _load_saliency(model, net_name, suffix):
    """Load the per-layer saliency lists from ``saliency.{model}_{net_name}_{suffix}``.

    Returns ``[<suffix>0, <suffix>1, ..., <suffix>{NUM_LAYER-1}]`` read as
    module attributes. Replaces the original ``exec("from ... import *")`` /
    ``eval(f"{suffix}{i}")`` pair, which relied on CPython-specific behavior
    of ``locals()`` inside a function and could silently stop working.
    """
    import importlib
    mod = importlib.import_module(f"saliency.{model}_{net_name}_{suffix}")
    return [getattr(mod, f"{suffix}{i}") for i in range(NUM_LAYER)]


def main():
    """Prune a trained VGG by a spectral/nuclear/Frobenius saliency metric,
    then fine-tune the pruned network and report accuracy.

    Command-line driven (see build_parser()); requires args.metric in
    {'spec', 'nuc', 'fro'} and a VGG-family args.model.
    """
    parser = build_parser()
    args = parser.parse_args()
    assert args.metric is not None
    assert 'vgg' in args.model

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    print(str(args.gpu))
    # NOTE(review): GPU 7 is hard-coded here and in load_basenet() below,
    # ignoring args.gpu -- confirm whether this should honor the CLI flag.
    os.environ['CUDA_VISIBLE_DEVICES'] = '7'
    torch.cuda.set_device(7)
    # device = torch.device(args.gpu if args.gpu_no >= 0 else "cpu")

    # Load the trained base network and report its cost and accuracy.
    net_name = f"{args.data_set}_model"
    rev_flag = args.rev
    print("Reverse:", rev_flag)
    network = load_basenet(args, name=net_name, gpu="cuda:7")
    print("Base Network")
    print_model_param_flops(network.cpu(), input_res=32)
    print_model_param_nums(network.cpu())
    network = network.cuda()
    _, _, (top1, top5) = test_network(args, network=network)

    # Pruning configuration: which conv layers, and how many channels each.
    prune_layers = ['conv1', 'conv8', 'conv9', 'conv10', 'conv11', 'conv12', 'conv13']
    if args.prune_cfg == 1:
        prune_channels = [32, 256, 256, 256, 256, 256, 256]
    elif args.prune_cfg == 2:
        prune_channels = [32, 384, 384, 384, 384, 384, 384]
    elif args.prune_cfg == 3:
        prune_channels = [32, 448, 448, 448, 448, 448, 448]
    else:
        print("Unrecognized Pruning Configuration")
        exit()

    # Saliency-driven pruning. The original repeated this whole section
    # verbatim once per metric; it is now table-driven and shared.
    if args.metric not in _METRICS:
        # BUGFIX: the original only printed a warning here and fell through,
        # crashing later with a NameError on the undefined log_file.
        print("Unrecognized Metric Input")
        exit()
    suffix, norm_name = _METRICS[args.metric]
    print(f"Prunning By {norm_name}")
    all_vals = _load_saliency(args.model, net_name, suffix)
    if args.metric == 'spec':
        # Spectral saliencies are stored as numpy arrays; the original
        # converted every entry with torch.from_numpy before pruning.
        all_vals = [[torch.from_numpy(v) for v in layer] for layer in all_vals]
    prune_vals = [all_vals[i] for i in _PRUNE_INDICES]
    network = prune(network, prune_layers, prune_channels,
                    prune_eigvs=prune_vals, magnitude=False, rev=rev_flag)

    # Build the log-file path / checkpoint name from the run configuration.
    base_log = args.save_path + "/logs/" + f"{args.model}_{net_name}_{args.metric}_pruned"
    net_name += f"{args.metric}_pruned"
    if rev_flag:
        net_name += "_rev"
        base_log += "_rev"
    if args.prune_cfg != 1:
        base_log += f"cfg{args.prune_cfg}"
        net_name += f"cfg{args.prune_cfg}"
    log_path = get_log_file_name(base_log)
    print(log_path)
    log_file = open(log_path, 'a')

    # Show the pruned convolution kernel shapes and the reduced cost.
    for i in range(len(network.features)):
        if isinstance(network.features[i], torch.nn.Conv2d):
            print(i)
            kernel = network.features[i].weight
            print(kernel.shape)
    print("Pruned Network")
    print_model_param_flops(network.cpu(), input_res=32)
    print_model_param_nums(network.cpu())

    # Fine-tune the pruned network and report before/after accuracy.
    network.cuda()
    _, _, (top1, top5) = test_network(args, network=network, log_file=log_file)
    print("Pre-finetuning Accuracy:", top1, top5)
    network, acc1, acc5 = retrain(args, network, retrain_epoch=80, save_best=True,
                                  net_name=f"{args.model}_{net_name}", log_file=log_file)
    print("Post-finetuning Accuracy:", acc1, acc5)
# Script entry point: run the prune-and-finetune pipeline.
if __name__ == '__main__':
    main()