test_generation.py
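
# Evaluation script for unconditional point-cloud generation: it samples point clouds from a
# trained model, denormalizes them with the ShapeNet (PointFlow split) test-set statistics,
# and scores them against the reference clouds with compute_all_metrics and JSD.
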
from datasets.shapenet_pointflow import get_datasets
import numpy as np
import torch
from metrics.evaluation_metrics import compute_all_metrics
from pprint import pprint
from tqdm import tqdm
import os
from metrics.evaluation_metrics import jsd_between_point_cloud_sets as JSD


def get_test_dataset(path, cates=['chair']):
    # Build a minimal argument namespace using the same dataset parameters as PointFlow.
    class Args: pass
    args = Args()
    args.data_dir = path
    args.dataset_type = 'shapenet15k'
    args.tr_max_sample_points = 2048
    args.te_max_sample_points = 2048
    args.dataset_scale = 1.
    args.normalize_per_shape = False
    args.normalize_std_per_axis = False
    args.cates = cates
    _, test_dataset = get_datasets(args)
    return test_dataset


def get_test_loader(path, cates=['chair']):
    test_dataset = get_test_dataset(path, cates)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=32,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
    )
    return test_loader
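
# Quick sanity check (hypothetical path; adjust to your local dataset copy):
#   loader = get_test_loader('/path/to/ShapeNetCore.v2.PC15k')
#   batch = next(iter(loader))
#   print(batch['test_points'].shape)  # expected to be (32, 2048, 3) with the settings above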


def evaluate_gen(path, model, sampler, save_path='./results/', cates=['chair'], emb=None, load_samples=False):
    # Either reload previously generated samples from disk or generate new ones.
    if load_samples:
        sample_pcs = torch.tensor(np.load(os.path.join(save_path, 'generated_pcs.npy')))
        ref_pcs = torch.tensor(np.load(os.path.join(save_path, 'reference_pcs.npy')))
    else:
        print('Generating new data...')
        loader = get_test_loader(path, cates)
        all_sample = []
        all_ref = []
        for data in tqdm(loader):
            idx_b, te_pc = data['idx'], data['test_points']
            te_pc = te_pc.cuda()
            # B: number of samples in the batch, N: number of points per cloud
            B, N = te_pc.shape[0], te_pc.shape[1]
            out_pc = sampler.sample(model, B, n_points=N, emb=emb)
            out_pc = out_pc.cuda()
            # Denormalize generated and reference clouds with the per-shape dataset statistics.
            m, s = data['mean'].float(), data['std'].float()
            m = m.cuda()
            s = s.cuda()
            out_pc = out_pc * s + m
            te_pc = te_pc * s + m
            all_sample.append(out_pc)
            all_ref.append(te_pc)
        sample_pcs = torch.cat(all_sample, dim=0)
        ref_pcs = torch.cat(all_ref, dim=0)
        # Save the results so they can be reloaded later with load_samples=True.
        os.makedirs(save_path, exist_ok=True)
        np.save(os.path.join(save_path, 'generated_pcs.npy'), sample_pcs.cpu().numpy())
        np.save(os.path.join(save_path, 'reference_pcs.npy'), ref_pcs.cpu().numpy())

    # Quick sanity check: maximum point norm and mean of each set.
    print((sample_pcs * sample_pcs).sum(dim=-1, keepdim=True).sqrt().max(),
          (ref_pcs * ref_pcs).sum(dim=-1, keepdim=True).sqrt().max())
    print(sample_pcs.mean(), ref_pcs.mean())

    print(f'Comparing {sample_pcs.shape[0]} generated samples of shape {list(sample_pcs.shape[1:])} '
          f'to {ref_pcs.shape[0]} reference samples of shape {list(ref_pcs.shape[1:])}')
    results = compute_all_metrics(sample_pcs, ref_pcs, batch_size=32)
    results = {k: (v.cpu().detach().item() if not isinstance(v, float) else v)
               for k, v in results.items()}
    pprint(results)

    jsd = JSD(sample_pcs.cpu().numpy(), ref_pcs.cpu().numpy())
    print('JSD: {}'.format(jsd))
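
# Note: once generated_pcs.npy and reference_pcs.npy exist under save_path, the metrics can be
# recomputed without re-sampling, e.g.:
#   evaluate_gen(path, model, sampler, save_path='./results/', load_samples=True)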


def main():
    # path = "/home/tourloid/Desktop/PhD/Data/ShapeNetCore.v2.PC15k"
    path = "/home/vvrbeast/Desktop/Giannis/Data/ShapeNetCore.v2.PC15k"

    from models.ddpm_unet_attn import SPVUnet
    from utils.schedulers import DDPMSparseSchedulerGPU, DDIMSparseSchedulerGPU

    # model = SPVUnet(voxel_size=0.1, nfs=(32, 64, 128, 256), num_layers=1, pres=1e-5)
    model = SPVUnet(voxel_size=0.1, nfs=(64, 128, 128, 256), pres=1e-5, attn_chans=16, attn_start=3)

    checkpoint_path = './checkpoints/ddpm_unet_attn_64_128_256_256_2700.pt'
    checkpoint = torch.load(checkpoint_path)['state_dict']
    model.load_state_dict(checkpoint)
    model.cuda().eval()

    ddpm_sched = DDPMSparseSchedulerGPU(n_steps=1000, beta_min=0.0001, beta_max=0.02)
    evaluate_gen(path, model, ddpm_sched, save_path='./results/')


if __name__ == "__main__":
    main()