-
Notifications
You must be signed in to change notification settings - Fork 3
/
eval.py
92 lines (72 loc) · 3.79 KB
/
eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import sys
import os
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
import torch
import numpy as np
import argparse
import model
from glob import glob
from pc_util import normalize_point_cloud, farthest_point_sample, group_points
def eval_patches(xyz, arg, model):
    """Upsample a whole (normalized) point cloud patch by patch.

    The shape is covered with ``arg.num_patch`` overlapping patches of
    ``arg.num_point`` points each (seeded by farthest point sampling); every
    patch is normalized to the unit sphere, pushed through the network, and
    the outputs are mapped back into the shape's coordinate frame.

    Args:
        xyz: (N, 3) numpy array of input points (assumed already normalized
            to the unit sphere by the caller -- TODO confirm).
        arg: parsed CLI namespace; uses ``num_patch`` and ``num_point``.
        model: network callable; ``model(patch, 1)`` returns
            (dense_xyz, dense_normal, sparse_normal) as (1, 3, M) tensors.

    Returns:
        Tuple of two (num_patch * M, 3) numpy arrays:
        upsampled point coordinates and their predicted normals.
    """
    centroids = farthest_point_sample(xyz, arg.num_patch)
    patches = group_points(xyz, centroids, arg.num_point)
    normalized_patches, patch_centroid, furthest_distance = normalize_point_cloud(patches)
    dense_patches_list = []
    dense_normal_list = []
    # Inference only: no_grad avoids building the autograd graph per patch.
    with torch.no_grad():
        for i in range(normalized_patches.shape[0]):
            # (num_point, 3) -> (1, 3, num_point): channels-first with batch dim,
            # as the network expects. NOTE: renamed from `xyz`, which shadowed
            # the function parameter in the original.
            patch_tensor = torch.from_numpy(normalized_patches[i]).unsqueeze(0).transpose(1, 2).cuda()
            # sparse_normal is produced by the model but unused during eval.
            dense_patches, dense_normal, _sparse_normal = model(patch_tensor, 1)
            dense_patches_list.append(dense_patches.transpose(1, 2).cpu().numpy())
            dense_normal_list.append(dense_normal.transpose(1, 2).cpu().numpy())
    gen_ddense_xyz = np.concatenate(dense_patches_list, axis=0)
    # Undo the per-patch normalization so all patches live in the shape frame.
    gen_ddense_xyz = (gen_ddense_xyz * furthest_distance) + patch_centroid
    gen_ddense_normal = np.concatenate(dense_normal_list, axis=0)
    return np.reshape(gen_ddense_xyz, (-1, 3)), np.reshape(gen_ddense_normal, (-1, 3))
def evaluate(model, arg):
    """Upsample every ``.xyz`` shape in ``arg.eval_xyz`` and save the results.

    Each input file is expected to hold one point per row with at least six
    columns: xyz coordinates in columns 0-2 and normals in columns 3-5
    (normals are not consumed here). The upsampled cloud (xyz + predicted
    normal, 6 columns) is downsampled to ``num_shape_point * up_ratio``
    points and written to ``arg.eval_save_path`` under the same file name.

    Args:
        model: the upsampling network (switched to eval mode here).
        arg: parsed CLI namespace; uses ``eval_xyz``, ``eval_save_path``,
            ``num_shape_point`` and ``up_ratio``.
    """
    model.eval()
    shapes = glob(arg.eval_xyz + '/*.xyz')
    for item in shapes:
        # os.path.basename is portable; the original split('/') breaks on
        # Windows-style separators.
        obj_name = os.path.basename(item)
        data = np.loadtxt(item)
        input_sparse_xyz = data[:, 0:3]  # columns 3:6 (normals) are unused
        # Normalize the whole shape to the unit sphere for the network,
        # remembering the transform so outputs can be mapped back.
        normalize_sparse_xyz, centroid, furthest_distance = normalize_point_cloud(input_sparse_xyz)
        dense_xyz, dense_normal = eval_patches(normalize_sparse_xyz, arg, model)
        dense_xyz = dense_xyz * furthest_distance + centroid
        gen_dense = np.concatenate((dense_xyz, dense_normal), axis=-1)
        savepath = os.path.join(arg.eval_save_path, obj_name)
        # Overlapping patches over-generate points; FPS trims the cloud to
        # exactly num_shape_point * up_ratio evenly-spread points.
        gen_dense = farthest_point_sample(gen_dense, arg.num_shape_point * arg.up_ratio)
        np.savetxt(savepath, gen_dense)
        print(obj_name, 'is saved')
if __name__ == '__main__':
    # Command-line entry point: parse options, derive paths, load the
    # trained weights and run evaluation over the test shapes.
    parser = argparse.ArgumentParser()
    parser.add_argument('--up_ratio', type=int, default=4, choices=[4, 8, 12, 16], help='Upsampling Ratio')
    parser.add_argument('--model', default='model_pugeo', help='Model for upsampling')
    parser.add_argument('--num_point', type=int, default=256, choices=[256], help='Point Number')
    parser.add_argument('--eval_xyz', default='/home/siyu_ren/pugeo_pytorch_data/test_5000/', help='Folder to store point cloud(xyz format) toevaluate')
    parser.add_argument('--num_shape_point', type=int, default=5000, help='Point Number per shape')
    parser.add_argument('--patch_num_ratio', type=int, default=3, help='Number of points covered by patch')
    arg = parser.parse_args()
    # Derived settings: checkpoint directory, patch count (over-sampled by
    # patch_num_ratio so patches overlap and cover the shape), output dir.
    arg.log_dir = os.path.join(os.path.dirname(__file__), '..', 'log_x%d' % arg.up_ratio)
    arg.num_patch = int(arg.num_shape_point / arg.num_point * arg.patch_num_ratio)
    arg.eval_save_path = os.path.join(os.path.dirname(__file__), '..', 'PUGEOx%d' % arg.up_ratio)
    # makedirs(exist_ok=True) replaces the original bare `except: pass`,
    # which silently swallowed real failures (e.g. permission errors) too.
    os.makedirs(arg.eval_save_path, exist_ok=True)
    # Bind to a new name: the original `model = model.pugeonet(...)` clobbered
    # the imported `model` module.
    net = model.pugeonet(up_ratio=arg.up_ratio, knn=30)
    net = net.cuda()
    # NOTE(review): torch.load without map_location requires a CUDA-capable
    # host; fine here since .cuda() above already assumes one.
    net.load_state_dict(torch.load(os.path.join(arg.log_dir, 'model.t7')))
    evaluate(net, arg)