-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy patheval.py
144 lines (119 loc) · 6.1 KB
/
eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
import torch
from scene import Scene
import os, time
import numpy as np
from tqdm import tqdm
from os import makedirs
from gaussian_renderer import render_surfel
import torchvision
from utils.general_utils import safe_state
from utils.system_utils import searchForMaxIteration
from argparse import ArgumentParser
from arguments import ModelParams, PipelineParams, OptimizationParams, get_combined_args
from gaussian_renderer import GaussianModel
from utils.image_utils import psnr
from utils.loss_utils import ssim
from lpipsPyTorch import lpips
from torchvision.utils import save_image, make_grid
def render_set(model_path, views, gaussians, pipeline, background, save_ims, opt):
    """Evaluate the given (test) views: render each camera, accumulate
    PSNR/SSIM/LPIPS and per-frame render time, optionally save the color
    and normal images, then print and dump the averaged metrics.

    Metrics are written to <model_path>/metric.txt; images (when
    save_ims is True) go under <model_path>/test/renders/{rgb,normal}.
    """
    if save_ims:
        # Output folders for rendered color images and normal maps.
        out_root = os.path.join(model_path, "test", "renders")
        color_dir = os.path.join(out_root, 'rgb')
        normal_dir = os.path.join(out_root, 'normal')
        makedirs(color_dir, exist_ok=True)
        makedirs(normal_dir, exist_ok=True)
    metric_ssim = []
    metric_psnr = []
    metric_lpips = []
    timings = []
    for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
        # Reflection masks are a training-time aid; disable them for eval.
        view.refl_mask = None
        start = time.time()
        out = render_surfel(view, gaussians, pipeline, background, srgb=opt.srgb, opt=opt)
        # Only the renderer call itself is timed (fps excludes metric cost).
        timings.append(time.time() - start)
        pred = torch.clamp(out["render"], 0.0, 1.0)[None]
        gt = torch.clamp(view.original_image, 0.0, 1.0)[None, 0:3, :, :]
        metric_ssim.append(ssim(pred, gt).item())
        metric_psnr.append(psnr(pred, gt).item())
        metric_lpips.append(lpips(pred, gt, net_type='vgg').item())
        if save_ims:
            frame_name = '{0:05d}.png'.format(idx)
            # Save the rendered color image.
            torchvision.utils.save_image(pred, os.path.join(color_dir, frame_name))
            # Save the normal map when the renderer produced one,
            # remapped from [-1, 1] into the displayable [0, 1] range.
            if 'rend_normal' in out:
                torchvision.utils.save_image(out['rend_normal'] * 0.5 + 0.5,
                                             os.path.join(normal_dir, frame_name))
    ssim_v = np.array(metric_ssim).mean()
    psnr_v = np.array(metric_psnr).mean()
    lpip_v = np.array(metric_lpips).mean()
    fps = 1.0 / np.array(timings).mean()
    print('psnr:{}, ssim:{}, lpips:{}, fps:{}'.format(psnr_v, ssim_v, lpip_v, fps))
    dump_path = os.path.join(model_path, 'metric.txt')
    with open(dump_path, 'w') as f:
        f.write('psnr:{}, ssim:{}, lpips:{}, fps:{}'.format(psnr_v, ssim_v, lpip_v, fps))
def render_set_train(model_path, views, gaussians, pipeline, background, save_ims, opt):
    """Render every training view and, when save_ims is True, save the
    predicted color, the ground-truth image, and the normal map to
    <model_path>/train/renders/{rgb,gt,normal}.

    Unlike render_set, no metrics are computed here.
    """
    if save_ims:
        # Output folders for color renders, ground-truth copies, and normals.
        out_root = os.path.join(model_path, "train", "renders")
        color_dir = os.path.join(out_root, 'rgb')
        gt_dir = os.path.join(out_root, 'gt')
        normal_dir = os.path.join(out_root, 'normal')
        for directory in (color_dir, gt_dir, normal_dir):
            makedirs(directory, exist_ok=True)
    for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
        # Reflection masks are disabled at evaluation time.
        view.refl_mask = None
        out = render_surfel(view, gaussians, pipeline, background, srgb=opt.srgb, opt=opt)
        pred = torch.clamp(out["render"], 0.0, 1.0)[None]
        gt = torch.clamp(view.original_image, 0.0, 1.0)[None, :3, :, :]
        if save_ims:
            frame_name = '{0:05d}.png'.format(idx)
            # Save the rendered color image and its ground truth.
            torchvision.utils.save_image(pred, os.path.join(color_dir, frame_name))
            torchvision.utils.save_image(gt, os.path.join(gt_dir, frame_name))
            # Save the normal map when available, remapped from [-1, 1] to [0, 1].
            if 'rend_normal' in out:
                torchvision.utils.save_image(out['rend_normal'] * 0.5 + 0.5,
                                             os.path.join(normal_dir, frame_name))
def render_sets(dataset: ModelParams, iteration: int, pipeline: PipelineParams, save_ims: bool, op, indirect):
    """Load a trained Gaussian model, evaluate it on the scene's test
    cameras via render_set, and export the learned environment maps
    (env1.png / env2.png) to the model directory.

    When `indirect` is truthy, op.indirect is forced to 1 before rendering.
    """
    with torch.no_grad():
        gaussians = GaussianModel(dataset.sh_degree)
        scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
        bg = [1, 1, 1] if dataset.white_background else [0, 0, 0]
        background = torch.tensor(bg, dtype=torch.float32, device="cuda")
        # NOTE(review): the mesh is always loaded from the latest checkpoint
        # found under point_cloud/, regardless of the `iteration` argument
        # used when constructing the Scene — confirm this is intentional.
        latest_iter = searchForMaxIteration(os.path.join(dataset.model_path, "point_cloud"))
        if indirect:
            op.indirect = 1
        gaussians.load_mesh_from_ply(dataset.model_path, latest_iter)
        # render_set_train(dataset.model_path, scene.getTrainCameras(), gaussians, pipeline, background, save_ims, op)
        render_set(dataset.model_path, scene.getTestCameras(), gaussians, pipeline, background, save_ims, op)
        # Export both learned environment maps as single-image grids.
        env_dict = gaussians.render_env_map()
        for env_key in ("env1", "env2"):
            grid = make_grid([env_dict[env_key].permute(2, 0, 1)], nrow=1, padding=10)
            save_image(grid, os.path.join(dataset.model_path, env_key + ".png"))
if __name__ == "__main__":
    # Command-line entry point for the evaluation script.
    parser = ArgumentParser(description="Testing script parameters")
    model = ModelParams(parser, sentinel=True)
    op = OptimizationParams(parser)
    pipeline = PipelineParams(parser)
    parser.add_argument("--iteration", type=int, default=-1)
    parser.add_argument("--save_images", action="store_true")
    parser.add_argument("--quiet", action="store_true")
    args = get_combined_args(parser)
    print("Rendering " + args.model_path)
    # Initialize system state (RNG).
    safe_state(args.quiet)
    render_sets(model.extract(args), args.iteration, pipeline.extract(args),
                args.save_images, op, True)