# nerf_eval_test_time_optim.py — 127 lines (98 loc) · 4.56 KB
# (header reconstructed; original span was web-page scraping residue:
# GitHub navigation text and a detached line-number gutter)
import os
import imageio
import time
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from nerf_utils import *
from run_endonerf_helpers import *
from logging_nerf import *
from load_blender import load_blender_data
from load_llff import load_llff_data
from arg_parser import *
# apex (NVIDIA mixed-precision) is optional; silently skip when absent.
try:
    from apex import amp
except ImportError:
    pass
# Prefer GPU when available; torch/np come from the star imports above.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.random.seed(0)  # fixed seed for reproducible evaluation runs
DEBUG = True
def load_dataset(args):
    """Load an LLFF-style endoscopic dataset and derive the train/test split.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options; reads ``datadir``, ``factor``, ``spherify``,
        ``gt_fgmask``, ``ref_depths``, ``llff_renderpath``,
        ``davinci_endoscopic``, ``llffhold``, ``skip_frames`` and ``no_ndc``.
        ``args.time_interval`` may be updated in place when negative.

    Returns
    -------
    tuple
        ``(times, render_times, poses, render_poses, i_train, i_test, hwf,
        near, far, depth_maps, images, masks)``
    """
    images, masks, depth_maps, poses, times, bds, render_poses, render_times, i_test = load_llff_data(
        args.datadir, args.factor,
        recenter=True, bd_factor=.75, spherify=args.spherify, fg_mask=args.gt_fgmask, use_depth=args.ref_depths,
        render_path=args.llff_renderpath, davinci_endoscopic=args.davinci_endoscopic)
    print("depth maps ::: ", depth_maps)
    hwf = poses[0, :3, -1]  # height, width, focal packed in the pose matrix
    poses = poses[:, :3, :4]
    print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)
    if not isinstance(i_test, list):
        i_test = [i_test]
    if args.llffhold > 0:
        # Hold out every llffhold-th interior frame for testing.
        print('Auto LLFF holdout,', args.llffhold)
        i_test = np.arange(images.shape[0])[1:-1:args.llffhold]
    i_val = i_test
    # Leave out test/val frames (and explicitly skipped ones) from training.
    i_train = np.array([i for i in np.arange(int(images.shape[0]))
                        if (i not in i_test and i not in i_val and i not in args.skip_frames)])
    print('DEFINING BOUNDS')
    print("I TEST ::", i_test)
    print("I TRAIN ::", i_train)
    # FIX: use idiomatic ndarray methods instead of the unbound
    # np.ndarray.min/max calls, and reuse the computed bounds for the
    # no-NDC near/far instead of recomputing them.
    close_depth, inf_depth = bds.min() * .9, bds.max() * 1.
    if args.no_ndc:
        near, far = close_depth, inf_depth
    else:
        near, far = 0., 1.
    print('NEAR FAR', near, far)
    if args.time_interval < 0:
        # Default: uniform time spacing across the frame sequence.
        args.time_interval = 1 / (images.shape[0] - 1)
    return times, render_times, poses, render_poses, i_train, i_test, hwf, near, far, depth_maps, images, masks
def eval():
    """Render the held-out test views of a trained (Endo)NeRF model to disk.

    Parses CLI args, loads the dataset, restores the NeRF checkpoint via
    ``create_nerf``, renders the test poses/times with ``render_path`` and
    writes per-frame images plus a ``video.mp4`` into the experiment dir.

    NOTE(review): shadows the builtin ``eval``; kept unchanged because the
    ``__main__`` guard calls it by this name — consider a file-wide rename.
    """
    parser = config_parser()
    args = parser.parse_args()
    args.ref_depths = True  # always condition on reference depths at eval time
    print("use_depth :: ", args.ref_depths)
    args.gt_fgmask = True  ## do not comment for training
    times, render_times, poses, render_poses, i_train, i_test, hwf, near, far, depth_maps, images, masks = load_dataset(args)
    min_time, max_time = times[i_train[0]], times[i_train[-1]]
    assert min_time == 0., "time must start at 0"
    assert max_time == 1., "max time must be 1"
    # Cast intrinsics to right types
    H, W, focal = hwf
    H, W = int(H), int(W)
    hwf = [H, W, focal]
    render_poses = np.array(poses[i_test])
    render_times = np.array(times[i_test])
    basedir = args.basedir
    expname = args.expname
    nerf_dir = "nerf_and_pose" if args.update_poses else "nerf_only"
    print("nerf_dir :: ", nerf_dir)
    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, nerf_model_extras = create_nerf(args, nerf_dir, args.lrate)
    global_step = start
    bds_dict = {
        'near': near + 1e-6,  # nudge off zero to avoid a degenerate near plane
        'far': far,
    }
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)
    # Move testing data to GPU
    render_poses = torch.Tensor(render_poses).to(device)
    render_times = torch.Tensor(render_times).to(device)
    # BUG FIX: close_depth/inf_depth were previously only bound when
    # depth_maps was not None, but are used unconditionally in the
    # render_path() call below (NameError otherwise). Fall back to the
    # scene near/far bounds when no depth maps are available.
    close_depth, inf_depth = near, far
    if depth_maps is not None:
        # Robust depth range for visualization: clip extreme percentiles.
        close_depth, inf_depth = np.percentile(depth_maps, 3.0), np.percentile(depth_maps, 99.9)
    print('RENDER ONLY')
    with torch.no_grad():
        images = images[i_test]
        masks = masks[i_test]
        save_gt = True
        testsavedir = os.path.join(basedir, expname, nerf_dir + '_{:06d}'.format(start))
        os.makedirs(testsavedir, exist_ok=True)
        print('test poses shape', render_poses.shape)
        rgbs, _ = render_path(render_poses, render_times, hwf, args.chunk, render_kwargs_test, gt_imgs=images, gt_masks=masks,
                              savedir=testsavedir, render_factor=args.render_factor, save_also_gt=save_gt, save_depth=True, near_far=(close_depth, inf_depth))
        print('Done rendering', testsavedir)
        imageio.mimwrite(os.path.join(testsavedir, 'video.mp4'), to8b(rgbs), fps=args.video_fps, quality=8)
if __name__=='__main__':
    # New tensors default to CUDA floats; this call fails without a CUDA
    # device, so the script effectively requires a GPU. Must run before
    # eval() so all tensors created there land on the GPU.
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    eval()