-
Notifications
You must be signed in to change notification settings - Fork 44
/
base.yaml
143 lines (137 loc) · 4.18 KB
/
base.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
# Default config for the whole project
# Outermost level configuration switches
exp_name: base
mocking: False  # run with a mocked (dry-run) pipeline
detect_anomaly: False  # enable autograd anomaly detection (debug only, slow)
fix_random: False  # fix random seeds for reproducibility
allow_tf32: True  # allow TF32 matmul on Ampere+ GPUs
deterministic: False  # deterministic training (debug only)
benchmark: True  # when benchmarking, slow, after, fast
profiler_cfg:
  enabled: False  # no profiling by default
print_model: False  # defaults to a compact interface
preparing_parser: False
print_parameters: True
# Top level model building
model_cfg:
  type: VolumetricVideoModel
  camera_cfg:
    type: NoopCamera
  sampler_cfg:
    type: ImportanceSampler
  network_cfg:
    type: MultilevelNetwork
    # <<: *network_cfg # is this ok?
    parameterizer_cfg:
      type: ContractRegressor
      in_dim: 3
    # Composed space-time embedder: separate xyz and t branches
    xyzt_embedder_cfg:
      type: ComposedXyztEmbedder
      xyz_embedder_cfg:
        type: PositionalEncodingEmbedder
        multires: 10
      t_embedder_cfg:
        type: LatentCodeEmbedder
    xyz_embedder_cfg:
      type: EmptyEmbedder
    dir_embedder_cfg:
      type: PositionalEncodingEmbedder
      multires: 4
    rgb_embedder_cfg:
      type: EmptyEmbedder
    deformer_cfg:
      type: EmptyRegressor
    geometry_cfg:
      type: SplitRegressor
      width: 512
      depth: 8
    appearance_cfg:
      type: MlpRegressor
      width: 256
      depth: 2
      out_dim: 3
      out_actvn: sigmoid
    # Per-level sub-networks of the MultilevelNetwork
    network_cfgs:
      # - &network_cfg # coarse network configuration
      - type: VolumetricVideoNetwork
        geometry_cfg:
          type: SplitRegressor
          width: 128
          depth: 4
        appearance_cfg:
          type: EmptyRegressor
      # - <<: *network_cfg # fine network configuration
      - type: VolumetricVideoNetwork
  # seems to be hierarchically overwriting, good
  renderer_cfg:
    type: VolumeRenderer
  supervisor_cfg:
    type: SequentialSupervisor
# Training dataloader & dataset configuration (anchored for reuse below)
dataloader_cfg: &dataloader_cfg  # should "dataloader" be treated as one word?
  type: VolumetricVideoDataloader
  dataset_cfg: &dataset_cfg
    type: VolumetricVideoDataset
    split: TRAIN
    data_root: data/dataset/sequence
    n_rays: 512
    supply_decoded: True  # pass the image to the network directly
    encode_ext: .png  # save memory during training
    frame_sample: [0, null, 1]
    view_sample: [0, null, 1]
    intri_file: intri.yml
    extri_file: extri.yml
    bodymodel_file: output/cfg_exp.yml
    motion_file: 'motion.npz'
    append_gt_prob: 0.1
    extra_src_pool: 1
    bounds: [[-10.0, -10.0, -10.0], [10.0, 10.0, 10.0]]
  sampler_cfg:
    type: RandomSampler
    frame_sample: [0, null, 1]
    view_sample: [0, null, 1]
  batch_sampler_cfg:
    type: BatchSampler
    batch_size: 8
# Validation dataloader: inherits the training config via merge keys,
# then overrides split, sampling strides, and batch size
val_dataloader_cfg:  # should "dataloader" be treated as one word?
  <<: *dataloader_cfg
  max_iter: -1
  dataset_cfg:
    <<: *dataset_cfg
    type: WillChangeToNoopIfGUIDataset
    split: VAL
    supply_decoded: True  # pass the image to the network directly
    encode_ext: .png  # save bandwidth for rendering
    frame_sample: [0, null, 50]
    view_sample: [0, null, 5]
    append_gt_prob: 1.0
    extra_src_pool: 0
  sampler_cfg:
    type: SequentialSampler
    frame_sample: [0, null, 1]
    view_sample: [0, null, 1]
  # Please modify dataset_cfg instead of this
  batch_sampler_cfg:
    type: BatchSampler
    batch_size: 1
# Training loop configuration (anchored so experiments can merge-override it)
runner_cfg: &runner_cfg
  type: VolumetricVideoRunner
  epochs: 400
  # decay_epochs: 400
  ep_iter: 500  # iterations per epoch
  optimizer_cfg:
    type: ConfigurableOptimizer
  scheduler_cfg:
    type: ExponentialLR
  moderator_cfg:
    type: NoopModerator
  visualizer_cfg:
    type: VolumetricVideoVisualizer
    types: [RENDER, DEPTH, ALPHA]
    result_dir: data/result
    save_tag: ''
  evaluator_cfg:
    type: VolumetricVideoEvaluator
  recorder_cfg:
    type: TensorboardRecorder
# Interactive viewer configuration
viewer_cfg:
  type: VolumetricVideoViewer