From 2e5e99a69843b6ebc74ebdbae0bc55cf2f16b68f Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 03:51:41 +0530
Subject: [PATCH 01/18] Update requirements.txt
---
requirements.txt | 3 ---
1 file changed, 3 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 8cf29b82..608bd513 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,3 @@
-torch==1.13.1
-torchvision==0.14.1
-torchaudio==0.13.1
mmcv==1.6.0
matplotlib
argparse
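Note on the change above: with the torch/torchvision/torchaudio pins removed, the build uses whatever PyTorch the environment already ships (on Colab at the time of this series, torch 2.1.0+cu121, as the notebook output in patch 14 shows). A minimal sanity-check sketch, not part of the patch, for confirming the preinstalled build before compiling the CUDA submodules:

    import torch
    import torchvision

    # Confirm the preinstalled PyTorch/CUDA build the CUDA submodules will compile against.
    print("torch:", torch.__version__)              # e.g. 2.1.0+cu121 on Colab
    print("torchvision:", torchvision.__version__)  # e.g. 0.16.0+cu121
    print("CUDA available:", torch.cuda.is_available())
    if torch.cuda.is_available():
        print("GPU:", torch.cuda.get_device_name(0))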
From bfee7fb591f985f0684589abf1c2e5212f153638 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 03:59:16 +0530
Subject: [PATCH 02/18] Update train.py
---
train.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/train.py b/train.py
index 13f9e51e..2e273760 100644
--- a/train.py
+++ b/train.py
@@ -202,7 +202,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# if opt.lambda_lpips !=0:
# lpipsloss = lpips_loss(image_tensor,gt_image_tensor,lpips_model)
# loss += opt.lambda_lpips * lpipsloss
-
+
loss.backward()
if torch.isnan(loss).any():
print("loss is nan,end training, reexecv program now.")
@@ -337,6 +337,8 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
if config['cameras'] and len(config['cameras']) > 0:
l1_test = 0.0
psnr_test = 0.0
+ ssim_test = 0.0
+ lpips_test = 0.0
for idx, viewpoint in enumerate(config['cameras']):
image = torch.clamp(renderFunc(viewpoint, scene.gaussians,stage=stage, cam_type=dataset_type, *renderArgs)["render"], 0.0, 1.0)
if dataset_type == "PanopticSports":
@@ -352,11 +354,16 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
pass
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
+
+ lpips_test += lpips_loss(image_tensor,gt_image_tensor,lpips_model)
+
+ ssim_test += ssim(image_tensor,gt_image_tensor)
psnr_test += psnr(image, gt_image, mask=None).mean().double()
+
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
- print("\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(iteration, config['name'], l1_test, psnr_test))
+ print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPS {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test, lpips_test))
# print("sh feature",scene.gaussians.get_features.shape)
if tb_writer:
tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
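At this stage the new accumulators still reference `image_tensor`, `gt_image_tensor`, `lpips_loss`, and `lpips_model`, none of which are in scope inside `training_report`; patches 03-05 switch to `image`/`gt_image` and pass the model in explicitly. For orientation, a hedged guess at what a `lpips_loss(img1, img2, model)` helper of the kind called in the training loop might look like (the actual helper lives in the repo's utils and may differ):

    import torch

    def lpips_loss(img1: torch.Tensor, img2: torch.Tensor, lpips_model) -> torch.Tensor:
        # Hypothetical wrapper: LPIPS backbones expect NCHW batches, so add a batch
        # dimension for single CHW images and reduce the per-image distances to a scalar.
        if img1.dim() == 3:
            img1, img2 = img1.unsqueeze(0), img2.unsqueeze(0)
        return lpips_model(img1, img2).mean()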
From 2fb3433bf63514028d7f40ad34eff6b24c3aa620 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 04:11:02 +0530
Subject: [PATCH 03/18] Update train.py
---
train.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/train.py b/train.py
index 2e273760..f227ae61 100644
--- a/train.py
+++ b/train.py
@@ -355,9 +355,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
- lpips_test += lpips_loss(image_tensor,gt_image_tensor,lpips_model)
+ lpips_test += lpips_loss(image,gt_image,lpips_model)
- ssim_test += ssim(image_tensor,gt_image_tensor)
+ ssim_test += ssim(image,gt_image)
psnr_test += psnr(image, gt_image, mask=None).mean().double()
From 5ff5c58b5ea0db464021cc35923748346034eb6e Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 04:18:12 +0530
Subject: [PATCH 04/18] Update train.py
---
train.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/train.py b/train.py
index f227ae61..acae1413 100644
--- a/train.py
+++ b/train.py
@@ -69,7 +69,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
progress_bar = tqdm(range(first_iter, final_iter), desc="Training progress")
first_iter += 1
- # lpips_model = lpips.LPIPS(net="alex").cuda()
+ lpips_model = lpips.LPIPS(net="alex").cuda()
video_cams = scene.getVideoCameras()
test_cams = scene.getTestCameras()
train_cams = scene.getTrainCameras()
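For context on the line enabled above: `lpips.LPIPS(net="alex")` builds the AlexNet-backed perceptual metric from the `lpips` package and downloads the pretrained weights on first use. A minimal usage sketch with placeholder tensors standing in for a rendered view and its ground truth:

    import torch
    import lpips

    lpips_alex = lpips.LPIPS(net="alex").cuda()   # downloads AlexNet + LPIPS linear weights on first use

    img = torch.rand(1, 3, 256, 256, device="cuda")   # stand-in for a rendered view in [0, 1]
    gt  = torch.rand(1, 3, 256, 256, device="cuda")   # stand-in for the ground-truth view in [0, 1]

    # LPIPS expects inputs in [-1, 1] by default, so rescale [0, 1] images before the forward pass.
    with torch.no_grad():
        d = lpips_alex(img * 2 - 1, gt * 2 - 1)
    print(float(d))   # lower = more perceptually similar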
From e1db663b8876449527fb0a42020b07c90b52e873 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 04:33:58 +0530
Subject: [PATCH 05/18] Update train.py
---
train.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/train.py b/train.py
index acae1413..a74811b4 100644
--- a/train.py
+++ b/train.py
@@ -227,7 +227,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# Log and save
timer.pause()
- training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, scene, render, [pipe, background], stage, scene.dataset_type)
+ training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, scene, render, [pipe, background], stage, scene.dataset_type)
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration, stage)
@@ -319,7 +319,7 @@ def prepare_output_and_logger(expname):
print("Tensorboard not available: not logging progress")
return tb_writer
-def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
+def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
if tb_writer:
tb_writer.add_scalar(f'{stage}/train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar(f'{stage}/train_loss_patchestotal_loss', loss.item(), iteration)
From c1731dcd48a48907985b0f26b9b34c6d135a14ad Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 07:04:09 +0530
Subject: [PATCH 06/18] Update train.py
---
train.py | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/train.py b/train.py
index a74811b4..704060da 100644
--- a/train.py
+++ b/train.py
@@ -70,6 +70,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
progress_bar = tqdm(range(first_iter, final_iter), desc="Training progress")
first_iter += 1
lpips_model = lpips.LPIPS(net="alex").cuda()
+ lpips_model2 = lpips.LPIPS(net="vgg").cuda()
video_cams = scene.getVideoCameras()
test_cams = scene.getTestCameras()
train_cams = scene.getTrainCameras()
@@ -227,7 +228,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# Log and save
timer.pause()
- training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, scene, render, [pipe, background], stage, scene.dataset_type)
+ training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, scene, render, [pipe, background], stage, scene.dataset_type)
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration, stage)
@@ -319,7 +320,7 @@ def prepare_output_and_logger(expname):
print("Tensorboard not available: not logging progress")
return tb_writer
-def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
+def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
if tb_writer:
tb_writer.add_scalar(f'{stage}/train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar(f'{stage}/train_loss_patchestotal_loss', loss.item(), iteration)
@@ -338,7 +339,8 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test = 0.0
psnr_test = 0.0
ssim_test = 0.0
- lpips_test = 0.0
+ lpips_test_a = 0.0
+ lpips_test_v = 0.0
for idx, viewpoint in enumerate(config['cameras']):
image = torch.clamp(renderFunc(viewpoint, scene.gaussians,stage=stage, cam_type=dataset_type, *renderArgs)["render"], 0.0, 1.0)
if dataset_type == "PanopticSports":
@@ -355,7 +357,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
- lpips_test += lpips_loss(image,gt_image,lpips_model)
+ lpips_test_a += lpips_loss(image,gt_image,lpips_model)
+
+ lpips_test_v += lpips_loss(image,gt_image,lpips_model2)
ssim_test += ssim(image,gt_image)
@@ -363,11 +367,14 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
- print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPS {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test, lpips_test))
+ print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test, lpips_test_a, lpips_test_v))
# print("sh feature",scene.gaussians.get_features.shape)
if tb_writer:
tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
tb_writer.add_scalar(stage+"/"+config['name'] + '/loss_viewpoint - psnr', psnr_test, iteration)
+ tb_writer.add_scalar(stage+"/"+config['name'] + '/loss_viewpoint - ssim', ssim_test, iteration)
+ tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - lpipsa', lpips_test_a, iteration)
+ tb_writer.add_scalar(stage+"/"+config['name'] + '/loss_viewpoint - lpipsv', lpips_test_v, iteration)
if tb_writer:
tb_writer.add_histogram(f"{stage}/scene/opacity_histogram", scene.gaussians.get_opacity, iteration)
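Two notes on this patch: per the lpips project's guidance, the AlexNet variant is the one recommended as a forward metric while the VGG variant behaves more like the traditional perceptual loss, which is presumably why both are reported here as LPIPSA and LPIPSV. The new `add_scalar` calls follow the standard `torch.utils.tensorboard` API; a self-contained sketch of that logging pattern with placeholder tags and values:

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter("output/example_run")   # hypothetical log directory

    iteration = 1000
    metrics = {"l1_loss": 0.02, "psnr": 31.5, "ssim": 0.95, "lpipsa": 0.08, "lpipsv": 0.12}  # placeholders
    for name, value in metrics.items():
        writer.add_scalar(f"coarse/test/loss_viewpoint - {name}", value, iteration)
    writer.close()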
From 40c55775ff46c44d73f2b486e6c457ad535b322e Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 21:07:56 +0530
Subject: [PATCH 07/18] Update train.py
---
train.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/train.py b/train.py
index 704060da..99c5fe43 100644
--- a/train.py
+++ b/train.py
@@ -357,11 +357,11 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
- lpips_test_a += lpips_loss(image,gt_image,lpips_model)
+ lpips_test_a += lpips_loss(image,gt_image,lpips_model, normalize=True).mean().double()
- lpips_test_v += lpips_loss(image,gt_image,lpips_model2)
+ lpips_test_v += lpips_loss(image,gt_image,lpips_model2, normalize=True).mean().double()
- ssim_test += ssim(image,gt_image)
+ ssim_test += ssim(image,gt_image).mean().double()
psnr_test += psnr(image, gt_image, mask=None).mean().double()
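The `normalize=True` flag added here is significant: the `lpips` forward pass assumes inputs in [-1, 1] unless `normalize=True` is passed, in which case [0, 1] inputs are rescaled internally. Since the renders are clamped to [0, 1], the earlier calls without the flag would have been computed on mis-scaled data. A small sketch of the difference, with random stand-in images:

    import torch
    import lpips

    model = lpips.LPIPS(net="alex").cuda()
    img = torch.rand(1, 3, 128, 128, device="cuda")
    gt  = torch.rand(1, 3, 128, 128, device="cuda")

    with torch.no_grad():
        d_wrong  = model(img, gt)                   # treats [0, 1] data as if it were in [-1, 1]
        d_right  = model(img, gt, normalize=True)   # rescales [0, 1] -> [-1, 1] internally
        d_manual = model(img * 2 - 1, gt * 2 - 1)   # equivalent to normalize=True

    print(float(d_wrong), float(d_right), float(d_manual))   # d_right == d_manual up to float error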
From 6a20ef40719c69857eac58c1cc1365cbfb5350ac Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 21:32:46 +0530
Subject: [PATCH 08/18] Update train.py
---
train.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/train.py b/train.py
index 99c5fe43..477d2acc 100644
--- a/train.py
+++ b/train.py
@@ -31,6 +31,7 @@
from time import time
import copy
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
to8b = lambda x : (255*np.clip(x.cpu().numpy(),0,1)).astype(np.uint8)
try:
@@ -69,8 +70,8 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
progress_bar = tqdm(range(first_iter, final_iter), desc="Training progress")
first_iter += 1
- lpips_model = lpips.LPIPS(net="alex").cuda()
- lpips_model2 = lpips.LPIPS(net="vgg").cuda()
+ lpips_model = lpips.LPIPS(net="alex", version="0.1").eval().to(device)
+ lpips_model2 = lpips.LPIPS(net="vgg", version="0.1").eval().to(device)
video_cams = scene.getVideoCameras()
test_cams = scene.getTestCameras()
train_cams = scene.getTrainCameras()
@@ -228,7 +229,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# Log and save
timer.pause()
- training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, scene, render, [pipe, background], stage, scene.dataset_type)
+ training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, opt.lambda_lpips, opt.lambda_dssim, scene, render, [pipe, background], stage, scene.dataset_type)
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration, stage)
@@ -320,7 +321,7 @@ def prepare_output_and_logger(expname):
print("Tensorboard not available: not logging progress")
return tb_writer
-def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
+def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, opt.lambda_lpips, opt.lambda_dssim, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
if tb_writer:
tb_writer.add_scalar(f'{stage}/train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar(f'{stage}/train_loss_patchestotal_loss', loss.item(), iteration)
@@ -359,9 +360,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
lpips_test_a += lpips_loss(image,gt_image,lpips_model, normalize=True).mean().double()
- lpips_test_v += lpips_loss(image,gt_image,lpips_model2, normalize=True).mean().double()
+ lpips_test_v += opt.lambda_lpips * lpips_loss(image,gt_image,lpips_model2).mean().double()
- ssim_test += ssim(image,gt_image).mean().double()
+ ssim_test += opt.lambda_dssim * (1.0- ssim(image,gt_image).mean().double())
psnr_test += psnr(image, gt_image, mask=None).mean().double()
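The device fallback and `.eval()` added here are the usual PyTorch pattern for an inference-only module: `eval()` switches off training-time behaviour such as dropout, and `.to(device)` keeps the metric on the GPU when one is available while still working on CPU. (Note that the signature introduced in this patch uses `opt.lambda_lpips` and `opt.lambda_dssim` as parameter names, which is not valid Python; patches 09 and 10 remove them again.) A minimal sketch of the pattern, with a placeholder input:

    import torch
    import lpips

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Inference-only metric: eval() disables training-time behaviour, to(device) picks GPU or CPU.
    lpips_vgg = lpips.LPIPS(net="vgg", version="0.1").eval().to(device)

    x = torch.rand(1, 3, 64, 64, device=device)
    with torch.no_grad():
        print(float(lpips_vgg(x, x, normalize=True)))   # identical inputs -> distance ~0.0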
From 0cc28dcd2215fd8ae43c0ca17175eef5f21ef6b3 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 21:51:02 +0530
Subject: [PATCH 09/18] Update train.py
---
train.py | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/train.py b/train.py
index 477d2acc..4df2de39 100644
--- a/train.py
+++ b/train.py
@@ -229,7 +229,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# Log and save
timer.pause()
- training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, opt.lambda_lpips, opt.lambda_dssim, scene, render, [pipe, background], stage, scene.dataset_type)
+ training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, opt.lambda_dssim, scene, render, [pipe, background], stage, scene.dataset_type)
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration, stage)
@@ -321,7 +321,7 @@ def prepare_output_and_logger(expname):
print("Tensorboard not available: not logging progress")
return tb_writer
-def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, opt.lambda_lpips, opt.lambda_dssim, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
+def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, opt.lambda_dssim, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
if tb_writer:
tb_writer.add_scalar(f'{stage}/train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar(f'{stage}/train_loss_patchestotal_loss', loss.item(), iteration)
@@ -339,7 +339,7 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
if config['cameras'] and len(config['cameras']) > 0:
l1_test = 0.0
psnr_test = 0.0
- ssim_test = 0.0
+ ssim_test = []
lpips_test_a = 0.0
lpips_test_v = 0.0
for idx, viewpoint in enumerate(config['cameras']):
@@ -358,17 +358,17 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
- lpips_test_a += lpips_loss(image,gt_image,lpips_model, normalize=True).mean().double()
+ lpips_test_a += lpips_loss(image,gt_image,lpips_model, normalize=True).item()
- lpips_test_v += opt.lambda_lpips * lpips_loss(image,gt_image,lpips_model2).mean().double()
+ lpips_test_v += lpips_loss(image,gt_image,lpips_model2, normalize=True).item()
- ssim_test += opt.lambda_dssim * (1.0- ssim(image,gt_image).mean().double())
+ ssim_test.append(ssim(image,gt_image))
psnr_test += psnr(image, gt_image, mask=None).mean().double()
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
- print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test, lpips_test_a, lpips_test_v))
+ print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, np.array(ssim_test).mean(), lpips_test_a, lpips_test_v))
# print("sh feature",scene.gaussians.get_features.shape)
if tb_writer:
tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
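Accumulating LPIPS via `.item()` turns each per-view distance into a plain Python float, so the evaluation loop does not keep CUDA tensors (or any attached autograd state) alive across views. A tiny sketch of that accumulation pattern; the helper name is hypothetical:

    import torch

    def mean_metric(per_view_values):
        # Summing Python floats (via .item()) instead of CUDA tensors avoids holding
        # per-view tensors alive for the whole evaluation loop.
        total = 0.0
        for v in per_view_values:
            total += v.item() if torch.is_tensor(v) else float(v)
        return total / max(len(per_view_values), 1)

    print(mean_metric([torch.tensor(0.10), torch.tensor(0.12), 0.08]))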
From 8b0563d2dc2fc43c655eb80836ad9b3eefabc590 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 22:00:27 +0530
Subject: [PATCH 10/18] Update train.py
---
train.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/train.py b/train.py
index 4df2de39..ca62b7c3 100644
--- a/train.py
+++ b/train.py
@@ -229,7 +229,7 @@ def scene_reconstruction(dataset, opt, hyper, pipe, testing_iterations, saving_i
# Log and save
timer.pause()
- training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, opt.lambda_dssim, scene, render, [pipe, background], stage, scene.dataset_type)
+ training_report(tb_writer, iteration, Ll1, loss, l1_loss, iter_start.elapsed_time(iter_end), testing_iterations, lpips_model, lpips_model2, scene, render, [pipe, background], stage, scene.dataset_type)
if (iteration in saving_iterations):
print("\n[ITER {}] Saving Gaussians".format(iteration))
scene.save(iteration, stage)
@@ -321,7 +321,7 @@ def prepare_output_and_logger(expname):
print("Tensorboard not available: not logging progress")
return tb_writer
-def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, opt.lambda_dssim, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
+def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_iterations, lpips_model, lpips_model2, scene : Scene, renderFunc, renderArgs, stage, dataset_type):
if tb_writer:
tb_writer.add_scalar(f'{stage}/train_loss_patches/l1_loss', Ll1.item(), iteration)
tb_writer.add_scalar(f'{stage}/train_loss_patchestotal_loss', loss.item(), iteration)
From 664b284121e37e300e9e1eae616c89d89ab69144 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 22:10:26 +0530
Subject: [PATCH 11/18] Update train.py
---
train.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/train.py b/train.py
index ca62b7c3..494017b8 100644
--- a/train.py
+++ b/train.py
@@ -358,9 +358,9 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
l1_test += l1_loss(image, gt_image).mean().double()
# mask=viewpoint.mask
- lpips_test_a += lpips_loss(image,gt_image,lpips_model, normalize=True).item()
+ lpips_test_a += lpips_model(gt_image, image, normalize=True).item()
- lpips_test_v += lpips_loss(image,gt_image,lpips_model2, normalize=True).item()
+ lpips_test_v += lpips_model2(gt_image, image, normalize=True).item()
ssim_test.append(ssim(image,gt_image))
From 19e1856eefad641f6aedf5752942558d2a2d45b5 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 22:21:29 +0530
Subject: [PATCH 12/18] Update train.py
---
train.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/train.py b/train.py
index 494017b8..74ae55b1 100644
--- a/train.py
+++ b/train.py
@@ -339,7 +339,8 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
if config['cameras'] and len(config['cameras']) > 0:
l1_test = 0.0
psnr_test = 0.0
- ssim_test = []
+ ssim_test = 0.0
+ n = 0
lpips_test_a = 0.0
lpips_test_v = 0.0
for idx, viewpoint in enumerate(config['cameras']):
@@ -362,13 +363,14 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
lpips_test_v += lpips_model2(gt_image, image, normalize=True).item()
- ssim_test.append(ssim(image,gt_image))
+ ssim_test += ssim(image,gt_image)
+ n += 1
psnr_test += psnr(image, gt_image, mask=None).mean().double()
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
- print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, np.array(ssim_test).mean(), lpips_test_a, lpips_test_v))
+ print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test/n , lpips_test_a, lpips_test_v))
# print("sh feature",scene.gaussians.get_features.shape)
if tb_writer:
tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
From 4081b78e1053a6427713f76b1951f43e1197a97c Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Sun, 11 Feb 2024 22:37:17 +0530
Subject: [PATCH 13/18] Update train.py
---
train.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/train.py b/train.py
index 74ae55b1..352919a2 100644
--- a/train.py
+++ b/train.py
@@ -370,7 +370,7 @@ def training_report(tb_writer, iteration, Ll1, loss, l1_loss, elapsed, testing_i
psnr_test /= len(config['cameras'])
l1_test /= len(config['cameras'])
- print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test/n , lpips_test_a, lpips_test_v))
+ print("\n[ITER {}] Evaluating {}: L1 {} PSNR {} SSIM {} LPIPSA {} LPIPSV {}".format(iteration, config['name'], l1_test, psnr_test, ssim_test/n, lpips_test_a/n, lpips_test_v/n))
# print("sh feature",scene.gaussians.get_features.shape)
if tb_writer:
tb_writer.add_scalar(stage + "/"+config['name'] + '/loss_viewpoint - l1_loss', l1_test, iteration)
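After patches 02-13 the evaluation block in `training_report` effectively averages L1, PSNR, SSIM and both LPIPS variants over the test cameras. The function below is a hypothetical, self-contained distillation of that converged logic, not the repo's code: the renderer and metric callables (`render_fn`, `l1_fn`, `psnr_fn`, `ssim_fn`) are placeholders for the corresponding pieces in train.py.

    import torch

    def evaluate_views(render_fn, cameras, l1_fn, psnr_fn, ssim_fn, lpips_alex, lpips_vgg):
        """Average image metrics over a set of test cameras (distilled sketch)."""
        totals = {"L1": 0.0, "PSNR": 0.0, "SSIM": 0.0, "LPIPSA": 0.0, "LPIPSV": 0.0}
        n = 0
        for cam in cameras:
            image, gt = render_fn(cam)                        # both CHW in [0, 1]
            image, gt = image.unsqueeze(0), gt.unsqueeze(0)   # NCHW for the metrics
            totals["L1"]     += l1_fn(image, gt).mean().item()
            totals["PSNR"]   += psnr_fn(image, gt).mean().item()
            totals["SSIM"]   += ssim_fn(image, gt).mean().item()
            totals["LPIPSA"] += lpips_alex(gt, image, normalize=True).item()
            totals["LPIPSV"] += lpips_vgg(gt, image, normalize=True).item()
            n += 1
        return {k: v / n for k, v in totals.items()}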
From 8698cca07f9db6d3019a75da77f0083573cc3d8f Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal <85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Mon, 12 Feb 2024 02:46:26 +0530
Subject: [PATCH 14/18] Added ipynb file
Contains the notebook run with the fixes applied and the new losses reported.
---
4DGaussians.ipynb | 5109 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 4771 insertions(+), 338 deletions(-)
diff --git a/4DGaussians.ipynb b/4DGaussians.ipynb
index a6bbc445..e0e1515c 100644
--- a/4DGaussians.ipynb
+++ b/4DGaussians.ipynb
@@ -2,10 +2,10 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {
"id": "VjYy0F2gZIPR",
- "outputId": "129c4176-455a-42b0-e06a-b30ef7792aea",
+ "outputId": "1de934b0-9534-4f23-be8f-edfe47267e50",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
@@ -13,115 +13,174 @@
},
"outputs": [
{
- "metadata": {
- "tags": null
- },
- "name": "stdout",
"output_type": "stream",
+ "name": "stdout",
"text": [
"/content\n",
- "fatal: destination path '4DGaussians' already exists and is not an empty directory.\n",
+ "Cloning into '4DGaussians'...\n",
+ "remote: Enumerating objects: 2513, done.\u001b[K\n",
+ "remote: Counting objects: 100% (288/288), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (177/177), done.\u001b[K\n",
+ "remote: Total 2513 (delta 162), reused 177 (delta 105), pack-reused 2225\u001b[K\n",
+ "Receiving objects: 100% (2513/2513), 54.99 MiB | 16.23 MiB/s, done.\n",
+ "Resolving deltas: 100% (1183/1183), done.\n",
"/content/4DGaussians\n",
"Submodule 'submodules/depth-diff-gaussian-rasterization' (https://github.com/ingra14m/depth-diff-gaussian-rasterization) registered for path 'submodules/depth-diff-gaussian-rasterization'\n",
"Submodule 'submodules/simple-knn' (https://gitlab.inria.fr/bkerbl/simple-knn.git) registered for path 'submodules/simple-knn'\n",
"Cloning into '/content/4DGaussians/submodules/depth-diff-gaussian-rasterization'...\n",
"Cloning into '/content/4DGaussians/submodules/simple-knn'...\n",
- "Submodule path 'submodules/depth-diff-gaussian-rasterization': checked out 'f2d8fa9921ea9a6cb9ac1c33a34ebd1b11510657'\n",
+ "remote: Enumerating objects: 6, done.\u001b[K\n",
+ "remote: Counting objects: 100% (3/3), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (3/3), done.\u001b[K\n",
+ "remote: Total 6 (delta 0), reused 0 (delta 0), pack-reused 3\u001b[K\n",
+ "Unpacking objects: 100% (6/6), 2.93 MiB | 1.67 MiB/s, done.\n",
+ "From https://github.com/ingra14m/depth-diff-gaussian-rasterization\n",
+ " * branch e49506654e8e11ed8a62d22bcb693e943fdecacf -> FETCH_HEAD\n",
+ "Submodule path 'submodules/depth-diff-gaussian-rasterization': checked out 'e49506654e8e11ed8a62d22bcb693e943fdecacf'\n",
"Submodule 'third_party/glm' (https://github.com/g-truc/glm.git) registered for path 'submodules/depth-diff-gaussian-rasterization/third_party/glm'\n",
"Cloning into '/content/4DGaussians/submodules/depth-diff-gaussian-rasterization/third_party/glm'...\n",
"Submodule path 'submodules/depth-diff-gaussian-rasterization/third_party/glm': checked out '5c46b9c07008ae65cb81ab79cd677ecc1934b903'\n",
"Submodule path 'submodules/simple-knn': checked out '44f764299fa305faf6ec5ebd99939e0508331503'\n",
- "/bin/bash: line 1: conda: command not found\n",
- "/bin/bash: line 1: conda: command not found\n",
- "Collecting torch==1.13.1 (from -r requirements.txt (line 1))\n",
- " Downloading torch-1.13.1-cp310-cp310-manylinux1_x86_64.whl (887.5 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m887.5/887.5 MB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting torchvision==0.14.1 (from -r requirements.txt (line 2))\n",
- " Downloading torchvision-0.14.1-cp310-cp310-manylinux1_x86_64.whl (24.2 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.2/24.2 MB\u001b[0m \u001b[31m71.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting torchaudio==0.13.1 (from -r requirements.txt (line 3))\n",
- " Downloading torchaudio-0.13.1-cp310-cp310-manylinux1_x86_64.whl (4.2 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.2/4.2 MB\u001b[0m \u001b[31m105.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting mmcv==1.6.0 (from -r requirements.txt (line 4))\n",
+ "Collecting mmcv==1.6.0 (from -r requirements.txt (line 1))\n",
" Downloading mmcv-1.6.0.tar.gz (554 kB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m554.9/554.9 kB\u001b[0m \u001b[31m50.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m554.9/554.9 kB\u001b[0m \u001b[31m13.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
- "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 5)) (3.7.1)\n",
- "Collecting argparse (from -r requirements.txt (line 6))\n",
+ "Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 2)) (3.7.1)\n",
+ "Collecting argparse (from -r requirements.txt (line 3))\n",
" Downloading argparse-1.4.0-py2.py3-none-any.whl (23 kB)\n",
- "Collecting lpips (from -r requirements.txt (line 7))\n",
+ "Collecting lpips (from -r requirements.txt (line 4))\n",
" Downloading lpips-0.1.4-py3-none-any.whl (53 kB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.8/53.8 kB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting plyfile (from -r requirements.txt (line 8))\n",
- " Downloading plyfile-1.0.1-py3-none-any.whl (23 kB)\n",
- "Requirement already satisfied: imageio-ffmpeg in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 9)) (0.4.9)\n",
- "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch==1.13.1->-r requirements.txt (line 1)) (4.5.0)\n",
- "Collecting nvidia-cuda-runtime-cu11==11.7.99 (from torch==1.13.1->-r requirements.txt (line 1))\n",
- " Downloading nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl (849 kB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m849.3/849.3 kB\u001b[0m \u001b[31m56.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting nvidia-cudnn-cu11==8.5.0.96 (from torch==1.13.1->-r requirements.txt (line 1))\n",
- " Downloading nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl (557.1 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m557.1/557.1 MB\u001b[0m \u001b[31m3.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting nvidia-cublas-cu11==11.10.3.66 (from torch==1.13.1->-r requirements.txt (line 1))\n",
- " Downloading nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl (317.1 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m317.1/317.1 MB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hCollecting nvidia-cuda-nvrtc-cu11==11.7.99 (from torch==1.13.1->-r requirements.txt (line 1))\n",
- " Downloading nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl (21.0 MB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.0/21.0 MB\u001b[0m \u001b[31m75.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision==0.14.1->-r requirements.txt (line 2)) (1.23.5)\n",
- "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision==0.14.1->-r requirements.txt (line 2)) (2.31.0)\n",
- "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision==0.14.1->-r requirements.txt (line 2)) (9.4.0)\n",
- "Collecting addict (from mmcv==1.6.0->-r requirements.txt (line 4))\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.8/53.8 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting plyfile (from -r requirements.txt (line 5))\n",
+ " Downloading plyfile-1.0.3-py3-none-any.whl (23 kB)\n",
+ "Collecting pytorch_msssim (from -r requirements.txt (line 6))\n",
+ " Downloading pytorch_msssim-1.0.0-py3-none-any.whl (7.7 kB)\n",
+ "Collecting open3d (from -r requirements.txt (line 7))\n",
+ " Downloading open3d-0.18.0-cp310-cp310-manylinux_2_27_x86_64.whl (399.7 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m399.7/399.7 MB\u001b[0m \u001b[31m2.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting addict (from mmcv==1.6.0->-r requirements.txt (line 1))\n",
" Downloading addict-2.4.0-py3-none-any.whl (3.8 kB)\n",
- "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 4)) (23.2)\n",
- "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 4)) (6.0.1)\n",
- "Collecting yapf (from mmcv==1.6.0->-r requirements.txt (line 4))\n",
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 1)) (1.23.5)\n",
+ "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 1)) (23.2)\n",
+ "Requirement already satisfied: Pillow in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 1)) (9.4.0)\n",
+ "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from mmcv==1.6.0->-r requirements.txt (line 1)) (6.0.1)\n",
+ "Collecting yapf (from mmcv==1.6.0->-r requirements.txt (line 1))\n",
" Downloading yapf-0.40.2-py3-none-any.whl (254 kB)\n",
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m254.7/254.7 kB\u001b[0m \u001b[31m30.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
- "\u001b[?25hRequirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch==1.13.1->-r requirements.txt (line 1)) (67.7.2)\n",
- "Requirement already satisfied: wheel in /usr/local/lib/python3.10/dist-packages (from nvidia-cublas-cu11==11.10.3.66->torch==1.13.1->-r requirements.txt (line 1)) (0.41.2)\n",
- "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (1.1.1)\n",
- "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (0.12.1)\n",
- "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (4.43.1)\n",
- "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (1.4.5)\n",
- "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (3.1.1)\n",
- "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 5)) (2.8.2)\n",
- "Requirement already satisfied: scipy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 7)) (1.11.3)\n",
- "Requirement already satisfied: tqdm>=4.28.1 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 7)) (4.66.1)\n",
- "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->-r requirements.txt (line 5)) (1.16.0)\n",
- "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision==0.14.1->-r requirements.txt (line 2)) (3.3.0)\n",
- "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision==0.14.1->-r requirements.txt (line 2)) (3.4)\n",
- "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision==0.14.1->-r requirements.txt (line 2)) (2.0.6)\n",
- "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision==0.14.1->-r requirements.txt (line 2)) (2023.7.22)\n",
- "Requirement already satisfied: importlib-metadata>=6.6.0 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv==1.6.0->-r requirements.txt (line 4)) (6.8.0)\n",
- "Requirement already satisfied: platformdirs>=3.5.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv==1.6.0->-r requirements.txt (line 4)) (3.11.0)\n",
- "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv==1.6.0->-r requirements.txt (line 4)) (2.0.1)\n",
- "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata>=6.6.0->yapf->mmcv==1.6.0->-r requirements.txt (line 4)) (3.17.0)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m254.7/254.7 kB\u001b[0m \u001b[31m34.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (1.2.0)\n",
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (0.12.1)\n",
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (4.48.1)\n",
+ "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (1.4.5)\n",
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (3.1.1)\n",
+ "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib->-r requirements.txt (line 2)) (2.8.2)\n",
+ "Requirement already satisfied: torch>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 4)) (2.1.0+cu121)\n",
+ "Requirement already satisfied: torchvision>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 4)) (0.16.0+cu121)\n",
+ "Requirement already satisfied: scipy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 4)) (1.11.4)\n",
+ "Requirement already satisfied: tqdm>=4.28.1 in /usr/local/lib/python3.10/dist-packages (from lpips->-r requirements.txt (line 4)) (4.66.1)\n",
+ "Collecting dash>=2.6.0 (from open3d->-r requirements.txt (line 7))\n",
+ " Downloading dash-2.15.0-py3-none-any.whl (10.2 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m10.2/10.2 MB\u001b[0m \u001b[31m106.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: werkzeug>=2.2.3 in /usr/local/lib/python3.10/dist-packages (from open3d->-r requirements.txt (line 7)) (3.0.1)\n",
+ "Requirement already satisfied: nbformat>=5.7.0 in /usr/local/lib/python3.10/dist-packages (from open3d->-r requirements.txt (line 7)) (5.9.2)\n",
+ "Collecting configargparse (from open3d->-r requirements.txt (line 7))\n",
+ " Downloading ConfigArgParse-1.7-py3-none-any.whl (25 kB)\n",
+ "Collecting ipywidgets>=8.0.4 (from open3d->-r requirements.txt (line 7))\n",
+ " Downloading ipywidgets-8.1.2-py3-none-any.whl (139 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m139.4/139.4 kB\u001b[0m \u001b[31m21.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: pandas>=1.0 in /usr/local/lib/python3.10/dist-packages (from open3d->-r requirements.txt (line 7)) (1.5.3)\n",
+ "Requirement already satisfied: scikit-learn>=0.21 in /usr/local/lib/python3.10/dist-packages (from open3d->-r requirements.txt (line 7)) (1.2.2)\n",
+ "Collecting pyquaternion (from open3d->-r requirements.txt (line 7))\n",
+ " Downloading pyquaternion-0.9.9-py3-none-any.whl (14 kB)\n",
+ "Requirement already satisfied: Flask<3.1,>=1.0.4 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (2.2.5)\n",
+ "Requirement already satisfied: plotly>=5.0.0 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (5.15.0)\n",
+ "Collecting dash-html-components==2.0.0 (from dash>=2.6.0->open3d->-r requirements.txt (line 7))\n",
+ " Downloading dash_html_components-2.0.0-py3-none-any.whl (4.1 kB)\n",
+ "Collecting dash-core-components==2.0.0 (from dash>=2.6.0->open3d->-r requirements.txt (line 7))\n",
+ " Downloading dash_core_components-2.0.0-py3-none-any.whl (3.8 kB)\n",
+ "Collecting dash-table==5.0.0 (from dash>=2.6.0->open3d->-r requirements.txt (line 7))\n",
+ " Downloading dash_table-5.0.0-py3-none-any.whl (3.9 kB)\n",
+ "Requirement already satisfied: typing-extensions>=4.1.1 in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (4.9.0)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (2.31.0)\n",
+ "Collecting retrying (from dash>=2.6.0->open3d->-r requirements.txt (line 7))\n",
+ " Downloading retrying-1.3.4-py3-none-any.whl (11 kB)\n",
+ "Requirement already satisfied: nest-asyncio in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (1.6.0)\n",
+ "Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (67.7.2)\n",
+ "Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.10/dist-packages (from dash>=2.6.0->open3d->-r requirements.txt (line 7)) (7.0.1)\n",
+ "Collecting comm>=0.1.3 (from ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7))\n",
+ " Downloading comm-0.2.1-py3-none-any.whl (7.2 kB)\n",
+ "Requirement already satisfied: ipython>=6.1.0 in /usr/local/lib/python3.10/dist-packages (from ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (7.34.0)\n",
+ "Requirement already satisfied: traitlets>=4.3.1 in /usr/local/lib/python3.10/dist-packages (from ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (5.7.1)\n",
+ "Collecting widgetsnbextension~=4.0.10 (from ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7))\n",
+ " Downloading widgetsnbextension-4.0.10-py3-none-any.whl (2.3 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.3/2.3 MB\u001b[0m \u001b[31m83.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting jupyterlab-widgets~=3.0.10 (from ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7))\n",
+ " Downloading jupyterlab_widgets-3.0.10-py3-none-any.whl (215 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m215.0/215.0 kB\u001b[0m \u001b[31m20.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: fastjsonschema in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (2.19.1)\n",
+ "Requirement already satisfied: jsonschema>=2.6 in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (4.19.2)\n",
+ "Requirement already satisfied: jupyter-core in /usr/local/lib/python3.10/dist-packages (from nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (5.7.1)\n",
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas>=1.0->open3d->-r requirements.txt (line 7)) (2023.4)\n",
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.7->matplotlib->-r requirements.txt (line 2)) (1.16.0)\n",
+ "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21->open3d->-r requirements.txt (line 7)) (1.3.2)\n",
+ "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21->open3d->-r requirements.txt (line 7)) (3.2.0)\n",
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (3.13.1)\n",
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (1.12)\n",
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (3.2.1)\n",
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (3.1.3)\n",
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (2023.6.0)\n",
+ "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=0.4.0->lpips->-r requirements.txt (line 4)) (2.1.0)\n",
+ "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=2.2.3->open3d->-r requirements.txt (line 7)) (2.1.5)\n",
+ "Requirement already satisfied: platformdirs>=3.5.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv==1.6.0->-r requirements.txt (line 1)) (4.2.0)\n",
+ "Requirement already satisfied: tomli>=2.0.1 in /usr/local/lib/python3.10/dist-packages (from yapf->mmcv==1.6.0->-r requirements.txt (line 1)) (2.0.1)\n",
+ "Requirement already satisfied: itsdangerous>=2.0 in /usr/local/lib/python3.10/dist-packages (from Flask<3.1,>=1.0.4->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (2.1.2)\n",
+ "Requirement already satisfied: click>=8.0 in /usr/local/lib/python3.10/dist-packages (from Flask<3.1,>=1.0.4->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (8.1.7)\n",
+ "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.10/dist-packages (from importlib-metadata->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (3.17.0)\n",
+ "Collecting jedi>=0.16 (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7))\n",
+ " Downloading jedi-0.19.1-py2.py3-none-any.whl (1.6 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m86.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (4.4.2)\n",
+ "Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.7.5)\n",
+ "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (3.0.43)\n",
+ "Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (2.16.1)\n",
+ "Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.2.0)\n",
+ "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.1.6)\n",
+ "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.10/dist-packages (from ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (4.9.0)\n",
+ "Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (23.2.0)\n",
+ "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (2023.12.1)\n",
+ "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (0.33.0)\n",
+ "Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.10/dist-packages (from jsonschema>=2.6->nbformat>=5.7.0->open3d->-r requirements.txt (line 7)) (0.17.1)\n",
+ "Requirement already satisfied: tenacity>=6.2.0 in /usr/local/lib/python3.10/dist-packages (from plotly>=5.0.0->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (8.2.3)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (3.3.2)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (3.6)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (2.0.7)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->dash>=2.6.0->open3d->-r requirements.txt (line 7)) (2024.2.2)\n",
+ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=0.4.0->lpips->-r requirements.txt (line 4)) (1.3.0)\n",
+ "Requirement already satisfied: parso<0.9.0,>=0.8.3 in /usr/local/lib/python3.10/dist-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.8.3)\n",
+ "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.7.0)\n",
+ "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=6.1.0->ipywidgets>=8.0.4->open3d->-r requirements.txt (line 7)) (0.2.13)\n",
"Building wheels for collected packages: mmcv\n",
" Building wheel for mmcv (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
- " Created wheel for mmcv: filename=mmcv-1.6.0-py2.py3-none-any.whl size=847986 sha256=4aa22bff8e71e96eb0c412dea29087e675ea7ff37a484c5be7ca685662aa958c\n",
+ " Created wheel for mmcv: filename=mmcv-1.6.0-py2.py3-none-any.whl size=847985 sha256=09c3e9c292df38e995e6da1e8f4ffc10d60b73922923155974090a6f1811ece7\n",
" Stored in directory: /root/.cache/pip/wheels/02/e2/7c/97f72e34ee40d71cdd28b94c9fdfec7bcc453651ad6e65c96d\n",
"Successfully built mmcv\n",
- "Installing collected packages: argparse, addict, plyfile, nvidia-cuda-runtime-cu11, nvidia-cuda-nvrtc-cu11, nvidia-cublas-cu11, yapf, nvidia-cudnn-cu11, torch, mmcv, torchvision, torchaudio, lpips\n",
- " Attempting uninstall: torch\n",
- " Found existing installation: torch 2.0.1+cu118\n",
- " Uninstalling torch-2.0.1+cu118:\n",
- " Successfully uninstalled torch-2.0.1+cu118\n",
- " Attempting uninstall: torchvision\n",
- " Found existing installation: torchvision 0.15.2+cu118\n",
- " Uninstalling torchvision-0.15.2+cu118:\n",
- " Successfully uninstalled torchvision-0.15.2+cu118\n",
- " Attempting uninstall: torchaudio\n",
- " Found existing installation: torchaudio 2.0.2+cu118\n",
- " Uninstalling torchaudio-2.0.2+cu118:\n",
- " Successfully uninstalled torchaudio-2.0.2+cu118\n",
- "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
- "torchdata 0.6.1 requires torch==2.0.1, but you have torch 1.13.1 which is incompatible.\n",
- "torchtext 0.15.2 requires torch==2.0.1, but you have torch 1.13.1 which is incompatible.\u001b[0m\u001b[31m\n",
- "\u001b[0mSuccessfully installed addict-2.4.0 argparse-1.4.0 lpips-0.1.4 mmcv-1.6.0 nvidia-cublas-cu11-11.10.3.66 nvidia-cuda-nvrtc-cu11-11.7.99 nvidia-cuda-runtime-cu11-11.7.99 nvidia-cudnn-cu11-8.5.0.96 plyfile-1.0.1 torch-1.13.1 torchaudio-0.13.1 torchvision-0.14.1 yapf-0.40.2\n"
+ "Installing collected packages: dash-table, dash-html-components, dash-core-components, argparse, addict, widgetsnbextension, retrying, pyquaternion, plyfile, jupyterlab-widgets, jedi, configargparse, comm, yapf, pytorch_msssim, mmcv, ipywidgets, dash, lpips, open3d\n",
+ " Attempting uninstall: widgetsnbextension\n",
+ " Found existing installation: widgetsnbextension 3.6.6\n",
+ " Uninstalling widgetsnbextension-3.6.6:\n",
+ " Successfully uninstalled widgetsnbextension-3.6.6\n",
+ " Attempting uninstall: jupyterlab-widgets\n",
+ " Found existing installation: jupyterlab-widgets 3.0.9\n",
+ " Uninstalling jupyterlab-widgets-3.0.9:\n",
+ " Successfully uninstalled jupyterlab-widgets-3.0.9\n",
+ " Attempting uninstall: ipywidgets\n",
+ " Found existing installation: ipywidgets 7.7.1\n",
+ " Uninstalling ipywidgets-7.7.1:\n",
+ " Successfully uninstalled ipywidgets-7.7.1\n",
+ "Successfully installed addict-2.4.0 argparse-1.4.0 comm-0.2.1 configargparse-1.7 dash-2.15.0 dash-core-components-2.0.0 dash-html-components-2.0.0 dash-table-5.0.0 ipywidgets-8.1.2 jedi-0.19.1 jupyterlab-widgets-3.0.10 lpips-0.1.4 mmcv-1.6.0 open3d-0.18.0 plyfile-1.0.3 pyquaternion-0.9.9 pytorch_msssim-1.0.0 retrying-1.3.4 widgetsnbextension-4.0.10 yapf-0.40.2\n"
]
},
{
+ "output_type": "display_data",
"data": {
"application/vnd.colab-display-data+json": {
"pip_warning": {
@@ -131,8 +190,7 @@
}
}
},
- "metadata": {},
- "output_type": "display_data"
+ "metadata": {}
},
{
"output_type": "stream",
@@ -153,7 +211,7 @@
],
"source": [
"%cd /content\n",
- "!git clone https://github.com/hustvl/4DGaussians\n",
+ "!git clone https://github.com/Tasmay-Tibrewal/4DGaussians\n",
"%cd 4DGaussians\n",
"!git submodule update --init --recursive\n",
"\n",
@@ -162,6 +220,91 @@
"!pip install -e submodules/simple-knn\n"
]
},
+ {
+ "cell_type": "code",
+ "source": [
+ "!sudo apt-get install libglm-dev"
+ ],
+ "metadata": {
+ "id": "vSr0LKDzyXRc",
+ "outputId": "8d0f718c-be6d-4925-f78e-3401ab1a2f86",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Reading package lists... Done\n",
+ "Building dependency tree... Done\n",
+ "Reading state information... Done\n",
+ "The following NEW packages will be installed:\n",
+ " libglm-dev\n",
+ "0 upgraded, 1 newly installed, 0 to remove and 32 not upgraded.\n",
+ "Need to get 188 kB of archives.\n",
+ "After this operation, 2,312 kB of additional disk space will be used.\n",
+ "Get:1 http://archive.ubuntu.com/ubuntu jammy/main amd64 libglm-dev all 0.9.9.8+ds-2 [188 kB]\n",
+ "Fetched 188 kB in 1s (156 kB/s)\n",
+ "debconf: unable to initialize frontend: Dialog\n",
+ "debconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 78, <> line 1.)\n",
+ "debconf: falling back to frontend: Readline\n",
+ "debconf: unable to initialize frontend: Readline\n",
+ "debconf: (This frontend requires a controlling tty.)\n",
+ "debconf: falling back to frontend: Teletype\n",
+ "dpkg-preconfigure: unable to re-open stdin: \n",
+ "Selecting previously unselected package libglm-dev.\n",
+ "(Reading database ... 121749 files and directories currently installed.)\n",
+ "Preparing to unpack .../libglm-dev_0.9.9.8+ds-2_all.deb ...\n",
+ "Unpacking libglm-dev (0.9.9.8+ds-2) ...\n",
+ "Setting up libglm-dev (0.9.9.8+ds-2) ...\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip3 install torch torchvision torchaudio"
+ ],
+ "metadata": {
+ "id": "b3AB2HKnyzr6",
+ "outputId": "562567c4-e1e8-4227-adb1-9f0f1b4d9984",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (2.1.0+cu121)\n",
+ "Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (0.16.0+cu121)\n",
+ "Requirement already satisfied: torchaudio in /usr/local/lib/python3.10/dist-packages (2.1.0+cu121)\n",
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch) (3.13.1)\n",
+ "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch) (4.9.0)\n",
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch) (1.12)\n",
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch) (3.2.1)\n",
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch) (3.1.3)\n",
+ "Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch) (2023.6.0)\n",
+ "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch) (2.1.0)\n",
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision) (1.23.5)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision) (2.31.0)\n",
+ "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision) (9.4.0)\n",
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch) (2.1.5)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.3.2)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (3.6)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2.0.7)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->torchvision) (2024.2.2)\n",
+ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch) (1.3.0)\n"
+ ]
+ }
+ ]
+ },
{
"cell_type": "code",
"source": [
@@ -171,24 +314,1526 @@
"!unzip data.zip"
],
"metadata": {
- "id": "wpqmK97Koq36"
+ "id": "wpqmK97Koq36",
+ "outputId": "e612d081-ce96-4bcf-c84b-3a24b8345cec",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
},
- "execution_count": null,
- "outputs": []
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/test\n",
+ "--2024-02-11 17:13:27-- https://huggingface.co/camenduru/4DGaussians/resolve/main/data/data.zip\n",
+ "Resolving huggingface.co (huggingface.co)... 13.33.33.55, 13.33.33.20, 13.33.33.110, ...\n",
+ "Connecting to huggingface.co (huggingface.co)|13.33.33.55|:443... connected.\n",
+ "HTTP request sent, awaiting response... 302 Found\n",
+ "Location: https://cdn-lfs.huggingface.co/repos/54/6f/546f5254879b01adbe4d90a6dfb3d38dbe03222785e85aa12bff42c456102d6a/4e901bd3463e874bed5dd6bbe3236a8b2348f9adb5ef20484f8ba8b6d96876ec?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27data.zip%3B+filename%3D%22data.zip%22%3B&response-content-type=application%2Fzip&Expires=1707930808&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwNzkzMDgwOH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy81NC82Zi81NDZmNTI1NDg3OWIwMWFkYmU0ZDkwYTZkZmIzZDM4ZGJlMDMyMjI3ODVlODVhYTEyYmZmNDJjNDU2MTAyZDZhLzRlOTAxYmQzNDYzZTg3NGJlZDVkZDZiYmUzMjM2YThiMjM0OGY5YWRiNWVmMjA0ODRmOGJhOGI2ZDk2ODc2ZWM%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=NDHtCXKiz1eNtzZoMancMPt7mbOe0gQ3lN6JUF9PDRsft4p2FxZ112fbKlC5i6NmpTEIl27MAM640BMU8lcza7LAjOf8Smz-WAhZkx3uGNdmgGtP%7E2TS%7EiCIgLCumRnuSQh73ygLfCn0Q2y3sm9q2hQNg31Eq2HID79y3QoyDSX5UXpNMWtu-2U05p16ud0OyFCvaanCOfthaDkWmw7dt99V%7Ex1r%7EFCuTjdKpvcrfg8YCU9%7EMh3rMiXI%7EcBHqGMT38w6jl5L5zJXWXoucpenBpI5KOoW%7E5RvQgfsvX9ywrR%7ENWdyyThfJ7X7K7NOhlWldMcIgR77%7EGJBDAE%7EqwhtFw__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n",
+ "--2024-02-11 17:13:28-- https://cdn-lfs.huggingface.co/repos/54/6f/546f5254879b01adbe4d90a6dfb3d38dbe03222785e85aa12bff42c456102d6a/4e901bd3463e874bed5dd6bbe3236a8b2348f9adb5ef20484f8ba8b6d96876ec?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27data.zip%3B+filename%3D%22data.zip%22%3B&response-content-type=application%2Fzip&Expires=1707930808&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwNzkzMDgwOH19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy81NC82Zi81NDZmNTI1NDg3OWIwMWFkYmU0ZDkwYTZkZmIzZDM4ZGJlMDMyMjI3ODVlODVhYTEyYmZmNDJjNDU2MTAyZDZhLzRlOTAxYmQzNDYzZTg3NGJlZDVkZDZiYmUzMjM2YThiMjM0OGY5YWRiNWVmMjA0ODRmOGJhOGI2ZDk2ODc2ZWM%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=NDHtCXKiz1eNtzZoMancMPt7mbOe0gQ3lN6JUF9PDRsft4p2FxZ112fbKlC5i6NmpTEIl27MAM640BMU8lcza7LAjOf8Smz-WAhZkx3uGNdmgGtP%7E2TS%7EiCIgLCumRnuSQh73ygLfCn0Q2y3sm9q2hQNg31Eq2HID79y3QoyDSX5UXpNMWtu-2U05p16ud0OyFCvaanCOfthaDkWmw7dt99V%7Ex1r%7EFCuTjdKpvcrfg8YCU9%7EMh3rMiXI%7EcBHqGMT38w6jl5L5zJXWXoucpenBpI5KOoW%7E5RvQgfsvX9ywrR%7ENWdyyThfJ7X7K7NOhlWldMcIgR77%7EGJBDAE%7EqwhtFw__&Key-Pair-Id=KVTP0A1DKRTAX\n",
+ "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 18.155.68.73, 18.155.68.98, 18.155.68.94, ...\n",
+ "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|18.155.68.73|:443... connected.\n",
+ "HTTP request sent, awaiting response... 200 OK\n",
+ "Length: 257793177 (246M) [application/zip]\n",
+ "Saving to: ‘data.zip’\n",
+ "\n",
+ "data.zip 100%[===================>] 245.85M 260MB/s in 0.9s \n",
+ "\n",
+ "2024-02-11 17:13:30 (260 MB/s) - ‘data.zip’ saved [257793177/257793177]\n",
+ "\n",
+ "Archive: data.zip\n",
+ " creating: data/\n",
+ " creating: data/bouncingballs/\n",
+ " inflating: data/bouncingballs/transforms_train.json \n",
+ " creating: data/bouncingballs/val/\n",
+ " inflating: data/bouncingballs/val/r_012.png \n",
+ " inflating: data/bouncingballs/val/r_003.png \n",
+ " inflating: data/bouncingballs/val/r_017.png \n",
+ " inflating: data/bouncingballs/val/r_009.png \n",
+ " inflating: data/bouncingballs/val/r_006.png \n",
+ " inflating: data/bouncingballs/val/r_014.png \n",
+ " inflating: data/bouncingballs/val/r_005.png \n",
+ " inflating: data/bouncingballs/val/r_013.png \n",
+ " inflating: data/bouncingballs/val/r_001.png \n",
+ " inflating: data/bouncingballs/val/r_000.png \n",
+ " inflating: data/bouncingballs/val/r_018.png \n",
+ " inflating: data/bouncingballs/val/r_011.png \n",
+ " inflating: data/bouncingballs/val/r_007.png \n",
+ " inflating: data/bouncingballs/val/r_008.png \n",
+ " inflating: data/bouncingballs/val/r_019.png \n",
+ " inflating: data/bouncingballs/val/r_015.png \n",
+ " inflating: data/bouncingballs/val/r_010.png \n",
+ " inflating: data/bouncingballs/val/r_016.png \n",
+ " inflating: data/bouncingballs/val/r_002.png \n",
+ " inflating: data/bouncingballs/val/r_004.png \n",
+ " creating: data/bouncingballs/test/\n",
+ " inflating: data/bouncingballs/test/r_012.png \n",
+ " inflating: data/bouncingballs/test/r_003.png \n",
+ " inflating: data/bouncingballs/test/r_017.png \n",
+ " inflating: data/bouncingballs/test/r_009.png \n",
+ " inflating: data/bouncingballs/test/r_006.png \n",
+ " inflating: data/bouncingballs/test/r_014.png \n",
+ " inflating: data/bouncingballs/test/r_005.png \n",
+ " inflating: data/bouncingballs/test/r_013.png \n",
+ " inflating: data/bouncingballs/test/r_001.png \n",
+ " inflating: data/bouncingballs/test/r_000.png \n",
+ " inflating: data/bouncingballs/test/r_018.png \n",
+ " inflating: data/bouncingballs/test/r_011.png \n",
+ " inflating: data/bouncingballs/test/r_007.png \n",
+ " inflating: data/bouncingballs/test/r_008.png \n",
+ " inflating: data/bouncingballs/test/r_019.png \n",
+ " inflating: data/bouncingballs/test/r_015.png \n",
+ " inflating: data/bouncingballs/test/r_010.png \n",
+ " inflating: data/bouncingballs/test/r_016.png \n",
+ " inflating: data/bouncingballs/test/r_002.png \n",
+ " inflating: data/bouncingballs/test/r_004.png \n",
+ " inflating: data/bouncingballs/transforms_test.json \n",
+ " creating: data/bouncingballs/train/\n",
+ " inflating: data/bouncingballs/train/r_026.png \n",
+ " inflating: data/bouncingballs/train/r_129.png \n",
+ " inflating: data/bouncingballs/train/r_061.png \n",
+ " inflating: data/bouncingballs/train/r_088.png \n",
+ " inflating: data/bouncingballs/train/r_027.png \n",
+ " inflating: data/bouncingballs/train/r_134.png \n",
+ " inflating: data/bouncingballs/train/r_065.png \n",
+ " inflating: data/bouncingballs/train/r_100.png \n",
+ " inflating: data/bouncingballs/train/r_012.png \n",
+ " inflating: data/bouncingballs/train/r_003.png \n",
+ " inflating: data/bouncingballs/train/r_142.png \n",
+ " inflating: data/bouncingballs/train/r_122.png \n",
+ " inflating: data/bouncingballs/train/r_078.png \n",
+ " inflating: data/bouncingballs/train/r_136.png \n",
+ " inflating: data/bouncingballs/train/r_059.png \n",
+ " inflating: data/bouncingballs/train/r_049.png \n",
+ " inflating: data/bouncingballs/train/r_085.png \n",
+ " inflating: data/bouncingballs/train/r_093.png \n",
+ " inflating: data/bouncingballs/train/r_075.png \n",
+ " inflating: data/bouncingballs/train/r_115.png \n",
+ " inflating: data/bouncingballs/train/r_083.png \n",
+ " inflating: data/bouncingballs/train/r_110.png \n",
+ " inflating: data/bouncingballs/train/r_017.png \n",
+ " inflating: data/bouncingballs/train/r_139.png \n",
+ " inflating: data/bouncingballs/train/r_009.png \n",
+ " inflating: data/bouncingballs/train/r_020.png \n",
+ " inflating: data/bouncingballs/train/r_124.png \n",
+ " inflating: data/bouncingballs/train/r_006.png \n",
+ " inflating: data/bouncingballs/train/r_014.png \n",
+ " inflating: data/bouncingballs/train/r_132.png \n",
+ " inflating: data/bouncingballs/train/r_054.png \n",
+ " inflating: data/bouncingballs/train/r_032.png \n",
+ " inflating: data/bouncingballs/train/r_089.png \n",
+ " inflating: data/bouncingballs/train/r_005.png \n",
+ " inflating: data/bouncingballs/train/r_096.png \n",
+ " inflating: data/bouncingballs/train/r_030.png \n",
+ " inflating: data/bouncingballs/train/r_031.png \n",
+ " inflating: data/bouncingballs/train/r_025.png \n",
+ " inflating: data/bouncingballs/train/r_097.png \n",
+ " inflating: data/bouncingballs/train/r_058.png \n",
+ " inflating: data/bouncingballs/train/r_043.png \n",
+ " inflating: data/bouncingballs/train/r_103.png \n",
+ " inflating: data/bouncingballs/train/r_042.png \n",
+ " inflating: data/bouncingballs/train/r_056.png \n",
+ " inflating: data/bouncingballs/train/r_098.png \n",
+ " inflating: data/bouncingballs/train/r_148.png \n",
+ " inflating: data/bouncingballs/train/r_038.png \n",
+ " inflating: data/bouncingballs/train/r_137.png \n",
+ " inflating: data/bouncingballs/train/r_140.png \n",
+ " inflating: data/bouncingballs/train/r_108.png \n",
+ " inflating: data/bouncingballs/train/r_102.png \n",
+ " inflating: data/bouncingballs/train/r_069.png \n",
+ " inflating: data/bouncingballs/train/r_076.png \n",
+ " inflating: data/bouncingballs/train/r_045.png \n",
+ " inflating: data/bouncingballs/train/r_035.png \n",
+ " inflating: data/bouncingballs/train/r_127.png \n",
+ " inflating: data/bouncingballs/train/r_013.png \n",
+ " inflating: data/bouncingballs/train/r_051.png \n",
+ " inflating: data/bouncingballs/train/r_144.png \n",
+ " inflating: data/bouncingballs/train/r_001.png \n",
+ " inflating: data/bouncingballs/train/r_055.png \n",
+ " inflating: data/bouncingballs/train/r_095.png \n",
+ " inflating: data/bouncingballs/train/r_079.png \n",
+ " inflating: data/bouncingballs/train/r_052.png \n",
+ " inflating: data/bouncingballs/train/r_046.png \n",
+ " inflating: data/bouncingballs/train/r_044.png \n",
+ " inflating: data/bouncingballs/train/r_064.png \n",
+ " inflating: data/bouncingballs/train/r_029.png \n",
+ " inflating: data/bouncingballs/train/r_120.png \n",
+ " inflating: data/bouncingballs/train/r_105.png \n",
+ " inflating: data/bouncingballs/train/r_091.png \n",
+ " inflating: data/bouncingballs/train/r_109.png \n",
+ " inflating: data/bouncingballs/train/r_028.png \n",
+ " inflating: data/bouncingballs/train/r_126.png \n",
+ " inflating: data/bouncingballs/train/r_048.png \n",
+ " inflating: data/bouncingballs/train/r_123.png \n",
+ " inflating: data/bouncingballs/train/r_101.png \n",
+ " inflating: data/bouncingballs/train/r_092.png \n",
+ " inflating: data/bouncingballs/train/r_143.png \n",
+ " inflating: data/bouncingballs/train/r_084.png \n",
+ " inflating: data/bouncingballs/train/r_116.png \n",
+ " inflating: data/bouncingballs/train/r_128.png \n",
+ " inflating: data/bouncingballs/train/r_000.png \n",
+ " inflating: data/bouncingballs/train/r_034.png \n",
+ " inflating: data/bouncingballs/train/r_111.png \n",
+ " inflating: data/bouncingballs/train/r_119.png \n",
+ " inflating: data/bouncingballs/train/r_039.png \n",
+ " inflating: data/bouncingballs/train/r_077.png \n",
+ " inflating: data/bouncingballs/train/r_062.png \n",
+ " inflating: data/bouncingballs/train/r_018.png \n",
+ " inflating: data/bouncingballs/train/r_106.png \n",
+ " inflating: data/bouncingballs/train/r_011.png \n",
+ " inflating: data/bouncingballs/train/r_047.png \n",
+ " inflating: data/bouncingballs/train/r_033.png \n",
+ " inflating: data/bouncingballs/train/r_080.png \n",
+ " inflating: data/bouncingballs/train/r_074.png \n",
+ " inflating: data/bouncingballs/train/r_135.png \n",
+ " inflating: data/bouncingballs/train/r_067.png \n",
+ " inflating: data/bouncingballs/train/r_086.png \n",
+ " inflating: data/bouncingballs/train/r_087.png \n",
+ " inflating: data/bouncingballs/train/r_104.png \n",
+ " inflating: data/bouncingballs/train/r_007.png \n",
+ " inflating: data/bouncingballs/train/r_024.png \n",
+ " inflating: data/bouncingballs/train/r_138.png \n",
+ " inflating: data/bouncingballs/train/r_057.png \n",
+ " inflating: data/bouncingballs/train/r_145.png \n",
+ " inflating: data/bouncingballs/train/r_112.png \n",
+ " inflating: data/bouncingballs/train/r_050.png \n",
+ " inflating: data/bouncingballs/train/r_008.png \n",
+ " inflating: data/bouncingballs/train/r_117.png \n",
+ " inflating: data/bouncingballs/train/r_073.png \n",
+ " inflating: data/bouncingballs/train/r_019.png \n",
+ " inflating: data/bouncingballs/train/r_121.png \n",
+ " inflating: data/bouncingballs/train/r_023.png \n",
+ " inflating: data/bouncingballs/train/r_081.png \n",
+ " inflating: data/bouncingballs/train/r_090.png \n",
+ " inflating: data/bouncingballs/train/r_015.png \n",
+ " inflating: data/bouncingballs/train/r_053.png \n",
+ " inflating: data/bouncingballs/train/r_037.png \n",
+ " inflating: data/bouncingballs/train/r_066.png \n",
+ " inflating: data/bouncingballs/train/r_114.png \n",
+ " inflating: data/bouncingballs/train/r_071.png \n",
+ " inflating: data/bouncingballs/train/r_113.png \n",
+ " inflating: data/bouncingballs/train/r_010.png \n",
+ " inflating: data/bouncingballs/train/r_147.png \n",
+ " inflating: data/bouncingballs/train/r_070.png \n",
+ " inflating: data/bouncingballs/train/r_016.png \n",
+ " inflating: data/bouncingballs/train/r_125.png \n",
+ " inflating: data/bouncingballs/train/r_002.png \n",
+ " inflating: data/bouncingballs/train/r_041.png \n",
+ " inflating: data/bouncingballs/train/r_133.png \n",
+ " inflating: data/bouncingballs/train/r_036.png \n",
+ " inflating: data/bouncingballs/train/r_118.png \n",
+ " inflating: data/bouncingballs/train/r_068.png \n",
+ " inflating: data/bouncingballs/train/r_141.png \n",
+ " inflating: data/bouncingballs/train/r_040.png \n",
+ " inflating: data/bouncingballs/train/r_082.png \n",
+ " inflating: data/bouncingballs/train/r_131.png \n",
+ " inflating: data/bouncingballs/train/r_022.png \n",
+ " inflating: data/bouncingballs/train/r_130.png \n",
+ " inflating: data/bouncingballs/train/r_099.png \n",
+ " inflating: data/bouncingballs/train/r_149.png \n",
+ " inflating: data/bouncingballs/train/r_107.png \n",
+ " inflating: data/bouncingballs/train/r_004.png \n",
+ " inflating: data/bouncingballs/train/r_146.png \n",
+ " inflating: data/bouncingballs/train/r_063.png \n",
+ " inflating: data/bouncingballs/train/r_094.png \n",
+ " inflating: data/bouncingballs/train/r_021.png \n",
+ " inflating: data/bouncingballs/train/r_060.png \n",
+ " inflating: data/bouncingballs/train/r_072.png \n",
+ " inflating: data/bouncingballs/transforms_val.json \n",
+ " creating: data/hook/\n",
+ " inflating: data/hook/transforms_train.json \n",
+ " inflating: data/hook/transforms_test.json \n",
+ " inflating: data/hook/transforms_val.json \n",
+ " creating: data/hook/val/\n",
+ " inflating: data/hook/val/r_012.png \n",
+ " inflating: data/hook/val/r_003.png \n",
+ " inflating: data/hook/val/r_017.png \n",
+ " inflating: data/hook/val/r_009.png \n",
+ " inflating: data/hook/val/r_006.png \n",
+ " inflating: data/hook/val/r_014.png \n",
+ " inflating: data/hook/val/r_005.png \n",
+ " inflating: data/hook/val/r_013.png \n",
+ " inflating: data/hook/val/r_001.png \n",
+ " inflating: data/hook/val/r_000.png \n",
+ " inflating: data/hook/val/r_018.png \n",
+ " inflating: data/hook/val/r_011.png \n",
+ " inflating: data/hook/val/r_007.png \n",
+ " inflating: data/hook/val/r_008.png \n",
+ " inflating: data/hook/val/r_019.png \n",
+ " inflating: data/hook/val/r_015.png \n",
+ " inflating: data/hook/val/r_010.png \n",
+ " inflating: data/hook/val/r_016.png \n",
+ " inflating: data/hook/val/r_002.png \n",
+ " inflating: data/hook/val/r_004.png \n",
+ " creating: data/hook/test/\n",
+ " inflating: data/hook/test/r_012.png \n",
+ " inflating: data/hook/test/r_003.png \n",
+ " inflating: data/hook/test/r_017.png \n",
+ " inflating: data/hook/test/r_009.png \n",
+ " inflating: data/hook/test/r_006.png \n",
+ " inflating: data/hook/test/r_014.png \n",
+ " inflating: data/hook/test/r_005.png \n",
+ " inflating: data/hook/test/r_013.png \n",
+ " inflating: data/hook/test/r_001.png \n",
+ " inflating: data/hook/test/r_000.png \n",
+ " inflating: data/hook/test/r_018.png \n",
+ " inflating: data/hook/test/r_011.png \n",
+ " inflating: data/hook/test/r_007.png \n",
+ " inflating: data/hook/test/r_008.png \n",
+ " inflating: data/hook/test/r_019.png \n",
+ " inflating: data/hook/test/r_015.png \n",
+ " inflating: data/hook/test/r_010.png \n",
+ " inflating: data/hook/test/r_016.png \n",
+ " inflating: data/hook/test/r_002.png \n",
+ " inflating: data/hook/test/r_004.png \n",
+ " creating: data/hook/train/\n",
+ " inflating: data/hook/train/r_061.png \n",
+ " inflating: data/hook/train/r_088.png \n",
+ " inflating: data/hook/train/r_027.png \n",
+ " inflating: data/hook/train/r_065.png \n",
+ " inflating: data/hook/train/r_012.png \n",
+ " inflating: data/hook/train/r_003.png \n",
+ " inflating: data/hook/train/r_078.png \n",
+ " inflating: data/hook/train/r_059.png \n",
+ " inflating: data/hook/train/r_049.png \n",
+ " inflating: data/hook/train/r_085.png \n",
+ " inflating: data/hook/train/r_026.png \n",
+ " inflating: data/hook/train/r_093.png \n",
+ " inflating: data/hook/train/r_075.png \n",
+ " inflating: data/hook/train/r_083.png \n",
+ " inflating: data/hook/train/r_017.png \n",
+ " inflating: data/hook/train/r_009.png \n",
+ " inflating: data/hook/train/r_020.png \n",
+ " inflating: data/hook/train/r_006.png \n",
+ " inflating: data/hook/train/r_014.png \n",
+ " inflating: data/hook/train/r_054.png \n",
+ " inflating: data/hook/train/r_032.png \n",
+ " inflating: data/hook/train/r_089.png \n",
+ " inflating: data/hook/train/r_005.png \n",
+ " inflating: data/hook/train/r_096.png \n",
+ " inflating: data/hook/train/r_030.png \n",
+ " inflating: data/hook/train/r_031.png \n",
+ " inflating: data/hook/train/r_025.png \n",
+ " inflating: data/hook/train/r_097.png \n",
+ " inflating: data/hook/train/r_058.png \n",
+ " inflating: data/hook/train/r_043.png \n",
+ " inflating: data/hook/train/r_042.png \n",
+ " inflating: data/hook/train/r_056.png \n",
+ " inflating: data/hook/train/r_098.png \n",
+ " inflating: data/hook/train/r_038.png \n",
+ " inflating: data/hook/train/r_069.png \n",
+ " inflating: data/hook/train/r_076.png \n",
+ " inflating: data/hook/train/r_045.png \n",
+ " inflating: data/hook/train/r_035.png \n",
+ " inflating: data/hook/train/r_013.png \n",
+ " inflating: data/hook/train/r_051.png \n",
+ " inflating: data/hook/train/r_001.png \n",
+ " inflating: data/hook/train/r_055.png \n",
+ " inflating: data/hook/train/r_095.png \n",
+ " inflating: data/hook/train/r_079.png \n",
+ " inflating: data/hook/train/r_052.png \n",
+ " inflating: data/hook/train/r_046.png \n",
+ " inflating: data/hook/train/r_044.png \n",
+ " inflating: data/hook/train/r_064.png \n",
+ " inflating: data/hook/train/r_029.png \n",
+ " inflating: data/hook/train/r_091.png \n",
+ " inflating: data/hook/train/r_028.png \n",
+ " inflating: data/hook/train/r_048.png \n",
+ " inflating: data/hook/train/r_092.png \n",
+ " inflating: data/hook/train/r_084.png \n",
+ " inflating: data/hook/train/r_000.png \n",
+ " inflating: data/hook/train/r_034.png \n",
+ " inflating: data/hook/train/r_039.png \n",
+ " inflating: data/hook/train/r_077.png \n",
+ " inflating: data/hook/train/r_062.png \n",
+ " inflating: data/hook/train/r_018.png \n",
+ " inflating: data/hook/train/r_011.png \n",
+ " inflating: data/hook/train/r_047.png \n",
+ " inflating: data/hook/train/r_033.png \n",
+ " inflating: data/hook/train/r_080.png \n",
+ " inflating: data/hook/train/r_074.png \n",
+ " inflating: data/hook/train/r_067.png \n",
+ " inflating: data/hook/train/r_086.png \n",
+ " inflating: data/hook/train/r_087.png \n",
+ " inflating: data/hook/train/r_007.png \n",
+ " inflating: data/hook/train/r_024.png \n",
+ " inflating: data/hook/train/r_057.png \n",
+ " inflating: data/hook/train/r_050.png \n",
+ " inflating: data/hook/train/r_008.png \n",
+ " inflating: data/hook/train/r_073.png \n",
+ " inflating: data/hook/train/r_019.png \n",
+ " inflating: data/hook/train/r_023.png \n",
+ " inflating: data/hook/train/r_081.png \n",
+ " inflating: data/hook/train/r_090.png \n",
+ " inflating: data/hook/train/r_015.png \n",
+ " inflating: data/hook/train/r_053.png \n",
+ " inflating: data/hook/train/r_037.png \n",
+ " inflating: data/hook/train/r_066.png \n",
+ " inflating: data/hook/train/r_071.png \n",
+ " inflating: data/hook/train/r_010.png \n",
+ " inflating: data/hook/train/r_070.png \n",
+ " inflating: data/hook/train/r_016.png \n",
+ " inflating: data/hook/train/r_002.png \n",
+ " inflating: data/hook/train/r_041.png \n",
+ " inflating: data/hook/train/r_036.png \n",
+ " inflating: data/hook/train/r_068.png \n",
+ " inflating: data/hook/train/r_040.png \n",
+ " inflating: data/hook/train/r_082.png \n",
+ " inflating: data/hook/train/r_022.png \n",
+ " inflating: data/hook/train/r_099.png \n",
+ " inflating: data/hook/train/r_004.png \n",
+ " inflating: data/hook/train/r_063.png \n",
+ " inflating: data/hook/train/r_094.png \n",
+ " inflating: data/hook/train/r_021.png \n",
+ " inflating: data/hook/train/r_060.png \n",
+ " inflating: data/hook/train/r_072.png \n",
+ " creating: data/standup/\n",
+ " inflating: data/standup/transforms_train.json \n",
+ " inflating: data/standup/transforms_test.json \n",
+ " inflating: data/standup/transforms_val.json \n",
+ " creating: data/standup/val/\n",
+ " inflating: data/standup/val/r_012.png \n",
+ " inflating: data/standup/val/r_003.png \n",
+ " inflating: data/standup/val/r_017.png \n",
+ " inflating: data/standup/val/r_009.png \n",
+ " inflating: data/standup/val/r_006.png \n",
+ " inflating: data/standup/val/r_014.png \n",
+ " inflating: data/standup/val/r_005.png \n",
+ " inflating: data/standup/val/r_013.png \n",
+ " inflating: data/standup/val/r_001.png \n",
+ " inflating: data/standup/val/r_000.png \n",
+ " inflating: data/standup/val/r_018.png \n",
+ " inflating: data/standup/val/r_011.png \n",
+ " inflating: data/standup/val/r_007.png \n",
+ " inflating: data/standup/val/r_008.png \n",
+ " inflating: data/standup/val/r_019.png \n",
+ " inflating: data/standup/val/r_015.png \n",
+ " inflating: data/standup/val/r_010.png \n",
+ " inflating: data/standup/val/r_016.png \n",
+ " inflating: data/standup/val/r_002.png \n",
+ " inflating: data/standup/val/r_004.png \n",
+ " creating: data/standup/test/\n",
+ " inflating: data/standup/test/r_012.png \n",
+ " inflating: data/standup/test/r_003.png \n",
+ " inflating: data/standup/test/r_017.png \n",
+ " inflating: data/standup/test/r_009.png \n",
+ " inflating: data/standup/test/r_006.png \n",
+ " inflating: data/standup/test/r_014.png \n",
+ " inflating: data/standup/test/r_005.png \n",
+ " inflating: data/standup/test/r_013.png \n",
+ " inflating: data/standup/test/r_001.png \n",
+ " inflating: data/standup/test/r_000.png \n",
+ " inflating: data/standup/test/r_018.png \n",
+ " inflating: data/standup/test/r_011.png \n",
+ " inflating: data/standup/test/r_007.png \n",
+ " inflating: data/standup/test/r_008.png \n",
+ " inflating: data/standup/test/r_019.png \n",
+ " inflating: data/standup/test/r_015.png \n",
+ " inflating: data/standup/test/r_010.png \n",
+ " inflating: data/standup/test/r_016.png \n",
+ " inflating: data/standup/test/r_002.png \n",
+ " inflating: data/standup/test/r_004.png \n",
+ " creating: data/standup/train/\n",
+ " inflating: data/standup/train/r_129.png \n",
+ " inflating: data/standup/train/r_061.png \n",
+ " inflating: data/standup/train/r_088.png \n",
+ " inflating: data/standup/train/r_027.png \n",
+ " inflating: data/standup/train/r_134.png \n",
+ " inflating: data/standup/train/r_065.png \n",
+ " inflating: data/standup/train/r_100.png \n",
+ " inflating: data/standup/train/r_012.png \n",
+ " inflating: data/standup/train/r_003.png \n",
+ " inflating: data/standup/train/r_142.png \n",
+ " inflating: data/standup/train/r_122.png \n",
+ " inflating: data/standup/train/r_078.png \n",
+ " inflating: data/standup/train/r_136.png \n",
+ " inflating: data/standup/train/r_059.png \n",
+ " inflating: data/standup/train/r_049.png \n",
+ " inflating: data/standup/train/r_085.png \n",
+ " inflating: data/standup/train/r_026.png \n",
+ " inflating: data/standup/train/r_093.png \n",
+ " inflating: data/standup/train/r_075.png \n",
+ " inflating: data/standup/train/r_115.png \n",
+ " inflating: data/standup/train/r_083.png \n",
+ " inflating: data/standup/train/r_110.png \n",
+ " inflating: data/standup/train/r_017.png \n",
+ " inflating: data/standup/train/r_139.png \n",
+ " inflating: data/standup/train/r_009.png \n",
+ " inflating: data/standup/train/r_020.png \n",
+ " inflating: data/standup/train/r_124.png \n",
+ " inflating: data/standup/train/r_006.png \n",
+ " inflating: data/standup/train/r_014.png \n",
+ " inflating: data/standup/train/r_132.png \n",
+ " inflating: data/standup/train/r_054.png \n",
+ " inflating: data/standup/train/r_032.png \n",
+ " inflating: data/standup/train/r_089.png \n",
+ " inflating: data/standup/train/r_005.png \n",
+ " inflating: data/standup/train/r_096.png \n",
+ " inflating: data/standup/train/r_030.png \n",
+ " inflating: data/standup/train/r_031.png \n",
+ " inflating: data/standup/train/r_025.png \n",
+ " inflating: data/standup/train/r_097.png \n",
+ " inflating: data/standup/train/r_058.png \n",
+ " inflating: data/standup/train/r_043.png \n",
+ " inflating: data/standup/train/r_103.png \n",
+ " inflating: data/standup/train/r_042.png \n",
+ " inflating: data/standup/train/r_056.png \n",
+ " inflating: data/standup/train/r_098.png \n",
+ " inflating: data/standup/train/r_148.png \n",
+ " inflating: data/standup/train/r_038.png \n",
+ " inflating: data/standup/train/r_137.png \n",
+ " inflating: data/standup/train/r_140.png \n",
+ " inflating: data/standup/train/r_108.png \n",
+ " inflating: data/standup/train/r_102.png \n",
+ " inflating: data/standup/train/r_069.png \n",
+ " inflating: data/standup/train/r_076.png \n",
+ " inflating: data/standup/train/r_045.png \n",
+ " inflating: data/standup/train/r_035.png \n",
+ " inflating: data/standup/train/r_127.png \n",
+ " inflating: data/standup/train/r_013.png \n",
+ " inflating: data/standup/train/r_051.png \n",
+ " inflating: data/standup/train/r_144.png \n",
+ " inflating: data/standup/train/r_001.png \n",
+ " inflating: data/standup/train/r_055.png \n",
+ " inflating: data/standup/train/r_095.png \n",
+ " inflating: data/standup/train/r_079.png \n",
+ " inflating: data/standup/train/r_052.png \n",
+ " inflating: data/standup/train/r_046.png \n",
+ " inflating: data/standup/train/r_044.png \n",
+ " inflating: data/standup/train/r_064.png \n",
+ " inflating: data/standup/train/r_029.png \n",
+ " inflating: data/standup/train/r_120.png \n",
+ " inflating: data/standup/train/r_105.png \n",
+ " inflating: data/standup/train/r_091.png \n",
+ " inflating: data/standup/train/r_109.png \n",
+ " inflating: data/standup/train/r_028.png \n",
+ " inflating: data/standup/train/r_126.png \n",
+ " inflating: data/standup/train/r_048.png \n",
+ " inflating: data/standup/train/r_123.png \n",
+ " inflating: data/standup/train/r_101.png \n",
+ " inflating: data/standup/train/r_092.png \n",
+ " inflating: data/standup/train/r_143.png \n",
+ " inflating: data/standup/train/r_084.png \n",
+ " inflating: data/standup/train/r_116.png \n",
+ " inflating: data/standup/train/r_128.png \n",
+ " inflating: data/standup/train/r_000.png \n",
+ " inflating: data/standup/train/r_034.png \n",
+ " inflating: data/standup/train/r_111.png \n",
+ " inflating: data/standup/train/r_119.png \n",
+ " inflating: data/standup/train/r_039.png \n",
+ " inflating: data/standup/train/r_077.png \n",
+ " inflating: data/standup/train/r_062.png \n",
+ " inflating: data/standup/train/r_018.png \n",
+ " inflating: data/standup/train/r_106.png \n",
+ " inflating: data/standup/train/r_011.png \n",
+ " inflating: data/standup/train/r_047.png \n",
+ " inflating: data/standup/train/r_033.png \n",
+ " inflating: data/standup/train/r_080.png \n",
+ " inflating: data/standup/train/r_074.png \n",
+ " inflating: data/standup/train/r_135.png \n",
+ " inflating: data/standup/train/r_067.png \n",
+ " inflating: data/standup/train/r_086.png \n",
+ " inflating: data/standup/train/r_087.png \n",
+ " inflating: data/standup/train/r_104.png \n",
+ " inflating: data/standup/train/r_007.png \n",
+ " inflating: data/standup/train/r_024.png \n",
+ " inflating: data/standup/train/r_138.png \n",
+ " inflating: data/standup/train/r_057.png \n",
+ " inflating: data/standup/train/r_145.png \n",
+ " inflating: data/standup/train/r_112.png \n",
+ " inflating: data/standup/train/r_050.png \n",
+ " inflating: data/standup/train/r_008.png \n",
+ " inflating: data/standup/train/r_117.png \n",
+ " inflating: data/standup/train/r_073.png \n",
+ " inflating: data/standup/train/r_019.png \n",
+ " inflating: data/standup/train/r_121.png \n",
+ " inflating: data/standup/train/r_023.png \n",
+ " inflating: data/standup/train/r_081.png \n",
+ " inflating: data/standup/train/r_090.png \n",
+ " inflating: data/standup/train/r_015.png \n",
+ " inflating: data/standup/train/r_053.png \n",
+ " inflating: data/standup/train/r_037.png \n",
+ " inflating: data/standup/train/r_066.png \n",
+ " inflating: data/standup/train/r_114.png \n",
+ " inflating: data/standup/train/r_071.png \n",
+ " inflating: data/standup/train/r_113.png \n",
+ " inflating: data/standup/train/r_010.png \n",
+ " inflating: data/standup/train/r_147.png \n",
+ " inflating: data/standup/train/r_070.png \n",
+ " inflating: data/standup/train/r_016.png \n",
+ " inflating: data/standup/train/r_125.png \n",
+ " inflating: data/standup/train/r_002.png \n",
+ " inflating: data/standup/train/r_041.png \n",
+ " inflating: data/standup/train/r_133.png \n",
+ " inflating: data/standup/train/r_036.png \n",
+ " inflating: data/standup/train/r_118.png \n",
+ " inflating: data/standup/train/r_068.png \n",
+ " inflating: data/standup/train/r_141.png \n",
+ " inflating: data/standup/train/r_040.png \n",
+ " inflating: data/standup/train/r_082.png \n",
+ " inflating: data/standup/train/r_131.png \n",
+ " inflating: data/standup/train/r_022.png \n",
+ " inflating: data/standup/train/r_130.png \n",
+ " inflating: data/standup/train/r_099.png \n",
+ " inflating: data/standup/train/r_149.png \n",
+ " inflating: data/standup/train/r_107.png \n",
+ " inflating: data/standup/train/r_004.png \n",
+ " inflating: data/standup/train/r_146.png \n",
+ " inflating: data/standup/train/r_063.png \n",
+ " inflating: data/standup/train/r_094.png \n",
+ " inflating: data/standup/train/r_021.png \n",
+ " inflating: data/standup/train/r_060.png \n",
+ " inflating: data/standup/train/r_072.png \n",
+ " creating: data/lego/\n",
+ " inflating: data/lego/transforms_train.json \n",
+ " inflating: data/lego/transforms_test.json \n",
+ " inflating: data/lego/transforms_val.json \n",
+ " creating: data/lego/train/\n",
+ " inflating: data/lego/train/r_48.png \n",
+ " inflating: data/lego/train/r_35.png \n",
+ " inflating: data/lego/train/r_3.png \n",
+ " inflating: data/lego/train/r_37.png \n",
+ " inflating: data/lego/train/r_27.png \n",
+ " inflating: data/lego/train/r_41.png \n",
+ " inflating: data/lego/train/r_18.png \n",
+ " inflating: data/lego/train/r_21.png \n",
+ " inflating: data/lego/train/r_42.png \n",
+ " inflating: data/lego/train/r_16.png \n",
+ " inflating: data/lego/train/r_49.png \n",
+ " inflating: data/lego/train/r_39.png \n",
+ " inflating: data/lego/train/r_7.png \n",
+ " inflating: data/lego/train/r_29.png \n",
+ " inflating: data/lego/train/r_10.png \n",
+ " inflating: data/lego/train/r_12.png \n",
+ " inflating: data/lego/train/r_15.png \n",
+ " inflating: data/lego/train/r_36.png \n",
+ " inflating: data/lego/train/r_9.png \n",
+ " inflating: data/lego/train/r_19.png \n",
+ " inflating: data/lego/train/r_40.png \n",
+ " inflating: data/lego/train/r_34.png \n",
+ " inflating: data/lego/train/r_5.png \n",
+ " inflating: data/lego/train/r_45.png \n",
+ " inflating: data/lego/train/r_25.png \n",
+ " inflating: data/lego/train/r_8.png \n",
+ " inflating: data/lego/train/r_13.png \n",
+ " inflating: data/lego/train/r_33.png \n",
+ " inflating: data/lego/train/r_32.png \n",
+ " inflating: data/lego/train/r_38.png \n",
+ " inflating: data/lego/train/r_2.png \n",
+ " inflating: data/lego/train/r_31.png \n",
+ " inflating: data/lego/train/r_1.png \n",
+ " inflating: data/lego/train/r_14.png \n",
+ " inflating: data/lego/train/r_22.png \n",
+ " inflating: data/lego/train/r_6.png \n",
+ " inflating: data/lego/train/r_0.png \n",
+ " inflating: data/lego/train/r_17.png \n",
+ " inflating: data/lego/train/r_44.png \n",
+ " inflating: data/lego/train/r_46.png \n",
+ " inflating: data/lego/train/r_26.png \n",
+ " inflating: data/lego/train/r_20.png \n",
+ " inflating: data/lego/train/r_23.png \n",
+ " inflating: data/lego/train/r_30.png \n",
+ " inflating: data/lego/train/r_47.png \n",
+ " inflating: data/lego/train/r_11.png \n",
+ " inflating: data/lego/train/r_28.png \n",
+ " inflating: data/lego/train/r_24.png \n",
+ " inflating: data/lego/train/r_43.png \n",
+ " inflating: data/lego/train/r_4.png \n",
+ " creating: data/lego/test/\n",
+ " inflating: data/lego/test/r_000.png \n",
+ " inflating: data/lego/test/r_001.png \n",
+ " inflating: data/lego/test/r_002.png \n",
+ " inflating: data/lego/test/r_003.png \n",
+ " inflating: data/lego/test/r_004.png \n",
+ " inflating: data/lego/test/r_005.png \n",
+ " inflating: data/lego/test/r_006.png \n",
+ " inflating: data/lego/test/r_007.png \n",
+ " inflating: data/lego/test/r_008.png \n",
+ " inflating: data/lego/test/r_009.png \n",
+ " inflating: data/lego/test/r_010.png \n",
+ " inflating: data/lego/test/r_011.png \n",
+ " inflating: data/lego/test/r_012.png \n",
+ " inflating: data/lego/test/r_013.png \n",
+ " inflating: data/lego/test/r_014.png \n",
+ " inflating: data/lego/test/r_015.png \n",
+ " inflating: data/lego/test/r_016.png \n",
+ " inflating: data/lego/test/r_017.png \n",
+ " inflating: data/lego/test/r_018.png \n",
+ " inflating: data/lego/test/r_019.png \n",
+ " creating: data/lego/val/\n",
+ " inflating: data/lego/val/r_3.png \n",
+ " inflating: data/lego/val/r_7.png \n",
+ " inflating: data/lego/val/r_10.png \n",
+ " inflating: data/lego/val/r_9.png \n",
+ " inflating: data/lego/val/r_5.png \n",
+ " inflating: data/lego/val/r_8.png \n",
+ " inflating: data/lego/val/r_2.png \n",
+ " inflating: data/lego/val/r_1.png \n",
+ " inflating: data/lego/val/r_6.png \n",
+ " inflating: data/lego/val/r_0.png \n",
+ " inflating: data/lego/val/r_4.png \n",
+ " creating: data/jumpingjacks/\n",
+ " inflating: data/jumpingjacks/transforms_train.json \n",
+ " inflating: data/jumpingjacks/transforms_test.json \n",
+ " inflating: data/jumpingjacks/transforms_val.json \n",
+ " creating: data/jumpingjacks/val/\n",
+ " inflating: data/jumpingjacks/val/r_012.png \n",
+ " inflating: data/jumpingjacks/val/r_003.png \n",
+ " inflating: data/jumpingjacks/val/r_017.png \n",
+ " inflating: data/jumpingjacks/val/r_009.png \n",
+ " inflating: data/jumpingjacks/val/r_006.png \n",
+ " inflating: data/jumpingjacks/val/r_014.png \n",
+ " inflating: data/jumpingjacks/val/r_005.png \n",
+ " inflating: data/jumpingjacks/val/r_013.png \n",
+ " inflating: data/jumpingjacks/val/r_001.png \n",
+ " inflating: data/jumpingjacks/val/transforms.json \n",
+ " inflating: data/jumpingjacks/val/r_000.png \n",
+ " inflating: data/jumpingjacks/val/r_018.png \n",
+ " inflating: data/jumpingjacks/val/r_011.png \n",
+ " inflating: data/jumpingjacks/val/r_007.png \n",
+ " inflating: data/jumpingjacks/val/r_008.png \n",
+ " inflating: data/jumpingjacks/val/r_019.png \n",
+ " inflating: data/jumpingjacks/val/r_015.png \n",
+ " inflating: data/jumpingjacks/val/r_010.png \n",
+ " inflating: data/jumpingjacks/val/r_016.png \n",
+ " inflating: data/jumpingjacks/val/r_002.png \n",
+ " inflating: data/jumpingjacks/val/r_004.png \n",
+ " creating: data/jumpingjacks/test/\n",
+ " inflating: data/jumpingjacks/test/r_012.png \n",
+ " inflating: data/jumpingjacks/test/r_003.png \n",
+ " inflating: data/jumpingjacks/test/r_017.png \n",
+ " inflating: data/jumpingjacks/test/r_009.png \n",
+ " inflating: data/jumpingjacks/test/r_006.png \n",
+ " inflating: data/jumpingjacks/test/r_014.png \n",
+ " inflating: data/jumpingjacks/test/r_005.png \n",
+ " inflating: data/jumpingjacks/test/r_013.png \n",
+ " inflating: data/jumpingjacks/test/r_001.png \n",
+ " inflating: data/jumpingjacks/test/transforms.json \n",
+ " inflating: data/jumpingjacks/test/r_000.png \n",
+ " inflating: data/jumpingjacks/test/r_018.png \n",
+ " inflating: data/jumpingjacks/test/r_011.png \n",
+ " inflating: data/jumpingjacks/test/r_007.png \n",
+ " inflating: data/jumpingjacks/test/r_008.png \n",
+ " inflating: data/jumpingjacks/test/r_019.png \n",
+ " inflating: data/jumpingjacks/test/r_015.png \n",
+ " inflating: data/jumpingjacks/test/r_010.png \n",
+ " inflating: data/jumpingjacks/test/r_016.png \n",
+ " inflating: data/jumpingjacks/test/r_002.png \n",
+ " inflating: data/jumpingjacks/test/r_004.png \n",
+ " creating: data/jumpingjacks/train/\n",
+ " inflating: data/jumpingjacks/train/r_129.png \n",
+ " inflating: data/jumpingjacks/train/r_174.png \n",
+ " inflating: data/jumpingjacks/train/r_061.png \n",
+ " inflating: data/jumpingjacks/train/r_088.png \n",
+ " inflating: data/jumpingjacks/train/r_194.png \n",
+ " inflating: data/jumpingjacks/train/r_027.png \n",
+ " inflating: data/jumpingjacks/train/r_134.png \n",
+ " inflating: data/jumpingjacks/train/r_065.png \n",
+ " inflating: data/jumpingjacks/train/r_100.png \n",
+ " inflating: data/jumpingjacks/train/r_012.png \n",
+ " inflating: data/jumpingjacks/train/r_003.png \n",
+ " inflating: data/jumpingjacks/train/r_142.png \n",
+ " inflating: data/jumpingjacks/train/r_122.png \n",
+ " inflating: data/jumpingjacks/train/r_078.png \n",
+ " inflating: data/jumpingjacks/train/r_136.png \n",
+ " inflating: data/jumpingjacks/train/r_059.png \n",
+ " inflating: data/jumpingjacks/train/r_049.png \n",
+ " inflating: data/jumpingjacks/train/r_085.png \n",
+ " inflating: data/jumpingjacks/train/r_026.png \n",
+ " inflating: data/jumpingjacks/train/r_175.png \n",
+ " inflating: data/jumpingjacks/train/r_093.png \n",
+ " inflating: data/jumpingjacks/train/r_075.png \n",
+ " inflating: data/jumpingjacks/train/r_115.png \n",
+ " inflating: data/jumpingjacks/train/r_173.png \n",
+ " inflating: data/jumpingjacks/train/r_166.png \n",
+ " inflating: data/jumpingjacks/train/r_192.png \n",
+ " inflating: data/jumpingjacks/train/r_083.png \n",
+ " inflating: data/jumpingjacks/train/r_110.png \n",
+ " inflating: data/jumpingjacks/train/r_170.png \n",
+ " inflating: data/jumpingjacks/train/r_017.png \n",
+ " inflating: data/jumpingjacks/train/r_196.png \n",
+ " inflating: data/jumpingjacks/train/r_139.png \n",
+ " inflating: data/jumpingjacks/train/r_009.png \n",
+ " inflating: data/jumpingjacks/train/r_199.png \n",
+ " inflating: data/jumpingjacks/train/r_164.png \n",
+ " inflating: data/jumpingjacks/train/r_020.png \n",
+ " inflating: data/jumpingjacks/train/r_168.png \n",
+ " inflating: data/jumpingjacks/train/r_186.png \n",
+ " inflating: data/jumpingjacks/train/r_124.png \n",
+ " inflating: data/jumpingjacks/train/r_006.png \n",
+ " inflating: data/jumpingjacks/train/r_014.png \n",
+ " inflating: data/jumpingjacks/train/r_132.png \n",
+ " inflating: data/jumpingjacks/train/r_054.png \n",
+ " inflating: data/jumpingjacks/train/r_184.png \n",
+ " inflating: data/jumpingjacks/train/r_032.png \n",
+ " inflating: data/jumpingjacks/train/r_089.png \n",
+ " inflating: data/jumpingjacks/train/r_005.png \n",
+ " inflating: data/jumpingjacks/train/r_195.png \n",
+ " inflating: data/jumpingjacks/train/r_191.png \n",
+ " inflating: data/jumpingjacks/train/r_172.png \n",
+ " inflating: data/jumpingjacks/train/r_096.png \n",
+ " inflating: data/jumpingjacks/train/r_030.png \n",
+ " inflating: data/jumpingjacks/train/r_031.png \n",
+ " inflating: data/jumpingjacks/train/r_025.png \n",
+ " inflating: data/jumpingjacks/train/r_097.png \n",
+ " inflating: data/jumpingjacks/train/r_161.png \n",
+ " inflating: data/jumpingjacks/train/r_058.png \n",
+ " inflating: data/jumpingjacks/train/r_043.png \n",
+ " inflating: data/jumpingjacks/train/r_103.png \n",
+ " inflating: data/jumpingjacks/train/r_042.png \n",
+ " inflating: data/jumpingjacks/train/r_056.png \n",
+ " inflating: data/jumpingjacks/train/r_176.png \n",
+ " inflating: data/jumpingjacks/train/r_098.png \n",
+ " inflating: data/jumpingjacks/train/r_148.png \n",
+ " inflating: data/jumpingjacks/train/r_038.png \n",
+ " inflating: data/jumpingjacks/train/r_179.png \n",
+ " inflating: data/jumpingjacks/train/r_167.png \n",
+ " inflating: data/jumpingjacks/train/r_137.png \n",
+ " inflating: data/jumpingjacks/train/r_140.png \n",
+ " inflating: data/jumpingjacks/train/r_183.png \n",
+ " inflating: data/jumpingjacks/train/r_108.png \n",
+ " inflating: data/jumpingjacks/train/r_102.png \n",
+ " inflating: data/jumpingjacks/train/r_069.png \n",
+ " inflating: data/jumpingjacks/train/r_076.png \n",
+ " inflating: data/jumpingjacks/train/r_045.png \n",
+ " inflating: data/jumpingjacks/train/r_035.png \n",
+ " inflating: data/jumpingjacks/train/r_127.png \n",
+ " inflating: data/jumpingjacks/train/r_013.png \n",
+ " inflating: data/jumpingjacks/train/r_051.png \n",
+ " inflating: data/jumpingjacks/train/r_144.png \n",
+ " inflating: data/jumpingjacks/train/r_001.png \n",
+ " inflating: data/jumpingjacks/train/r_055.png \n",
+ " inflating: data/jumpingjacks/train/r_095.png \n",
+ " inflating: data/jumpingjacks/train/r_155.png \n",
+ " inflating: data/jumpingjacks/train/r_079.png \n",
+ " inflating: data/jumpingjacks/train/r_052.png \n",
+ " inflating: data/jumpingjacks/train/r_162.png \n",
+ " inflating: data/jumpingjacks/train/r_046.png \n",
+ " inflating: data/jumpingjacks/train/r_044.png \n",
+ " inflating: data/jumpingjacks/train/r_064.png \n",
+ " inflating: data/jumpingjacks/train/r_029.png \n",
+ " inflating: data/jumpingjacks/train/r_120.png \n",
+ " inflating: data/jumpingjacks/train/transforms.json \n",
+ " inflating: data/jumpingjacks/train/r_105.png \n",
+ " inflating: data/jumpingjacks/train/r_156.png \n",
+ " inflating: data/jumpingjacks/train/r_163.png \n",
+ " inflating: data/jumpingjacks/train/r_091.png \n",
+ " inflating: data/jumpingjacks/train/r_160.png \n",
+ " inflating: data/jumpingjacks/train/r_109.png \n",
+ " inflating: data/jumpingjacks/train/r_028.png \n",
+ " inflating: data/jumpingjacks/train/r_126.png \n",
+ " inflating: data/jumpingjacks/train/r_048.png \n",
+ " inflating: data/jumpingjacks/train/r_150.png \n",
+ " inflating: data/jumpingjacks/train/r_123.png \n",
+ " inflating: data/jumpingjacks/train/r_154.png \n",
+ " inflating: data/jumpingjacks/train/r_101.png \n",
+ " inflating: data/jumpingjacks/train/r_092.png \n",
+ " inflating: data/jumpingjacks/train/r_143.png \n",
+ " inflating: data/jumpingjacks/train/r_084.png \n",
+ " inflating: data/jumpingjacks/train/r_116.png \n",
+ " inflating: data/jumpingjacks/train/r_158.png \n",
+ " inflating: data/jumpingjacks/train/r_171.png \n",
+ " inflating: data/jumpingjacks/train/r_128.png \n",
+ " inflating: data/jumpingjacks/train/r_000.png \n",
+ " inflating: data/jumpingjacks/train/r_187.png \n",
+ " inflating: data/jumpingjacks/train/r_181.png \n",
+ " inflating: data/jumpingjacks/train/r_034.png \n",
+ " inflating: data/jumpingjacks/train/r_111.png \n",
+ " inflating: data/jumpingjacks/train/r_119.png \n",
+ " inflating: data/jumpingjacks/train/r_039.png \n",
+ " inflating: data/jumpingjacks/train/r_198.png \n",
+ " inflating: data/jumpingjacks/train/r_077.png \n",
+ " inflating: data/jumpingjacks/train/r_062.png \n",
+ " inflating: data/jumpingjacks/train/r_018.png \n",
+ " inflating: data/jumpingjacks/train/r_178.png \n",
+ " inflating: data/jumpingjacks/train/r_106.png \n",
+ " inflating: data/jumpingjacks/train/r_011.png \n",
+ " inflating: data/jumpingjacks/train/r_047.png \n",
+ " inflating: data/jumpingjacks/train/r_033.png \n",
+ " inflating: data/jumpingjacks/train/r_080.png \n",
+ " inflating: data/jumpingjacks/train/r_165.png \n",
+ " inflating: data/jumpingjacks/train/r_074.png \n",
+ " inflating: data/jumpingjacks/train/r_135.png \n",
+ " inflating: data/jumpingjacks/train/r_067.png \n",
+ " inflating: data/jumpingjacks/train/r_086.png \n",
+ " inflating: data/jumpingjacks/train/r_177.png \n",
+ " inflating: data/jumpingjacks/train/r_151.png \n",
+ " inflating: data/jumpingjacks/train/r_087.png \n",
+ " inflating: data/jumpingjacks/train/r_104.png \n",
+ " inflating: data/jumpingjacks/train/r_007.png \n",
+ " inflating: data/jumpingjacks/train/r_188.png \n",
+ " inflating: data/jumpingjacks/train/r_193.png \n",
+ " inflating: data/jumpingjacks/train/r_024.png \n",
+ " inflating: data/jumpingjacks/train/r_197.png \n",
+ " inflating: data/jumpingjacks/train/r_138.png \n",
+ " inflating: data/jumpingjacks/train/r_057.png \n",
+ " inflating: data/jumpingjacks/train/r_145.png \n",
+ " inflating: data/jumpingjacks/train/r_112.png \n",
+ " inflating: data/jumpingjacks/train/r_050.png \n",
+ " inflating: data/jumpingjacks/train/r_008.png \n",
+ " inflating: data/jumpingjacks/train/r_117.png \n",
+ " inflating: data/jumpingjacks/train/r_169.png \n",
+ " inflating: data/jumpingjacks/train/r_152.png \n",
+ " inflating: data/jumpingjacks/train/r_073.png \n",
+ " inflating: data/jumpingjacks/train/r_019.png \n",
+ " inflating: data/jumpingjacks/train/r_121.png \n",
+ " inflating: data/jumpingjacks/train/r_180.png \n",
+ " inflating: data/jumpingjacks/train/r_189.png \n",
+ " inflating: data/jumpingjacks/train/r_023.png \n",
+ " inflating: data/jumpingjacks/train/r_081.png \n",
+ " inflating: data/jumpingjacks/train/r_090.png \n",
+ " inflating: data/jumpingjacks/train/r_015.png \n",
+ " inflating: data/jumpingjacks/train/r_053.png \n",
+ " inflating: data/jumpingjacks/train/r_037.png \n",
+ " inflating: data/jumpingjacks/train/r_066.png \n",
+ " inflating: data/jumpingjacks/train/r_114.png \n",
+ " inflating: data/jumpingjacks/train/r_071.png \n",
+ " inflating: data/jumpingjacks/train/r_113.png \n",
+ " inflating: data/jumpingjacks/train/r_010.png \n",
+ " inflating: data/jumpingjacks/train/r_147.png \n",
+ " inflating: data/jumpingjacks/train/r_070.png \n",
+ " inflating: data/jumpingjacks/train/r_016.png \n",
+ " inflating: data/jumpingjacks/train/r_185.png \n",
+ " inflating: data/jumpingjacks/train/r_153.png \n",
+ " inflating: data/jumpingjacks/train/r_125.png \n",
+ " inflating: data/jumpingjacks/train/r_002.png \n",
+ " inflating: data/jumpingjacks/train/r_041.png \n",
+ " inflating: data/jumpingjacks/train/r_133.png \n",
+ " inflating: data/jumpingjacks/train/r_036.png \n",
+ " inflating: data/jumpingjacks/train/r_118.png \n",
+ " inflating: data/jumpingjacks/train/r_068.png \n",
+ " inflating: data/jumpingjacks/train/r_141.png \n",
+ " inflating: data/jumpingjacks/train/r_040.png \n",
+ " inflating: data/jumpingjacks/train/r_082.png \n",
+ " inflating: data/jumpingjacks/train/r_131.png \n",
+ " inflating: data/jumpingjacks/train/r_022.png \n",
+ " inflating: data/jumpingjacks/train/r_130.png \n",
+ " inflating: data/jumpingjacks/train/r_099.png \n",
+ " inflating: data/jumpingjacks/train/r_149.png \n",
+ " inflating: data/jumpingjacks/train/r_107.png \n",
+ " inflating: data/jumpingjacks/train/r_004.png \n",
+ " inflating: data/jumpingjacks/train/r_146.png \n",
+ " inflating: data/jumpingjacks/train/r_190.png \n",
+ " inflating: data/jumpingjacks/train/r_182.png \n",
+ " inflating: data/jumpingjacks/train/r_063.png \n",
+ " inflating: data/jumpingjacks/train/r_094.png \n",
+ " inflating: data/jumpingjacks/train/r_021.png \n",
+ " inflating: data/jumpingjacks/train/r_157.png \n",
+ " inflating: data/jumpingjacks/train/r_060.png \n",
+ " inflating: data/jumpingjacks/train/r_159.png \n",
+ " inflating: data/jumpingjacks/train/r_072.png \n",
+ " creating: data/mutant/\n",
+ " inflating: data/mutant/transforms_train.json \n",
+ " inflating: data/mutant/transforms_test.json \n",
+ " inflating: data/mutant/transforms_val.json \n",
+ " creating: data/mutant/val/\n",
+ " inflating: data/mutant/val/r_012.png \n",
+ " inflating: data/mutant/val/r_003.png \n",
+ " inflating: data/mutant/val/r_017.png \n",
+ " inflating: data/mutant/val/r_009.png \n",
+ " inflating: data/mutant/val/r_006.png \n",
+ " inflating: data/mutant/val/r_014.png \n",
+ " inflating: data/mutant/val/r_005.png \n",
+ " inflating: data/mutant/val/r_013.png \n",
+ " inflating: data/mutant/val/r_001.png \n",
+ " inflating: data/mutant/val/r_000.png \n",
+ " inflating: data/mutant/val/r_018.png \n",
+ " inflating: data/mutant/val/r_011.png \n",
+ " inflating: data/mutant/val/r_007.png \n",
+ " inflating: data/mutant/val/r_008.png \n",
+ " inflating: data/mutant/val/r_019.png \n",
+ " inflating: data/mutant/val/r_015.png \n",
+ " inflating: data/mutant/val/r_010.png \n",
+ " inflating: data/mutant/val/r_016.png \n",
+ " inflating: data/mutant/val/r_002.png \n",
+ " inflating: data/mutant/val/r_004.png \n",
+ " creating: data/mutant/test/\n",
+ " inflating: data/mutant/test/r_012.png \n",
+ " inflating: data/mutant/test/r_003.png \n",
+ " inflating: data/mutant/test/r_017.png \n",
+ " inflating: data/mutant/test/r_009.png \n",
+ " inflating: data/mutant/test/r_006.png \n",
+ " inflating: data/mutant/test/r_011.png \n",
+ " inflating: data/mutant/test/r_014.png \n",
+ " inflating: data/mutant/test/r_005.png \n",
+ " inflating: data/mutant/test/r_013.png \n",
+ " inflating: data/mutant/test/r_001.png \n",
+ " inflating: data/mutant/test/r_000.png \n",
+ " inflating: data/mutant/test/r_018.png \n",
+ " inflating: data/mutant/test/r_007.png \n",
+ " inflating: data/mutant/test/r_008.png \n",
+ " inflating: data/mutant/test/r_019.png \n",
+ " inflating: data/mutant/test/r_015.png \n",
+ " inflating: data/mutant/test/r_010.png \n",
+ " inflating: data/mutant/test/r_016.png \n",
+ " inflating: data/mutant/test/r_002.png \n",
+ " inflating: data/mutant/test/r_004.png \n",
+ " creating: data/mutant/train/\n",
+ " inflating: data/mutant/train/r_129.png \n",
+ " inflating: data/mutant/train/r_061.png \n",
+ " inflating: data/mutant/train/r_088.png \n",
+ " inflating: data/mutant/train/r_027.png \n",
+ " inflating: data/mutant/train/r_134.png \n",
+ " inflating: data/mutant/train/r_065.png \n",
+ " inflating: data/mutant/train/r_100.png \n",
+ " inflating: data/mutant/train/r_012.png \n",
+ " inflating: data/mutant/train/r_003.png \n",
+ " inflating: data/mutant/train/r_142.png \n",
+ " inflating: data/mutant/train/r_122.png \n",
+ " inflating: data/mutant/train/r_078.png \n",
+ " inflating: data/mutant/train/r_136.png \n",
+ " inflating: data/mutant/train/r_059.png \n",
+ " inflating: data/mutant/train/r_049.png \n",
+ " inflating: data/mutant/train/r_085.png \n",
+ " inflating: data/mutant/train/r_026.png \n",
+ " inflating: data/mutant/train/r_093.png \n",
+ " inflating: data/mutant/train/r_075.png \n",
+ " inflating: data/mutant/train/r_115.png \n",
+ " inflating: data/mutant/train/r_083.png \n",
+ " inflating: data/mutant/train/r_110.png \n",
+ " inflating: data/mutant/train/r_017.png \n",
+ " inflating: data/mutant/train/r_139.png \n",
+ " inflating: data/mutant/train/r_009.png \n",
+ " inflating: data/mutant/train/r_020.png \n",
+ " inflating: data/mutant/train/r_124.png \n",
+ " inflating: data/mutant/train/r_006.png \n",
+ " inflating: data/mutant/train/r_014.png \n",
+ " inflating: data/mutant/train/r_132.png \n",
+ " inflating: data/mutant/train/r_054.png \n",
+ " inflating: data/mutant/train/r_032.png \n",
+ " inflating: data/mutant/train/r_089.png \n",
+ " inflating: data/mutant/train/r_005.png \n",
+ " inflating: data/mutant/train/r_096.png \n",
+ " inflating: data/mutant/train/r_030.png \n",
+ " inflating: data/mutant/train/r_031.png \n",
+ " inflating: data/mutant/train/r_025.png \n",
+ " inflating: data/mutant/train/r_097.png \n",
+ " inflating: data/mutant/train/r_058.png \n",
+ " inflating: data/mutant/train/r_043.png \n",
+ " inflating: data/mutant/train/r_103.png \n",
+ " inflating: data/mutant/train/r_042.png \n",
+ " inflating: data/mutant/train/r_056.png \n",
+ " inflating: data/mutant/train/r_098.png \n",
+ " inflating: data/mutant/train/r_148.png \n",
+ " inflating: data/mutant/train/r_038.png \n",
+ " inflating: data/mutant/train/r_137.png \n",
+ " inflating: data/mutant/train/r_140.png \n",
+ " inflating: data/mutant/train/r_108.png \n",
+ " inflating: data/mutant/train/r_102.png \n",
+ " inflating: data/mutant/train/r_069.png \n",
+ " inflating: data/mutant/train/r_076.png \n",
+ " inflating: data/mutant/train/r_045.png \n",
+ " inflating: data/mutant/train/r_035.png \n",
+ " inflating: data/mutant/train/r_127.png \n",
+ " inflating: data/mutant/train/r_013.png \n",
+ " inflating: data/mutant/train/r_051.png \n",
+ " inflating: data/mutant/train/r_144.png \n",
+ " inflating: data/mutant/train/r_001.png \n",
+ " inflating: data/mutant/train/r_055.png \n",
+ " inflating: data/mutant/train/r_095.png \n",
+ " inflating: data/mutant/train/r_079.png \n",
+ " inflating: data/mutant/train/r_052.png \n",
+ " inflating: data/mutant/train/r_046.png \n",
+ " inflating: data/mutant/train/r_044.png \n",
+ " inflating: data/mutant/train/r_064.png \n",
+ " inflating: data/mutant/train/r_029.png \n",
+ " inflating: data/mutant/train/r_120.png \n",
+ " inflating: data/mutant/train/r_105.png \n",
+ " inflating: data/mutant/train/r_091.png \n",
+ " inflating: data/mutant/train/r_109.png \n",
+ " inflating: data/mutant/train/r_028.png \n",
+ " inflating: data/mutant/train/r_126.png \n",
+ " inflating: data/mutant/train/r_048.png \n",
+ " inflating: data/mutant/train/r_123.png \n",
+ " inflating: data/mutant/train/r_101.png \n",
+ " inflating: data/mutant/train/r_092.png \n",
+ " inflating: data/mutant/train/r_143.png \n",
+ " inflating: data/mutant/train/r_084.png \n",
+ " inflating: data/mutant/train/r_116.png \n",
+ " inflating: data/mutant/train/r_128.png \n",
+ " inflating: data/mutant/train/r_000.png \n",
+ " inflating: data/mutant/train/r_034.png \n",
+ " inflating: data/mutant/train/r_111.png \n",
+ " inflating: data/mutant/train/r_119.png \n",
+ " inflating: data/mutant/train/r_039.png \n",
+ " inflating: data/mutant/train/r_077.png \n",
+ " inflating: data/mutant/train/r_062.png \n",
+ " inflating: data/mutant/train/r_018.png \n",
+ " inflating: data/mutant/train/r_106.png \n",
+ " inflating: data/mutant/train/r_011.png \n",
+ " inflating: data/mutant/train/r_047.png \n",
+ " inflating: data/mutant/train/r_033.png \n",
+ " inflating: data/mutant/train/r_080.png \n",
+ " inflating: data/mutant/train/r_074.png \n",
+ " inflating: data/mutant/train/r_135.png \n",
+ " inflating: data/mutant/train/r_067.png \n",
+ " inflating: data/mutant/train/r_086.png \n",
+ " inflating: data/mutant/train/r_087.png \n",
+ " inflating: data/mutant/train/r_104.png \n",
+ " inflating: data/mutant/train/r_007.png \n",
+ " inflating: data/mutant/train/r_024.png \n",
+ " inflating: data/mutant/train/r_138.png \n",
+ " inflating: data/mutant/train/r_057.png \n",
+ " inflating: data/mutant/train/r_145.png \n",
+ " inflating: data/mutant/train/r_112.png \n",
+ " inflating: data/mutant/train/r_050.png \n",
+ " inflating: data/mutant/train/r_008.png \n",
+ " inflating: data/mutant/train/r_117.png \n",
+ " inflating: data/mutant/train/r_073.png \n",
+ " inflating: data/mutant/train/r_019.png \n",
+ " inflating: data/mutant/train/r_121.png \n",
+ " inflating: data/mutant/train/r_023.png \n",
+ " inflating: data/mutant/train/r_081.png \n",
+ " inflating: data/mutant/train/r_090.png \n",
+ " inflating: data/mutant/train/r_015.png \n",
+ " inflating: data/mutant/train/r_053.png \n",
+ " inflating: data/mutant/train/r_037.png \n",
+ " inflating: data/mutant/train/r_066.png \n",
+ " inflating: data/mutant/train/r_114.png \n",
+ " inflating: data/mutant/train/r_071.png \n",
+ " inflating: data/mutant/train/r_113.png \n",
+ " inflating: data/mutant/train/r_010.png \n",
+ " inflating: data/mutant/train/r_147.png \n",
+ " inflating: data/mutant/train/r_070.png \n",
+ " inflating: data/mutant/train/r_016.png \n",
+ " inflating: data/mutant/train/r_125.png \n",
+ " inflating: data/mutant/train/r_002.png \n",
+ " inflating: data/mutant/train/r_041.png \n",
+ " inflating: data/mutant/train/r_133.png \n",
+ " inflating: data/mutant/train/r_036.png \n",
+ " inflating: data/mutant/train/r_118.png \n",
+ " inflating: data/mutant/train/r_068.png \n",
+ " inflating: data/mutant/train/r_141.png \n",
+ " inflating: data/mutant/train/r_040.png \n",
+ " inflating: data/mutant/train/r_082.png \n",
+ " inflating: data/mutant/train/r_131.png \n",
+ " inflating: data/mutant/train/r_022.png \n",
+ " inflating: data/mutant/train/r_130.png \n",
+ " inflating: data/mutant/train/r_099.png \n",
+ " inflating: data/mutant/train/r_149.png \n",
+ " inflating: data/mutant/train/r_107.png \n",
+ " inflating: data/mutant/train/r_004.png \n",
+ " inflating: data/mutant/train/r_146.png \n",
+ " inflating: data/mutant/train/r_063.png \n",
+ " inflating: data/mutant/train/r_094.png \n",
+ " inflating: data/mutant/train/r_021.png \n",
+ " inflating: data/mutant/train/r_060.png \n",
+ " inflating: data/mutant/train/r_072.png \n",
+ " creating: data/hellwarrior/\n",
+ " inflating: data/hellwarrior/transforms_train.json \n",
+ " inflating: data/hellwarrior/transforms_test.json \n",
+ " inflating: data/hellwarrior/transforms_val.json \n",
+ " creating: data/hellwarrior/val/\n",
+ " inflating: data/hellwarrior/val/r_012.png \n",
+ " inflating: data/hellwarrior/val/r_003.png \n",
+ " inflating: data/hellwarrior/val/r_017.png \n",
+ " inflating: data/hellwarrior/val/r_009.png \n",
+ " inflating: data/hellwarrior/val/r_006.png \n",
+ " inflating: data/hellwarrior/val/r_014.png \n",
+ " inflating: data/hellwarrior/val/r_005.png \n",
+ " inflating: data/hellwarrior/val/r_013.png \n",
+ " inflating: data/hellwarrior/val/r_001.png \n",
+ " inflating: data/hellwarrior/val/r_000.png \n",
+ " inflating: data/hellwarrior/val/r_018.png \n",
+ " inflating: data/hellwarrior/val/r_011.png \n",
+ " inflating: data/hellwarrior/val/r_007.png \n",
+ " inflating: data/hellwarrior/val/r_008.png \n",
+ " inflating: data/hellwarrior/val/r_019.png \n",
+ " inflating: data/hellwarrior/val/r_015.png \n",
+ " inflating: data/hellwarrior/val/r_010.png \n",
+ " inflating: data/hellwarrior/val/r_016.png \n",
+ " inflating: data/hellwarrior/val/r_002.png \n",
+ " inflating: data/hellwarrior/val/r_004.png \n",
+ " creating: data/hellwarrior/test/\n",
+ " inflating: data/hellwarrior/test/r_012.png \n",
+ " inflating: data/hellwarrior/test/r_003.png \n",
+ " inflating: data/hellwarrior/test/r_017.png \n",
+ " inflating: data/hellwarrior/test/r_009.png \n",
+ " inflating: data/hellwarrior/test/r_006.png \n",
+ " inflating: data/hellwarrior/test/r_014.png \n",
+ " inflating: data/hellwarrior/test/r_005.png \n",
+ " inflating: data/hellwarrior/test/r_013.png \n",
+ " inflating: data/hellwarrior/test/r_001.png \n",
+ " inflating: data/hellwarrior/test/r_000.png \n",
+ " inflating: data/hellwarrior/test/r_018.png \n",
+ " inflating: data/hellwarrior/test/r_011.png \n",
+ " inflating: data/hellwarrior/test/r_007.png \n",
+ " inflating: data/hellwarrior/test/r_008.png \n",
+ " inflating: data/hellwarrior/test/r_019.png \n",
+ " inflating: data/hellwarrior/test/r_015.png \n",
+ " inflating: data/hellwarrior/test/r_010.png \n",
+ " inflating: data/hellwarrior/test/r_016.png \n",
+ " inflating: data/hellwarrior/test/r_002.png \n",
+ " inflating: data/hellwarrior/test/r_004.png \n",
+ " creating: data/hellwarrior/train/\n",
+ " inflating: data/hellwarrior/train/r_061.png \n",
+ " inflating: data/hellwarrior/train/r_088.png \n",
+ " inflating: data/hellwarrior/train/r_027.png \n",
+ " inflating: data/hellwarrior/train/r_065.png \n",
+ " inflating: data/hellwarrior/train/r_012.png \n",
+ " inflating: data/hellwarrior/train/r_003.png \n",
+ " inflating: data/hellwarrior/train/r_078.png \n",
+ " inflating: data/hellwarrior/train/r_059.png \n",
+ " inflating: data/hellwarrior/train/r_049.png \n",
+ " inflating: data/hellwarrior/train/r_085.png \n",
+ " inflating: data/hellwarrior/train/r_026.png \n",
+ " inflating: data/hellwarrior/train/r_093.png \n",
+ " inflating: data/hellwarrior/train/r_075.png \n",
+ " inflating: data/hellwarrior/train/r_083.png \n",
+ " inflating: data/hellwarrior/train/r_017.png \n",
+ " inflating: data/hellwarrior/train/r_009.png \n",
+ " inflating: data/hellwarrior/train/r_020.png \n",
+ " inflating: data/hellwarrior/train/r_006.png \n",
+ " inflating: data/hellwarrior/train/r_014.png \n",
+ " inflating: data/hellwarrior/train/r_054.png \n",
+ " inflating: data/hellwarrior/train/r_032.png \n",
+ " inflating: data/hellwarrior/train/r_089.png \n",
+ " inflating: data/hellwarrior/train/r_005.png \n",
+ " inflating: data/hellwarrior/train/r_096.png \n",
+ " inflating: data/hellwarrior/train/r_030.png \n",
+ " inflating: data/hellwarrior/train/r_031.png \n",
+ " inflating: data/hellwarrior/train/r_025.png \n",
+ " inflating: data/hellwarrior/train/r_097.png \n",
+ " inflating: data/hellwarrior/train/r_058.png \n",
+ " inflating: data/hellwarrior/train/r_043.png \n",
+ " inflating: data/hellwarrior/train/r_042.png \n",
+ " inflating: data/hellwarrior/train/r_056.png \n",
+ " inflating: data/hellwarrior/train/r_098.png \n",
+ " inflating: data/hellwarrior/train/r_038.png \n",
+ " inflating: data/hellwarrior/train/r_069.png \n",
+ " inflating: data/hellwarrior/train/r_076.png \n",
+ " inflating: data/hellwarrior/train/r_045.png \n",
+ " inflating: data/hellwarrior/train/r_035.png \n",
+ " inflating: data/hellwarrior/train/r_013.png \n",
+ " inflating: data/hellwarrior/train/r_051.png \n",
+ " inflating: data/hellwarrior/train/r_001.png \n",
+ " inflating: data/hellwarrior/train/r_055.png \n",
+ " inflating: data/hellwarrior/train/r_095.png \n",
+ " inflating: data/hellwarrior/train/r_079.png \n",
+ " inflating: data/hellwarrior/train/r_052.png \n",
+ " inflating: data/hellwarrior/train/r_046.png \n",
+ " inflating: data/hellwarrior/train/r_044.png \n",
+ " inflating: data/hellwarrior/train/r_064.png \n",
+ " inflating: data/hellwarrior/train/r_029.png \n",
+ " inflating: data/hellwarrior/train/r_091.png \n",
+ " inflating: data/hellwarrior/train/r_028.png \n",
+ " inflating: data/hellwarrior/train/r_048.png \n",
+ " inflating: data/hellwarrior/train/r_092.png \n",
+ " inflating: data/hellwarrior/train/r_084.png \n",
+ " inflating: data/hellwarrior/train/r_000.png \n",
+ " inflating: data/hellwarrior/train/r_034.png \n",
+ " inflating: data/hellwarrior/train/r_039.png \n",
+ " inflating: data/hellwarrior/train/r_077.png \n",
+ " inflating: data/hellwarrior/train/r_062.png \n",
+ " inflating: data/hellwarrior/train/r_018.png \n",
+ " inflating: data/hellwarrior/train/r_011.png \n",
+ " inflating: data/hellwarrior/train/r_047.png \n",
+ " inflating: data/hellwarrior/train/r_033.png \n",
+ " inflating: data/hellwarrior/train/r_080.png \n",
+ " inflating: data/hellwarrior/train/r_074.png \n",
+ " inflating: data/hellwarrior/train/r_067.png \n",
+ " inflating: data/hellwarrior/train/r_086.png \n",
+ " inflating: data/hellwarrior/train/r_087.png \n",
+ " inflating: data/hellwarrior/train/r_007.png \n",
+ " inflating: data/hellwarrior/train/r_024.png \n",
+ " inflating: data/hellwarrior/train/r_057.png \n",
+ " inflating: data/hellwarrior/train/r_050.png \n",
+ " inflating: data/hellwarrior/train/r_008.png \n",
+ " inflating: data/hellwarrior/train/r_073.png \n",
+ " inflating: data/hellwarrior/train/r_019.png \n",
+ " inflating: data/hellwarrior/train/r_023.png \n",
+ " inflating: data/hellwarrior/train/r_081.png \n",
+ " inflating: data/hellwarrior/train/r_090.png \n",
+ " inflating: data/hellwarrior/train/r_015.png \n",
+ " inflating: data/hellwarrior/train/r_053.png \n",
+ " inflating: data/hellwarrior/train/r_037.png \n",
+ " inflating: data/hellwarrior/train/r_066.png \n",
+ " inflating: data/hellwarrior/train/r_071.png \n",
+ " inflating: data/hellwarrior/train/r_010.png \n",
+ " inflating: data/hellwarrior/train/r_070.png \n",
+ " inflating: data/hellwarrior/train/r_016.png \n",
+ " inflating: data/hellwarrior/train/r_002.png \n",
+ " inflating: data/hellwarrior/train/r_041.png \n",
+ " inflating: data/hellwarrior/train/r_036.png \n",
+ " inflating: data/hellwarrior/train/r_068.png \n",
+ " inflating: data/hellwarrior/train/r_040.png \n",
+ " inflating: data/hellwarrior/train/r_082.png \n",
+ " inflating: data/hellwarrior/train/r_022.png \n",
+ " inflating: data/hellwarrior/train/r_099.png \n",
+ " inflating: data/hellwarrior/train/r_004.png \n",
+ " inflating: data/hellwarrior/train/r_063.png \n",
+ " inflating: data/hellwarrior/train/r_094.png \n",
+ " inflating: data/hellwarrior/train/r_021.png \n",
+ " inflating: data/hellwarrior/train/r_060.png \n",
+ " inflating: data/hellwarrior/train/r_072.png \n",
+ " creating: data/trex/\n",
+ " inflating: data/trex/transforms_train.json \n",
+ " inflating: data/trex/transforms_test.json \n",
+ " inflating: data/trex/transforms_val.json \n",
+ " creating: data/trex/val/\n",
+ " inflating: data/trex/val/r_012.png \n",
+ " inflating: data/trex/val/r_003.png \n",
+ " inflating: data/trex/val/r_017.png \n",
+ " inflating: data/trex/val/r_009.png \n",
+ " inflating: data/trex/val/r_006.png \n",
+ " inflating: data/trex/val/r_014.png \n",
+ " inflating: data/trex/val/r_005.png \n",
+ " inflating: data/trex/val/r_013.png \n",
+ " inflating: data/trex/val/r_001.png \n",
+ " inflating: data/trex/val/r_000.png \n",
+ " inflating: data/trex/val/r_018.png \n",
+ " inflating: data/trex/val/r_011.png \n",
+ " inflating: data/trex/val/r_007.png \n",
+ " inflating: data/trex/val/r_008.png \n",
+ " inflating: data/trex/val/r_019.png \n",
+ " inflating: data/trex/val/r_015.png \n",
+ " inflating: data/trex/val/r_010.png \n",
+ " inflating: data/trex/val/r_016.png \n",
+ " inflating: data/trex/val/r_002.png \n",
+ " inflating: data/trex/val/r_004.png \n",
+ " creating: data/trex/test/\n",
+ " inflating: data/trex/test/r_012.png \n",
+ " inflating: data/trex/test/r_003.png \n",
+ " inflating: data/trex/test/r_017.png \n",
+ " inflating: data/trex/test/r_009.png \n",
+ " inflating: data/trex/test/r_006.png \n",
+ " inflating: data/trex/test/r_014.png \n",
+ " inflating: data/trex/test/r_005.png \n",
+ " inflating: data/trex/test/r_013.png \n",
+ " inflating: data/trex/test/r_001.png \n",
+ " inflating: data/trex/test/r_000.png \n",
+ " inflating: data/trex/test/r_018.png \n",
+ " inflating: data/trex/test/r_011.png \n",
+ " inflating: data/trex/test/r_007.png \n",
+ " inflating: data/trex/test/r_008.png \n",
+ " inflating: data/trex/test/r_019.png \n",
+ " inflating: data/trex/test/r_015.png \n",
+ " inflating: data/trex/test/r_010.png \n",
+ " inflating: data/trex/test/r_016.png \n",
+ " inflating: data/trex/test/r_002.png \n",
+ " inflating: data/trex/test/r_004.png \n",
+ " creating: data/trex/train/\n",
+ " inflating: data/trex/train/r_129.png \n",
+ " inflating: data/trex/train/r_174.png \n",
+ " inflating: data/trex/train/r_061.png \n",
+ " inflating: data/trex/train/r_088.png \n",
+ " inflating: data/trex/train/r_194.png \n",
+ " inflating: data/trex/train/r_027.png \n",
+ " inflating: data/trex/train/r_134.png \n",
+ " inflating: data/trex/train/r_065.png \n",
+ " inflating: data/trex/train/r_100.png \n",
+ " inflating: data/trex/train/r_012.png \n",
+ " inflating: data/trex/train/r_003.png \n",
+ " inflating: data/trex/train/r_142.png \n",
+ " inflating: data/trex/train/r_122.png \n",
+ " inflating: data/trex/train/r_078.png \n",
+ " inflating: data/trex/train/r_136.png \n",
+ " inflating: data/trex/train/r_059.png \n",
+ " inflating: data/trex/train/r_049.png \n",
+ " inflating: data/trex/train/r_085.png \n",
+ " inflating: data/trex/train/r_026.png \n",
+ " inflating: data/trex/train/r_175.png \n",
+ " inflating: data/trex/train/r_093.png \n",
+ " inflating: data/trex/train/r_075.png \n",
+ " inflating: data/trex/train/r_115.png \n",
+ " inflating: data/trex/train/r_173.png \n",
+ " inflating: data/trex/train/r_166.png \n",
+ " inflating: data/trex/train/r_192.png \n",
+ " inflating: data/trex/train/r_083.png \n",
+ " inflating: data/trex/train/r_110.png \n",
+ " inflating: data/trex/train/r_170.png \n",
+ " inflating: data/trex/train/r_017.png \n",
+ " inflating: data/trex/train/r_196.png \n",
+ " inflating: data/trex/train/r_139.png \n",
+ " inflating: data/trex/train/r_009.png \n",
+ " inflating: data/trex/train/r_199.png \n",
+ " inflating: data/trex/train/r_164.png \n",
+ " inflating: data/trex/train/r_020.png \n",
+ " inflating: data/trex/train/r_168.png \n",
+ " inflating: data/trex/train/r_186.png \n",
+ " inflating: data/trex/train/r_124.png \n",
+ " inflating: data/trex/train/r_006.png \n",
+ " inflating: data/trex/train/r_014.png \n",
+ " inflating: data/trex/train/r_132.png \n",
+ " inflating: data/trex/train/r_054.png \n",
+ " inflating: data/trex/train/r_184.png \n",
+ " inflating: data/trex/train/r_032.png \n",
+ " inflating: data/trex/train/r_089.png \n",
+ " inflating: data/trex/train/r_005.png \n",
+ " inflating: data/trex/train/r_195.png \n",
+ " inflating: data/trex/train/r_191.png \n",
+ " inflating: data/trex/train/r_172.png \n",
+ " inflating: data/trex/train/r_096.png \n",
+ " inflating: data/trex/train/r_030.png \n",
+ " inflating: data/trex/train/r_031.png \n",
+ " inflating: data/trex/train/r_025.png \n",
+ " inflating: data/trex/train/r_097.png \n",
+ " inflating: data/trex/train/r_161.png \n",
+ " inflating: data/trex/train/r_058.png \n",
+ " inflating: data/trex/train/r_043.png \n",
+ " inflating: data/trex/train/r_103.png \n",
+ " inflating: data/trex/train/r_042.png \n",
+ " inflating: data/trex/train/r_056.png \n",
+ " inflating: data/trex/train/r_176.png \n",
+ " inflating: data/trex/train/r_098.png \n",
+ " inflating: data/trex/train/r_148.png \n",
+ " inflating: data/trex/train/r_038.png \n",
+ " inflating: data/trex/train/r_179.png \n",
+ " inflating: data/trex/train/r_167.png \n",
+ " inflating: data/trex/train/r_137.png \n",
+ " inflating: data/trex/train/r_140.png \n",
+ " inflating: data/trex/train/r_183.png \n",
+ " inflating: data/trex/train/r_108.png \n",
+ " inflating: data/trex/train/r_102.png \n",
+ " inflating: data/trex/train/r_069.png \n",
+ " inflating: data/trex/train/r_076.png \n",
+ " inflating: data/trex/train/r_045.png \n",
+ " inflating: data/trex/train/r_035.png \n",
+ " inflating: data/trex/train/r_127.png \n",
+ " inflating: data/trex/train/r_013.png \n",
+ " inflating: data/trex/train/r_051.png \n",
+ " inflating: data/trex/train/r_144.png \n",
+ " inflating: data/trex/train/r_001.png \n",
+ " inflating: data/trex/train/r_055.png \n",
+ " inflating: data/trex/train/r_095.png \n",
+ " inflating: data/trex/train/r_155.png \n",
+ " inflating: data/trex/train/r_079.png \n",
+ " inflating: data/trex/train/r_052.png \n",
+ " inflating: data/trex/train/r_162.png \n",
+ " inflating: data/trex/train/r_046.png \n",
+ " inflating: data/trex/train/r_044.png \n",
+ " inflating: data/trex/train/r_064.png \n",
+ " inflating: data/trex/train/r_029.png \n",
+ " inflating: data/trex/train/r_120.png \n",
+ " inflating: data/trex/train/r_105.png \n",
+ " inflating: data/trex/train/r_156.png \n",
+ " inflating: data/trex/train/r_163.png \n",
+ " inflating: data/trex/train/r_091.png \n",
+ " inflating: data/trex/train/r_160.png \n",
+ " inflating: data/trex/train/r_109.png \n",
+ " inflating: data/trex/train/r_028.png \n",
+ " inflating: data/trex/train/r_126.png \n",
+ " inflating: data/trex/train/r_048.png \n",
+ " inflating: data/trex/train/r_150.png \n",
+ " inflating: data/trex/train/r_123.png \n",
+ " inflating: data/trex/train/r_154.png \n",
+ " inflating: data/trex/train/r_101.png \n",
+ " inflating: data/trex/train/r_092.png \n",
+ " inflating: data/trex/train/r_143.png \n",
+ " inflating: data/trex/train/r_084.png \n",
+ " inflating: data/trex/train/r_116.png \n",
+ " inflating: data/trex/train/r_158.png \n",
+ " inflating: data/trex/train/r_171.png \n",
+ " inflating: data/trex/train/r_128.png \n",
+ " inflating: data/trex/train/r_000.png \n",
+ " inflating: data/trex/train/r_187.png \n",
+ " inflating: data/trex/train/r_181.png \n",
+ " inflating: data/trex/train/r_034.png \n",
+ " inflating: data/trex/train/r_111.png \n",
+ " inflating: data/trex/train/r_119.png \n",
+ " inflating: data/trex/train/r_039.png \n",
+ " inflating: data/trex/train/r_198.png \n",
+ " inflating: data/trex/train/r_077.png \n",
+ " inflating: data/trex/train/r_062.png \n",
+ " inflating: data/trex/train/r_018.png \n",
+ " inflating: data/trex/train/r_178.png \n",
+ " inflating: data/trex/train/r_106.png \n",
+ " inflating: data/trex/train/r_011.png \n",
+ " inflating: data/trex/train/r_047.png \n",
+ " inflating: data/trex/train/r_033.png \n",
+ " inflating: data/trex/train/r_080.png \n",
+ " inflating: data/trex/train/r_165.png \n",
+ " inflating: data/trex/train/r_074.png \n",
+ " inflating: data/trex/train/r_135.png \n",
+ " inflating: data/trex/train/r_067.png \n",
+ " inflating: data/trex/train/r_086.png \n",
+ " inflating: data/trex/train/r_177.png \n",
+ " inflating: data/trex/train/r_151.png \n",
+ " inflating: data/trex/train/r_087.png \n",
+ " inflating: data/trex/train/r_104.png \n",
+ " inflating: data/trex/train/r_007.png \n",
+ " inflating: data/trex/train/r_188.png \n",
+ " inflating: data/trex/train/r_193.png \n",
+ " inflating: data/trex/train/r_024.png \n",
+ " inflating: data/trex/train/r_197.png \n",
+ " inflating: data/trex/train/r_138.png \n",
+ " inflating: data/trex/train/r_057.png \n",
+ " inflating: data/trex/train/r_145.png \n",
+ " inflating: data/trex/train/r_112.png \n",
+ " inflating: data/trex/train/r_050.png \n",
+ " inflating: data/trex/train/r_008.png \n",
+ " inflating: data/trex/train/r_117.png \n",
+ " inflating: data/trex/train/r_169.png \n",
+ " inflating: data/trex/train/r_152.png \n",
+ " inflating: data/trex/train/r_073.png \n",
+ " inflating: data/trex/train/r_019.png \n",
+ " inflating: data/trex/train/r_121.png \n",
+ " inflating: data/trex/train/r_180.png \n",
+ " inflating: data/trex/train/r_189.png \n",
+ " inflating: data/trex/train/r_023.png \n",
+ " inflating: data/trex/train/r_081.png \n",
+ " inflating: data/trex/train/r_090.png \n",
+ " inflating: data/trex/train/r_015.png \n",
+ " inflating: data/trex/train/r_053.png \n",
+ " inflating: data/trex/train/r_037.png \n",
+ " inflating: data/trex/train/r_066.png \n",
+ " inflating: data/trex/train/r_114.png \n",
+ " inflating: data/trex/train/r_071.png \n",
+ " inflating: data/trex/train/r_113.png \n",
+ " inflating: data/trex/train/r_010.png \n",
+ " inflating: data/trex/train/r_147.png \n",
+ " inflating: data/trex/train/r_070.png \n",
+ " inflating: data/trex/train/r_016.png \n",
+ " inflating: data/trex/train/r_185.png \n",
+ " inflating: data/trex/train/r_153.png \n",
+ " inflating: data/trex/train/r_125.png \n",
+ " inflating: data/trex/train/r_002.png \n",
+ " inflating: data/trex/train/r_041.png \n",
+ " inflating: data/trex/train/r_133.png \n",
+ " inflating: data/trex/train/r_036.png \n",
+ " inflating: data/trex/train/r_118.png \n",
+ " inflating: data/trex/train/r_068.png \n",
+ " inflating: data/trex/train/r_141.png \n",
+ " inflating: data/trex/train/r_040.png \n",
+ " inflating: data/trex/train/r_082.png \n",
+ " inflating: data/trex/train/r_131.png \n",
+ " inflating: data/trex/train/r_022.png \n",
+ " inflating: data/trex/train/r_130.png \n",
+ " inflating: data/trex/train/r_099.png \n",
+ " inflating: data/trex/train/r_149.png \n",
+ " inflating: data/trex/train/r_107.png \n",
+ " inflating: data/trex/train/r_004.png \n",
+ " inflating: data/trex/train/r_146.png \n",
+ " inflating: data/trex/train/r_190.png \n",
+ " inflating: data/trex/train/r_182.png \n",
+ " inflating: data/trex/train/r_063.png \n",
+ " inflating: data/trex/train/r_094.png \n",
+ " inflating: data/trex/train/r_021.png \n",
+ " inflating: data/trex/train/r_157.png \n",
+ " inflating: data/trex/train/r_060.png \n",
+ " inflating: data/trex/train/r_159.png \n",
+ " inflating: data/trex/train/r_072.png \n"
+ ]
+ }
+ ]
},
{
"cell_type": "markdown",
- "source": [],
+ "source": [
+ "# **Bouncing Balls**"
+ ],
"metadata": {
"id": "IBB-voY0oqpy"
}
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": 5,
"metadata": {
"id": "8hk10A8Al7_V",
- "outputId": "e6df1ec7-fe5c-4b08-bee5-bcf077759c2a",
+ "outputId": "55a54e75-5985-4ae7-f704-c65335ba0e1b",
"colab": {
"base_uri": "https://localhost:8080/"
}
@@ -199,253 +1844,319 @@
"name": "stdout",
"text": [
"/content/4DGaussians\n",
+ "2024-02-11 17:13:42.330401: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 17:13:42.330453: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 17:13:42.331839: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 17:13:43.851263: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"Optimizing \n",
- "Output folder: ./output/dnerf/bouncingballs [18/10 12:15:58]\n",
- "2023-10-18 12:15:59.094660: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
- "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
- "2023-10-18 12:16:00.723098: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
- "feature_dim: 128 [18/10 12:16:03]\n",
- "Found transforms_train.json file, assuming Blender data set! [18/10 12:16:03]\n",
- "Reading Training Transforms [18/10 12:16:03]\n",
- "Reading Test Transforms [18/10 12:16:16]\n",
- "Generating Video Transforms [18/10 12:16:18]\n",
- "Generating random point cloud (2000)... [18/10 12:16:19]\n",
- "Loading Training Cameras [18/10 12:16:19]\n",
- "Loading Test Cameras [18/10 12:16:19]\n",
- "Loading Video Cameras [18/10 12:16:19]\n",
+ "Output folder: ./output/dnerf/bouncingballs [11/02 17:13:46]\n",
+ "feature_dim: 64 [11/02 17:13:46]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 17:13:46]\n",
+ "Reading Training Transforms [11/02 17:13:46]\n",
+ "Reading Test Transforms [11/02 17:13:59]\n",
+ "Generating Video Transforms [11/02 17:14:01]\n",
+ "hello!!!! [11/02 17:14:01]\n",
+ "Generating random point cloud (2000)... [11/02 17:14:01]\n",
+ "Loading Training Cameras [11/02 17:14:01]\n",
+ "Loading Test Cameras [11/02 17:14:01]\n",
+ "Loading Video Cameras [11/02 17:14:01]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 17:14:01]\n",
"Voxel Plane: set aabb= Parameter containing:\n",
"tensor([[ 1.2998, 1.2999, 1.2999],\n",
- " [-1.2998, -1.2998, -1.2987]], requires_grad=True) [18/10 12:16:20]\n",
- "Number of points at initialisation : 2000 [18/10 12:16:24]\n",
- "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [18/10 12:16:24]\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 17:14:01]\n",
+ "Number of points at initialisation : 2000 [11/02 17:14:01]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:14:02]\n",
"Downloading: \"https://download.pytorch.org/models/alexnet-owt-7be5be79.pth\" to /root/.cache/torch/hub/checkpoints/alexnet-owt-7be5be79.pth\n",
"\n",
" 0% 0.00/233M [00:00, ?B/s]\u001b[A\n",
- " 3% 8.09M/233M [00:00<00:02, 84.9MB/s]\u001b[A\n",
- " 10% 23.0M/233M [00:00<00:01, 127MB/s] \u001b[A\n",
- " 23% 54.7M/233M [00:00<00:00, 221MB/s]\u001b[A\n",
- " 36% 83.1M/233M [00:00<00:00, 251MB/s]\u001b[A\n",
- " 46% 107M/233M [00:00<00:00, 231MB/s] \u001b[A\n",
- " 57% 132M/233M [00:00<00:00, 242MB/s]\u001b[A\n",
- " 68% 158M/233M [00:00<00:00, 250MB/s]\u001b[A\n",
- " 80% 185M/233M [00:00<00:00, 262MB/s]\u001b[A\n",
- "100% 233M/233M [00:01<00:00, 241MB/s]\n",
- "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [18/10 12:16:26]\n",
- "Training progress: 17% 500/3000 [00:22<01:21, 30.50it/s, Loss=0.0293588, psnr=22.72, point=2000]\n",
- "[ITER 500] Evaluating test: L1 0.03289371422108482 PSNR 21.315539640538834 [18/10 12:16:49]\n",
- "\n",
- "[ITER 500] Evaluating train: L1 0.042692432180047035 PSNR 19.828696531407974 [18/10 12:16:50]\n",
- "reset opacity [18/10 12:16:50]\n",
- "Training progress: 33% 1000/3000 [00:44<01:22, 24.30it/s, Loss=0.0231845, psnr=21.27, point=3108]\n",
- "[ITER 1000] Evaluating test: L1 0.02628705880659468 PSNR 21.702505616580737 [18/10 12:17:11]\n",
- "\n",
- "[ITER 1000] Evaluating train: L1 0.03691562473335687 PSNR 20.561191895428827 [18/10 12:17:13]\n",
- "Training progress: 50% 1500/3000 [01:05<01:11, 20.97it/s, Loss=0.0207843, psnr=25.42, point=7718]\n",
- "[ITER 1500] Evaluating test: L1 0.02419357346918653 PSNR 21.819040074067956 [18/10 12:17:32]\n",
- "\n",
- "[ITER 1500] Evaluating train: L1 0.03390577419058365 PSNR 20.735389709472656 [18/10 12:17:33]\n",
- "Training progress: 67% 2000/3000 [01:25<00:40, 24.63it/s, Loss=0.0269778, psnr=17.78, point=11409]\n",
- "[ITER 2000] Evaluating test: L1 0.024030927787808812 PSNR 21.883982994977167 [18/10 12:17:52]\n",
- "\n",
- "[ITER 2000] Evaluating train: L1 0.033870420254328674 PSNR 20.992827583761777 [18/10 12:17:53]\n",
- "\n",
- "[ITER 2000] Saving Gaussians [18/10 12:17:53]\n",
- "Training progress: 83% 2500/3000 [01:46<00:13, 35.83it/s, Loss=0.0251006, psnr=17.91, point=14595]\n",
- "[ITER 2500] Evaluating test: L1 0.022819621762370363 PSNR 21.854939965640796 [18/10 12:18:12]\n",
- "\n",
- "[ITER 2500] Evaluating train: L1 0.032913146978792024 PSNR 20.98833117765539 [18/10 12:18:13]\n",
- "Training progress: 100% 3000/3000 [02:06<00:00, 35.66it/s, Loss=0.0184311, psnr=19.64, point=17436]\n",
- "[ITER 3000] Evaluating test: L1 0.021785845569170573 PSNR 22.046124009525073 [18/10 12:18:32]\n",
- "\n",
- "[ITER 3000] Evaluating train: L1 0.032102140597999096 PSNR 20.985104953541477 [18/10 12:18:33]\n",
- "\n",
- "[ITER 3000] Saving Gaussians [18/10 12:18:33]\n",
- "reset opacity [18/10 12:18:33]\n",
- "Training progress: 100% 3000/3000 [02:09<00:00, 23.22it/s, Loss=0.0184311, psnr=19.64, point=17436]\n",
- "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [18/10 12:18:35]\n",
- "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [18/10 12:18:35]\n",
- "Training progress: 2% 500/20000 [00:38<25:33, 12.72it/s, Loss=0.0094448, psnr=27.35, point=17895]\n",
- "[ITER 500] Evaluating test: L1 0.009282524012686574 PSNR 29.097346922930548 [18/10 12:19:15]\n",
- "\n",
- "[ITER 500] Evaluating train: L1 0.010759759173892876 PSNR 28.454158446368048 [18/10 12:19:17]\n",
- "reset opacity [18/10 12:19:17]\n",
- "Training progress: 5% 1000/20000 [01:17<26:32, 11.93it/s, Loss=0.0079862, psnr=28.01, point=18581]\n",
- "[ITER 1000] Evaluating test: L1 0.007670877473976682 PSNR 30.087999568266028 [18/10 12:19:54]\n",
- "\n",
- "[ITER 1000] Evaluating train: L1 0.00795259847141364 PSNR 30.263615327722885 [18/10 12:19:55]\n",
- "Training progress: 8% 1500/20000 [01:52<26:57, 11.44it/s, Loss=0.0053360, psnr=36.24, point=19801]\n",
- "[ITER 1500] Evaluating test: L1 0.007201356813311577 PSNR 30.24399544210995 [18/10 12:20:28]\n",
- "\n",
- "[ITER 1500] Evaluating train: L1 0.008079059038530378 PSNR 29.928070965935202 [18/10 12:20:29]\n",
- "Training progress: 10% 2000/20000 [02:27<27:27, 10.92it/s, Loss=0.0046025, psnr=31.91, point=21122]\n",
- "[ITER 2000] Evaluating test: L1 0.0053621865261126965 PSNR 32.535747191485235 [18/10 12:21:04]\n",
- "\n",
- "[ITER 2000] Evaluating train: L1 0.0061909939326784185 PSNR 31.593194288365982 [18/10 12:21:05]\n",
- "\n",
- "[ITER 2000] Saving Gaussians [18/10 12:21:05]\n",
- "Training progress: 12% 2500/20000 [03:04<21:54, 13.31it/s, Loss=0.0041578, psnr=35.14, point=22289]\n",
- "[ITER 2500] Evaluating test: L1 0.0048581437919946275 PSNR 32.970312903909125 [18/10 12:21:40]\n",
- "\n",
- "[ITER 2500] Evaluating train: L1 0.00490456263479941 PSNR 33.57072886298685 [18/10 12:21:41]\n",
- "Training progress: 15% 3000/20000 [03:40<19:10, 14.78it/s, Loss=0.0048178, psnr=30.44, point=23309]\n",
- "[ITER 3000] Evaluating test: L1 0.0045357245047960215 PSNR 33.26883473115809 [18/10 12:22:16]\n",
- "\n",
- "[ITER 3000] Evaluating train: L1 0.004470993901657707 PSNR 33.78879479800953 [18/10 12:22:17]\n",
+ " 3% 6.09M/233M [00:00<00:03, 63.8MB/s]\u001b[A\n",
+ " 5% 12.2M/233M [00:00<00:04, 55.5MB/s]\u001b[A\n",
+ " 12% 28.6M/233M [00:00<00:02, 105MB/s] \u001b[A\n",
+ " 19% 44.8M/233M [00:00<00:01, 129MB/s]\u001b[A\n",
+ " 27% 63.0M/233M [00:00<00:01, 151MB/s]\u001b[A\n",
+ " 33% 77.7M/233M [00:00<00:01, 138MB/s]\u001b[A\n",
+ " 39% 91.2M/233M [00:00<00:01, 131MB/s]\u001b[A\n",
+ " 45% 104M/233M [00:00<00:01, 130MB/s] \u001b[A\n",
+ " 50% 118M/233M [00:00<00:00, 133MB/s]\u001b[A\n",
+ " 56% 132M/233M [00:01<00:00, 137MB/s]\u001b[A\n",
+ " 62% 146M/233M [00:01<00:00, 140MB/s]\u001b[A\n",
+ " 68% 159M/233M [00:01<00:00, 133MB/s]\u001b[A\n",
+ " 75% 174M/233M [00:01<00:00, 141MB/s]\u001b[A\n",
+ " 82% 191M/233M [00:01<00:00, 151MB/s]\u001b[A\n",
+ " 88% 206M/233M [00:01<00:00, 145MB/s]\u001b[A\n",
+ "100% 233M/233M [00:01<00:00, 132MB/s]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:14:04]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:14:04]\n",
+ "Downloading: \"https://download.pytorch.org/models/vgg16-397923af.pth\" to /root/.cache/torch/hub/checkpoints/vgg16-397923af.pth\n",
+ "\n",
+ " 0% 0.00/528M [00:00, ?B/s]\u001b[A\n",
+ " 1% 7.63M/528M [00:00<00:06, 80.0MB/s]\u001b[A\n",
+ " 3% 15.3M/528M [00:00<00:06, 79.0MB/s]\u001b[A\n",
+ " 6% 29.4M/528M [00:00<00:04, 110MB/s] \u001b[A\n",
+ " 8% 44.7M/528M [00:00<00:03, 130MB/s]\u001b[A\n",
+ " 11% 60.2M/528M [00:00<00:03, 142MB/s]\u001b[A\n",
+ " 14% 75.5M/528M [00:00<00:03, 148MB/s]\u001b[A\n",
+ " 17% 89.6M/528M [00:00<00:03, 138MB/s]\u001b[A\n",
+ " 19% 103M/528M [00:00<00:03, 130MB/s] \u001b[A\n",
+ " 22% 116M/528M [00:00<00:03, 133MB/s]\u001b[A\n",
+ " 25% 132M/528M [00:01<00:02, 141MB/s]\u001b[A\n",
+ " 28% 145M/528M [00:01<00:02, 139MB/s]\u001b[A\n",
+ " 30% 160M/528M [00:01<00:02, 145MB/s]\u001b[A\n",
+ " 33% 175M/528M [00:01<00:02, 147MB/s]\u001b[A\n",
+ " 36% 189M/528M [00:01<00:02, 147MB/s]\u001b[A\n",
+ " 38% 203M/528M [00:01<00:02, 148MB/s]\u001b[A\n",
+ " 41% 218M/528M [00:01<00:02, 151MB/s]\u001b[A\n",
+ " 44% 233M/528M [00:01<00:03, 88.9MB/s]\u001b[A\n",
+ " 47% 248M/528M [00:02<00:02, 104MB/s] \u001b[A\n",
+ " 50% 263M/528M [00:02<00:02, 115MB/s]\u001b[A\n",
+ " 53% 279M/528M [00:02<00:02, 128MB/s]\u001b[A\n",
+ " 56% 295M/528M [00:02<00:01, 138MB/s]\u001b[A\n",
+ " 59% 310M/528M [00:02<00:01, 144MB/s]\u001b[A\n",
+ " 62% 325M/528M [00:02<00:01, 131MB/s]\u001b[A\n",
+ " 64% 340M/528M [00:02<00:01, 136MB/s]\u001b[A\n",
+ " 67% 354M/528M [00:02<00:01, 141MB/s]\u001b[A\n",
+ " 70% 368M/528M [00:02<00:01, 130MB/s]\u001b[A\n",
+ " 72% 381M/528M [00:03<00:01, 121MB/s]\u001b[A\n",
+ " 75% 393M/528M [00:03<00:01, 112MB/s]\u001b[A\n",
+ " 77% 404M/528M [00:03<00:01, 113MB/s]\u001b[A\n",
+ " 79% 415M/528M [00:03<00:01, 114MB/s]\u001b[A\n",
+ " 81% 427M/528M [00:03<00:00, 112MB/s]\u001b[A\n",
+ " 83% 437M/528M [00:03<00:00, 110MB/s]\u001b[A\n",
+ " 85% 448M/528M [00:03<00:00, 101MB/s]\u001b[A\n",
+ " 87% 458M/528M [00:03<00:00, 92.3MB/s]\u001b[A\n",
+ " 89% 468M/528M [00:04<00:00, 94.9MB/s]\u001b[A\n",
+ " 90% 477M/528M [00:04<00:00, 95.7MB/s]\u001b[A\n",
+ " 92% 486M/528M [00:04<00:00, 95.4MB/s]\u001b[A\n",
+ " 94% 496M/528M [00:04<00:00, 98.7MB/s]\u001b[A\n",
+ " 96% 506M/528M [00:04<00:00, 98.0MB/s]\u001b[A\n",
+ " 98% 517M/528M [00:04<00:00, 103MB/s] \u001b[A\n",
+ "100% 528M/528M [00:04<00:00, 120MB/s]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 17:14:11]\n",
+ "data loading done [11/02 17:14:14]\n",
+ "Training progress: 17% 500/3000 [00:32<01:29, 27.84it/s, Loss=0.0291453, psnr=24.36, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.033344455392045134 PSNR 21.442816790412454 SSIM 0.9483118653297424 LPIPSA 0.19757015126592972 LPIPSV 0.1673430265749202 [11/02 17:14:39]\n",
"\n",
- "[ITER 3000] Saving Gaussians [18/10 12:22:17]\n",
- "reset opacity [18/10 12:22:18]\n",
- "Training progress: 18% 3500/20000 [04:17<16:31, 16.65it/s, Loss=0.0038691, psnr=37.74, point=24287]\n",
- "[ITER 3500] Evaluating test: L1 0.004175359123002957 PSNR 34.040599149816174 [18/10 12:22:54]\n",
- "\n",
- "[ITER 3500] Evaluating train: L1 0.004593397463288377 PSNR 33.84662392560173 [18/10 12:22:55]\n",
- "Training progress: 20% 4000/20000 [04:54<15:52, 16.81it/s, Loss=0.0030375, psnr=36.63, point=25380]\n",
- "[ITER 4000] Evaluating test: L1 0.0038323545801069807 PSNR 34.73158320258646 [18/10 12:23:30]\n",
- "\n",
- "[ITER 4000] Evaluating train: L1 0.003796229837462306 PSNR 35.06873366411995 [18/10 12:23:31]\n",
- "Training progress: 22% 4500/20000 [05:33<16:15, 15.89it/s, Loss=0.0031049, psnr=39.84, point=26291]\n",
- "[ITER 4500] Evaluating test: L1 0.004020221230080899 PSNR 34.12317376978257 [18/10 12:24:09]\n",
- "\n",
- "[ITER 4500] Evaluating train: L1 0.0037510368069086004 PSNR 35.35118103027344 [18/10 12:24:10]\n",
- "Training progress: 25% 5000/20000 [06:10<15:03, 16.60it/s, Loss=0.0026159, psnr=38.60, point=27157]\n",
- "[ITER 5000] Evaluating test: L1 0.0034943154383012477 PSNR 35.320645949419806 [18/10 12:24:46]\n",
- "\n",
- "[ITER 5000] Evaluating train: L1 0.0029573873479795806 PSNR 37.309416378245636 [18/10 12:24:47]\n",
- "Training progress: 28% 5500/20000 [06:47<14:24, 16.78it/s, Loss=0.0021671, psnr=37.94, point=28018]\n",
- "[ITER 5500] Evaluating test: L1 0.0034261168775093906 PSNR 35.630519418155444 [18/10 12:25:23]\n",
- "\n",
- "[ITER 5500] Evaluating train: L1 0.0031126464360996206 PSNR 37.271436130299286 [18/10 12:25:24]\n",
- "Training progress: 30% 6000/20000 [07:24<13:53, 16.80it/s, Loss=0.0023847, psnr=42.02, point=28836]\n",
- "[ITER 6000] Evaluating test: L1 0.003208557965562624 PSNR 36.11713768454159 [18/10 12:26:01]\n",
- "\n",
- "[ITER 6000] Evaluating train: L1 0.0027447668013765533 PSNR 37.97009254904354 [18/10 12:26:02]\n",
- "reset opacity [18/10 12:26:02]\n",
- "Training progress: 32% 6500/20000 [08:03<18:30, 12.15it/s, Loss=0.0022719, psnr=39.56, point=29569]\n",
- "[ITER 6500] Evaluating test: L1 0.0031567666378310498 PSNR 36.61334542667164 [18/10 12:26:40]\n",
- "\n",
- "[ITER 6500] Evaluating train: L1 0.0025403551853207104 PSNR 38.88953265021829 [18/10 12:26:41]\n",
- "Training progress: 35% 7000/20000 [08:42<19:32, 11.09it/s, Loss=0.0022586, psnr=40.55, point=30208]\n",
- "[ITER 7000] Evaluating test: L1 0.003054004450164297 PSNR 36.86542510986328 [18/10 12:27:18]\n",
+ "[ITER 500] Evaluating train: L1 0.04234862722018186 PSNR 19.90948430229636 SSIM 0.9370969533920288 LPIPSA 0.25410983842961926 LPIPSV 0.19610319856335134 [11/02 17:14:43]\n",
+ "Training progress: 33% 1000/3000 [00:58<01:19, 25.18it/s, Loss=0.0294358, psnr=18.71, point=3022]\n",
+ "[ITER 1000] Evaluating test: L1 0.026132952312336248 PSNR 21.567775389727423 SSIM 0.9452808499336243 LPIPSA 0.15680382970501394 LPIPSV 0.15990467369556427 [11/02 17:15:05]\n",
"\n",
- "[ITER 7000] Evaluating train: L1 0.0023618749041548546 PSNR 39.677162394804114 [18/10 12:27:19]\n",
+ "[ITER 1000] Evaluating train: L1 0.03729216743479757 PSNR 20.235161949606503 SSIM 0.9369443655014038 LPIPSA 0.18826581011800206 LPIPSV 0.17961761048611471 [11/02 17:15:09]\n",
"\n",
- "[ITER 7000] Saving Gaussians [18/10 12:27:19]\n",
- "Training progress: 38% 7500/20000 [09:20<14:14, 14.63it/s, Loss=0.0019817, psnr=42.44, point=30844]\n",
- "[ITER 7500] Evaluating test: L1 0.002951400381896426 PSNR 37.06199399162741 [18/10 12:27:56]\n",
+ "[ITER 1000] Saving Gaussians [11/02 17:15:09]\n",
+ "Training progress: 50% 1500/3000 [01:14<00:23, 64.21it/s, Loss=0.0185342, psnr=22.82, point=7550]\n",
+ "[ITER 1500] Evaluating test: L1 0.02503924855195424 PSNR 21.83750118928797 SSIM 0.9471423029899597 LPIPSA 0.13822940824662938 LPIPSV 0.150822433478692 [11/02 17:15:20]\n",
"\n",
- "[ITER 7500] Evaluating train: L1 0.0022212145576143965 PSNR 39.99447945987477 [18/10 12:27:57]\n",
- "Training progress: 40% 8000/20000 [09:58<12:51, 15.55it/s, Loss=0.0020356, psnr=44.16, point=31374]\n",
- "[ITER 8000] Evaluating test: L1 0.002882901469574255 PSNR 37.26977247350356 [18/10 12:28:34]\n",
+ "[ITER 1500] Evaluating train: L1 0.03329154766876908 PSNR 20.934678582584155 SSIM 0.9423518776893616 LPIPSA 0.15324002109906254 LPIPSV 0.16991347688085892 [11/02 17:15:24]\n",
+ "Training progress: 67% 2000/3000 [01:29<00:16, 60.20it/s, Loss=0.0230052, psnr=24.48, point=11604]\n",
+ "[ITER 2000] Evaluating test: L1 0.024316438035491633 PSNR 21.982201856725357 SSIM 0.9499489068984985 LPIPSA 0.12758771461599014 LPIPSV 0.14279255621573506 [11/02 17:15:36]\n",
"\n",
- "[ITER 8000] Evaluating train: L1 0.0021015912198516376 PSNR 40.504227357752185 [18/10 12:28:35]\n",
+ "[ITER 2000] Evaluating train: L1 0.03365445158937398 PSNR 20.888999265782974 SSIM 0.9440020322799683 LPIPSA 0.14570020796621547 LPIPSV 0.16152718531734803 [11/02 17:15:40]\n",
+ "Training progress: 83% 2500/3000 [01:46<00:10, 47.38it/s, Loss=0.0194915, psnr=23.67, point=14638]\n",
+ "[ITER 2500] Evaluating test: L1 0.023552954251713613 PSNR 21.861000285429114 SSIM 0.952340841293335 LPIPSA 0.12023974078543045 LPIPSV 0.13423925068448572 [11/02 17:15:52]\n",
"\n",
- "[ITER 8000] Saving Gaussians [18/10 12:28:35]\n",
- "Training progress: 42% 8500/20000 [10:33<10:10, 18.84it/s, Loss=0.0019608, psnr=44.71, point=24470]\n",
- "[ITER 8500] Evaluating test: L1 0.002908740206347669 PSNR 37.15876814898323 [18/10 12:29:09]\n",
+ "[ITER 2500] Evaluating train: L1 0.03228144933853079 PSNR 20.907299378338983 SSIM 0.9467178583145142 LPIPSA 0.13708710166461327 LPIPSV 0.14932524938793743 [11/02 17:15:57]\n",
+ "Training progress: 100% 3000/3000 [02:03<00:00, 55.24it/s, Loss=0.0201820, psnr=28.55, point=17156]\n",
+ "[ITER 3000] Evaluating test: L1 0.02235250306480071 PSNR 21.911495096543256 SSIM 0.954328179359436 LPIPSA 0.11502338770557852 LPIPSV 0.1277290262720164 [11/02 17:16:09]\n",
"\n",
- "[ITER 8500] Evaluating train: L1 0.0024008757489569045 PSNR 39.982252681956574 [18/10 12:29:10]\n",
- "Training progress: 45% 9000/20000 [11:07<10:01, 18.29it/s, Loss=0.0019406, psnr=38.15, point=24968]\n",
- "[ITER 9000] Evaluating test: L1 0.0027893552734680914 PSNR 37.548010657815375 [18/10 12:29:43]\n",
+ "[ITER 3000] Evaluating train: L1 0.0312146726064384 PSNR 21.22559076197007 SSIM 0.9481726288795471 LPIPSA 0.13342776543953838 LPIPSV 0.14282278848045013 [11/02 17:16:13]\n",
"\n",
- "[ITER 9000] Evaluating train: L1 0.002034447025781607 PSNR 40.57379038193647 [18/10 12:29:44]\n",
+ "[ITER 3000] Saving Gaussians [11/02 17:16:13]\n",
+ "reset opacity [11/02 17:16:14]\n",
+ "Training progress: 100% 3000/3000 [02:12<00:00, 22.71it/s, Loss=0.0201820, psnr=28.55, point=17156]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:16:14]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:16:14]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:16:14]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 17:16:15]\n",
+ "data loading done [11/02 17:16:18]\n",
+ "Training progress: 2% 500/20000 [00:33<18:01, 18.03it/s, Loss=0.0094331, psnr=30.98, point=17612]\n",
+ "[ITER 500] Evaluating test: L1 0.00965471179498469 PSNR 29.18541414597455 SSIM 0.9728952050209045 LPIPSA 0.07696397755952443 LPIPSV 0.10133461829494028 [11/02 17:16:52]\n",
"\n",
- "[ITER 9000] Saving Gaussians [18/10 12:29:44]\n",
- "reset opacity [18/10 12:29:45]\n",
- "Training progress: 48% 9500/20000 [11:42<09:43, 17.99it/s, Loss=0.0018367, psnr=45.70, point=25301]\n",
- "[ITER 9500] Evaluating test: L1 0.002798064488588887 PSNR 37.580643822165094 [18/10 12:30:18]\n",
+ "[ITER 500] Evaluating train: L1 0.011615017100292094 PSNR 27.296819238101733 SSIM 0.9673409461975098 LPIPSA 0.08900752882747089 LPIPSV 0.11726092634832158 [11/02 17:16:57]\n",
+ "Training progress: 5% 1000/20000 [01:11<17:30, 18.08it/s, Loss=0.0063102, psnr=36.15, point=17976]\n",
+ "[ITER 1000] Evaluating test: L1 0.0070629388039164685 PSNR 30.886966817519244 SSIM 0.9783129096031189 LPIPSA 0.06130329957779716 LPIPSV 0.08163365940837299 [11/02 17:17:30]\n",
"\n",
- "[ITER 9500] Evaluating train: L1 0.0018392370068742072 PSNR 42.312099905575025 [18/10 12:30:19]\n",
- "Training progress: 50% 10000/20000 [12:16<09:44, 17.10it/s, Loss=0.0017030, psnr=43.73, point=25718]\n",
- "[ITER 10000] Evaluating test: L1 0.002735216458163717 PSNR 37.80438479255228 [18/10 12:30:52]\n",
+ "[ITER 1000] Evaluating train: L1 0.008645257720833315 PSNR 29.225243848912857 SSIM 0.9730970859527588 LPIPSA 0.06907670375178843 LPIPSV 0.0991418545736986 [11/02 17:17:34]\n",
"\n",
- "[ITER 10000] Evaluating train: L1 0.0018079423016923316 PSNR 42.22233424467199 [18/10 12:30:53]\n",
- "Training progress: 52% 10500/20000 [12:52<09:35, 16.51it/s, Loss=0.0017275, psnr=46.47, point=26017]\n",
- "[ITER 10500] Evaluating test: L1 0.0027014906312722493 PSNR 37.80445570104262 [18/10 12:31:28]\n",
+ "[ITER 1000] Saving Gaussians [11/02 17:17:34]\n",
+ "Training progress: 8% 1500/20000 [01:40<12:53, 23.92it/s, Loss=0.0053573, psnr=34.26, point=18691]\n",
+ "[ITER 1500] Evaluating test: L1 0.005978792631889091 PSNR 32.27042994779699 SSIM 0.9824097752571106 LPIPSA 0.05494876249748118 LPIPSV 0.07448462353033178 [11/02 17:17:59]\n",
"\n",
- "[ITER 10500] Evaluating train: L1 0.0017387345570194371 PSNR 42.328860338996435 [18/10 12:31:29]\n",
- "Training progress: 55% 11000/20000 [13:27<09:29, 15.80it/s, Loss=0.0014403, psnr=47.44, point=26390]\n",
- "[ITER 11000] Evaluating test: L1 0.0026626865557559274 PSNR 37.93535456937902 [18/10 12:32:03]\n",
+ "[ITER 1500] Evaluating train: L1 0.005935111096786226 PSNR 32.24723939334645 SSIM 0.9820285439491272 LPIPSA 0.055580253438914526 LPIPSV 0.08382165716851459 [11/02 17:18:03]\n",
+ "Training progress: 10% 2000/20000 [02:08<12:05, 24.81it/s, Loss=0.0049846, psnr=34.94, point=19522]\n",
+ "[ITER 2000] Evaluating test: L1 0.005307658137205769 PSNR 33.51845247605268 SSIM 0.984204888343811 LPIPSA 0.04923628741765723 LPIPSV 0.07111961328807999 [11/02 17:18:27]\n",
"\n",
- "[ITER 11000] Evaluating train: L1 0.0016114438664825523 PSNR 42.97215742223403 [18/10 12:32:04]\n",
- "Training progress: 57% 11500/20000 [14:03<08:42, 16.26it/s, Loss=0.0015507, psnr=43.64, point=26688]\n",
- "[ITER 11500] Evaluating test: L1 0.0026659999960376058 PSNR 38.01013250911937 [18/10 12:32:39]\n",
+ "[ITER 2000] Evaluating train: L1 0.005122801590272609 PSNR 33.983444550458124 SSIM 0.9848840236663818 LPIPSA 0.049014910836430156 LPIPSV 0.07505333796143532 [11/02 17:18:31]\n",
+ "Training progress: 12% 2500/20000 [02:36<11:52, 24.58it/s, Loss=0.0049489, psnr=35.03, point=20293]\n",
+ "[ITER 2500] Evaluating test: L1 0.004937225103597431 PSNR 33.44058519251206 SSIM 0.9847249388694763 LPIPSA 0.044152526184916496 LPIPSV 0.06338566673152587 [11/02 17:18:55]\n",
"\n",
- "[ITER 11500] Evaluating train: L1 0.0015885166837559903 PSNR 43.43097552131204 [18/10 12:32:40]\n",
- "Training progress: 60% 12000/20000 [14:39<07:43, 17.27it/s, Loss=0.0012203, psnr=43.22, point=27018]\n",
- "[ITER 12000] Evaluating test: L1 0.002632023794028689 PSNR 38.01683381024529 [18/10 12:33:15]\n",
+ "[ITER 2500] Evaluating train: L1 0.004837928709628827 PSNR 33.977362464456 SSIM 0.9854086637496948 LPIPSA 0.04447527591358213 LPIPSV 0.06888919915346538 [11/02 17:18:59]\n",
+ "Training progress: 15% 3000/20000 [03:04<12:08, 23.32it/s, Loss=0.0043744, psnr=31.32, point=20893]\n",
+ "[ITER 3000] Evaluating test: L1 0.004878870184149812 PSNR 33.14590487760656 SSIM 0.983465313911438 LPIPSA 0.041159657432752496 LPIPSV 0.06144880591069951 [11/02 17:19:23]\n",
"\n",
- "[ITER 12000] Evaluating train: L1 0.0015034284031785587 PSNR 43.954322814941406 [18/10 12:33:16]\n",
- "reset opacity [18/10 12:33:16]\n",
- "Training progress: 62% 12500/20000 [15:14<07:14, 17.26it/s, Loss=0.0014863, psnr=44.21, point=27187]\n",
- "[ITER 12500] Evaluating test: L1 0.002675521872280275 PSNR 37.987121133243335 [18/10 12:33:51]\n",
+ "[ITER 3000] Evaluating train: L1 0.004520086749621174 PSNR 34.0139970218434 SSIM 0.9854863286018372 LPIPSA 0.040316140498308575 LPIPSV 0.06354398674824659 [11/02 17:19:27]\n",
"\n",
- "[ITER 12500] Evaluating train: L1 0.0015443166142658274 PSNR 44.20912664076861 [18/10 12:33:51]\n",
- "Training progress: 65% 13000/20000 [15:50<06:36, 17.65it/s, Loss=0.0014596, psnr=42.48, point=27346]\n",
- "[ITER 13000] Evaluating test: L1 0.002643499585032901 PSNR 38.07360391055836 [18/10 12:34:26]\n",
+ "[ITER 3000] Saving Gaussians [11/02 17:19:27]\n",
+ "reset opacity [11/02 17:19:28]\n",
+ "Training progress: 18% 3500/20000 [03:32<11:03, 24.87it/s, Loss=0.0037020, psnr=38.04, point=21353]\n",
+ "[ITER 3500] Evaluating test: L1 0.004052422118975835 PSNR 35.171185212976795 SSIM 0.9885184168815613 LPIPSA 0.034814956324065435 LPIPSV 0.05628390807439299 [11/02 17:19:51]\n",
"\n",
- "[ITER 13000] Evaluating train: L1 0.0015044443323003018 PSNR 44.235115948845355 [18/10 12:34:27]\n",
- "Training progress: 68% 13500/20000 [16:26<07:03, 15.36it/s, Loss=0.0014033, psnr=47.03, point=27546]\n",
- "[ITER 13500] Evaluating test: L1 0.002636970452252118 PSNR 38.07138218599207 [18/10 12:35:03]\n",
+ "[ITER 3500] Evaluating train: L1 0.004139745046439416 PSNR 35.670656092026654 SSIM 0.9890636205673218 LPIPSA 0.034555265241686034 LPIPSV 0.06026854716679629 [11/02 17:19:55]\n",
+ "Training progress: 20% 4000/20000 [03:59<10:34, 25.22it/s, Loss=0.0039564, psnr=34.11, point=21866]\n",
+ "[ITER 4000] Evaluating test: L1 0.0038954356329186876 PSNR 35.170851090375116 SSIM 0.9880565404891968 LPIPSA 0.03196918843861889 LPIPSV 0.05291624783593066 [11/02 17:20:17]\n",
"\n",
- "[ITER 13500] Evaluating train: L1 0.0014004296595778536 PSNR 44.93161078060375 [18/10 12:35:04]\n",
- "Training progress: 70% 14000/20000 [17:02<07:21, 13.60it/s, Loss=0.0013125, psnr=44.89, point=27756]\n",
- "[ITER 14000] Evaluating test: L1 0.0026343300568816416 PSNR 38.09962463378906 [18/10 12:35:39]\n",
+ "[ITER 4000] Evaluating train: L1 0.003972794148413574 PSNR 35.270197475657746 SSIM 0.9875285029411316 LPIPSA 0.03112776785650674 LPIPSV 0.05401498627136735 [11/02 17:20:22]\n",
"\n",
- "[ITER 14000] Evaluating train: L1 0.0013917168766698416 PSNR 45.037186566521136 [18/10 12:35:40]\n",
+ "[ITER 4000] Saving Gaussians [11/02 17:20:22]\n",
+ "Training progress: 22% 4500/20000 [04:26<10:17, 25.09it/s, Loss=0.0030761, psnr=36.42, point=22365]\n",
+ "[ITER 4500] Evaluating test: L1 0.003528140512678553 PSNR 35.81443427590763 SSIM 0.9897298216819763 LPIPSA 0.029261142125024515 LPIPSV 0.04966423529035905 [11/02 17:20:45]\n",
"\n",
- "[ITER 14000] Saving Gaussians [18/10 12:35:40]\n",
- "Training progress: 72% 14500/20000 [17:39<07:52, 11.64it/s, Loss=0.0014793, psnr=48.82, point=27969]\n",
- "[ITER 14500] Evaluating test: L1 0.0026121085153563935 PSNR 38.17773639454561 [18/10 12:36:16]\n",
+ "[ITER 4500] Evaluating train: L1 0.003585062359514482 PSNR 36.28946214563706 SSIM 0.9901353120803833 LPIPSA 0.02939138375222683 LPIPSV 0.05131817915860344 [11/02 17:20:49]\n",
+ "Training progress: 25% 5000/20000 [04:54<10:56, 22.86it/s, Loss=0.0030918, psnr=38.08, point=22841]\n",
+ "[ITER 5000] Evaluating test: L1 0.0034398279317161616 PSNR 36.1388302971335 SSIM 0.9897947907447815 LPIPSA 0.027406190467231414 LPIPSV 0.04760411031105939 [11/02 17:21:12]\n",
"\n",
- "[ITER 14500] Evaluating train: L1 0.0013993438401752535 PSNR 45.12252426147461 [18/10 12:36:16]\n",
- "Training progress: 75% 15000/20000 [18:15<05:38, 14.77it/s, Loss=0.0012198, psnr=44.46, point=28090]\n",
- "[ITER 15000] Evaluating test: L1 0.0026121584027457762 PSNR 38.16346000222599 [18/10 12:36:51]\n",
+ "[ITER 5000] Evaluating train: L1 0.004134350154987153 PSNR 35.34251875035903 SSIM 0.9890790581703186 LPIPSA 0.027844343663138503 LPIPSV 0.05052184664151248 [11/02 17:21:17]\n",
"\n",
- "[ITER 15000] Evaluating train: L1 0.0013996757676496224 PSNR 45.407531289493335 [18/10 12:36:52]\n",
- "Training progress: 78% 15500/20000 [18:50<04:29, 16.68it/s, Loss=0.0012678, psnr=43.22, point=28090]\n",
- "[ITER 15500] Evaluating test: L1 0.002572473734343315 PSNR 38.25604607077206 [18/10 12:37:26]\n",
+ "[ITER 5000] Saving Gaussians [11/02 17:21:17]\n",
+ "Training progress: 28% 5500/20000 [05:21<09:37, 25.11it/s, Loss=0.0030957, psnr=39.54, point=23279]\n",
+ "[ITER 5500] Evaluating test: L1 0.0032627035534995444 PSNR 36.34930868709789 SSIM 0.9902652502059937 LPIPSA 0.02516296270358212 LPIPSV 0.04533027353532174 [11/02 17:21:40]\n",
"\n",
- "[ITER 15500] Evaluating train: L1 0.0012877711902975159 PSNR 45.99841375911937 [18/10 12:37:27]\n",
- "Training progress: 80% 16000/20000 [19:25<03:54, 17.06it/s, Loss=0.0013073, psnr=45.90, point=28090]\n",
- "[ITER 16000] Evaluating test: L1 0.002580023041981108 PSNR 38.257411507999194 [18/10 12:38:01]\n",
+ "[ITER 5500] Evaluating train: L1 0.00332932237207013 PSNR 36.695199854233685 SSIM 0.9907733798027039 LPIPSA 0.0238501221379813 LPIPSV 0.0453504219870357 [11/02 17:21:44]\n",
+ "Training progress: 30% 6000/20000 [05:48<09:20, 24.99it/s, Loss=0.0031851, psnr=34.87, point=23648]\n",
+ "[ITER 6000] Evaluating test: L1 0.0030600152882363867 PSNR 36.98797854255228 SSIM 0.9908812046051025 LPIPSA 0.022961369989549413 LPIPSV 0.04280429125270423 [11/02 17:22:07]\n",
"\n",
- "[ITER 16000] Evaluating train: L1 0.001255324449362781 PSNR 46.2190372242647 [18/10 12:38:02]\n",
- "Training progress: 82% 16500/20000 [20:00<03:18, 17.61it/s, Loss=0.0011351, psnr=43.90, point=28090]\n",
- "[ITER 16500] Evaluating test: L1 0.0025938723359585684 PSNR 38.22130382762236 [18/10 12:38:36]\n",
+ "[ITER 6000] Evaluating train: L1 0.0029763396254138034 PSNR 38.03166849472944 SSIM 0.992019772529602 LPIPSA 0.020957775085287934 LPIPSV 0.042398704544586295 [11/02 17:22:11]\n",
"\n",
- "[ITER 16500] Evaluating train: L1 0.0013141077600748224 PSNR 46.081020804012525 [18/10 12:38:37]\n",
- "Training progress: 85% 17000/20000 [20:35<02:48, 17.81it/s, Loss=0.0012820, psnr=47.89, point=28090]\n",
- "[ITER 17000] Evaluating test: L1 0.0025558721424792617 PSNR 38.321165421429804 [18/10 12:39:11]\n",
+ "[ITER 6000] Saving Gaussians [11/02 17:22:11]\n",
+ "reset opacity [11/02 17:22:12]\n",
+ "Training progress: 32% 6500/20000 [06:16<08:56, 25.16it/s, Loss=0.0032437, psnr=36.80, point=24004]\n",
+ "[ITER 6500] Evaluating test: L1 0.0032681920291746363 PSNR 36.11462312586167 SSIM 0.9899520874023438 LPIPSA 0.023052275399951375 LPIPSV 0.042956180651398265 [11/02 17:22:35]\n",
"\n",
- "[ITER 17000] Evaluating train: L1 0.0013066162731881965 PSNR 46.09962799969841 [18/10 12:39:12]\n",
- "Training progress: 88% 17500/20000 [21:10<02:19, 17.97it/s, Loss=0.0013061, psnr=49.47, point=28090]\n",
- "[ITER 17500] Evaluating test: L1 0.002584036309545969 PSNR 38.29386655022116 [18/10 12:39:46]\n",
+ "[ITER 6500] Evaluating train: L1 0.003325723056845805 PSNR 36.22226782406078 SSIM 0.9901221990585327 LPIPSA 0.021615264827714246 LPIPSV 0.04317888661342509 [11/02 17:22:39]\n",
+ "Training progress: 35% 7000/20000 [06:43<09:42, 22.34it/s, Loss=0.0024489, psnr=40.79, point=24374]\n",
+ "[ITER 7000] Evaluating test: L1 0.002714633845778949 PSNR 38.182106242460364 SSIM 0.9923843145370483 LPIPSA 0.020030213684281883 LPIPSV 0.039630030994029605 [11/02 17:23:02]\n",
"\n",
- "[ITER 17500] Evaluating train: L1 0.0012900612988125753 PSNR 46.411755954518036 [18/10 12:39:47]\n",
- "Training progress: 90% 18000/20000 [21:44<01:52, 17.85it/s, Loss=0.0012141, psnr=47.12, point=28090]\n",
- "[ITER 18000] Evaluating test: L1 0.0025839185931117218 PSNR 38.271065431482654 [18/10 12:40:20]\n",
+ "[ITER 7000] Evaluating train: L1 0.002555035534040893 PSNR 39.586549422320196 SSIM 0.9936817288398743 LPIPSA 0.018233077491030973 LPIPSV 0.03892161795759902 [11/02 17:23:06]\n",
"\n",
- "[ITER 18000] Evaluating train: L1 0.0012591813095187878 PSNR 46.53801300946404 [18/10 12:40:21]\n",
- "Training progress: 92% 18500/20000 [22:18<01:23, 17.90it/s, Loss=0.0012329, psnr=43.69, point=28090]\n",
- "[ITER 18500] Evaluating test: L1 0.0025499572717201185 PSNR 38.25597471349379 [18/10 12:40:54]\n",
+ "[ITER 7000] Saving Gaussians [11/02 17:23:06]\n",
+ "Training progress: 38% 7500/20000 [07:11<08:20, 24.97it/s, Loss=0.0025567, psnr=39.56, point=24742]\n",
+ "[ITER 7500] Evaluating test: L1 0.0027706020730821524 PSNR 37.565733068129596 SSIM 0.9920533299446106 LPIPSA 0.018913329962421867 LPIPSV 0.03858349658548832 [11/02 17:23:30]\n",
"\n",
- "[ITER 18500] Evaluating train: L1 0.001257107250721139 PSNR 46.56728834264418 [18/10 12:40:56]\n",
- "Training progress: 95% 19000/20000 [22:52<00:58, 17.01it/s, Loss=0.0010787, psnr=44.04, point=28090]\n",
- "[ITER 19000] Evaluating test: L1 0.002554826876696418 PSNR 38.317366431741156 [18/10 12:41:29]\n",
+ "[ITER 7500] Evaluating train: L1 0.0025372104239923987 PSNR 39.1994530172909 SSIM 0.9933921694755554 LPIPSA 0.016464974895557937 LPIPSV 0.0374461935066125 [11/02 17:23:34]\n",
+ "Training progress: 40% 8000/20000 [07:38<08:09, 24.51it/s, Loss=0.0021458, psnr=43.14, point=25057]\n",
+ "[ITER 8000] Evaluating test: L1 0.0026131915133994293 PSNR 38.44493933284984 SSIM 0.99265456199646 LPIPSA 0.01797448958763305 LPIPSV 0.037766611313118655 [11/02 17:23:57]\n",
"\n",
- "[ITER 19000] Evaluating train: L1 0.001252242867314421 PSNR 46.75315206191119 [18/10 12:41:30]\n",
- "Training progress: 98% 19500/20000 [23:27<00:35, 14.07it/s, Loss=0.0010570, psnr=45.20, point=28090]\n",
- "[ITER 19500] Evaluating test: L1 0.002561302129727076 PSNR 38.29808044433594 [18/10 12:42:04]\n",
+ "[ITER 8000] Evaluating train: L1 0.0021607967449680846 PSNR 40.84522179996266 SSIM 0.9945445656776428 LPIPSA 0.014935463886050618 LPIPSV 0.03614613260416424 [11/02 17:24:01]\n",
+ "Training progress: 42% 8500/20000 [08:06<07:47, 24.62it/s, Loss=0.0022860, psnr=43.39, point=25386]\n",
+ "[ITER 8500] Evaluating test: L1 0.0025736233561902363 PSNR 38.27988187004538 SSIM 0.9923607110977173 LPIPSA 0.017356174011879107 LPIPSV 0.036891553112689185 [11/02 17:24:24]\n",
"\n",
- "[ITER 19500] Evaluating train: L1 0.0012227956698659588 PSNR 46.8690497454475 [18/10 12:42:05]\n",
- "Training progress: 100% 20000/20000 [24:02<00:00, 13.86it/s, Loss=0.0012103, psnr=47.98, point=28090]\n",
+ "[ITER 8500] Evaluating train: L1 0.0021153267212759923 PSNR 40.77711486816406 SSIM 0.9944233894348145 LPIPSA 0.014042956699781558 LPIPSV 0.03508854131488239 [11/02 17:24:29]\n",
+ "Training progress: 45% 9000/20000 [08:33<07:55, 23.12it/s, Loss=0.0021753, psnr=43.07, point=25642]\n",
+ "[ITER 9000] Evaluating test: L1 0.0025135679444407717 PSNR 38.43748563878676 SSIM 0.9928200840950012 LPIPSA 0.016329326831242618 LPIPSV 0.03590039634967551 [11/02 17:24:52]\n",
"\n",
- "[ITER 20000] Evaluating test: L1 0.002560187322909341 PSNR 38.32729653751149 [18/10 12:42:39]\n",
+ "[ITER 9000] Evaluating train: L1 0.0021034476675969712 PSNR 40.87159010943245 SSIM 0.9946141242980957 LPIPSA 0.013585881220505518 LPIPSV 0.03429606261060519 [11/02 17:24:56]\n",
"\n",
- "[ITER 20000] Evaluating train: L1 0.001223050821435583 PSNR 46.967015210319964 [18/10 12:42:40]\n",
+ "[ITER 9000] Saving Gaussians [11/02 17:24:56]\n",
+ "reset opacity [11/02 17:24:56]\n",
+ "Training progress: 48% 9500/20000 [09:01<07:07, 24.58it/s, Loss=0.0024335, psnr=34.92, point=25875]\n",
+ "[ITER 9500] Evaluating test: L1 0.0026429355527986496 PSNR 37.96924725700827 SSIM 0.9925356507301331 LPIPSA 0.01634865697911557 LPIPSV 0.0361452534356538 [11/02 17:25:20]\n",
"\n",
- "[ITER 20000] Saving Gaussians [18/10 12:42:40]\n",
+ "[ITER 9500] Evaluating train: L1 0.002404595964916927 PSNR 38.74566740148207 SSIM 0.9935274720191956 LPIPSA 0.013755457256646716 LPIPSV 0.035648652626311075 [11/02 17:25:24]\n",
+ "Training progress: 50% 10000/20000 [09:28<06:46, 24.62it/s, Loss=0.0019608, psnr=42.27, point=26096]\n",
+ "[ITER 10000] Evaluating test: L1 0.002488111166338272 PSNR 38.822247449089495 SSIM 0.993249773979187 LPIPSA 0.015101892435375382 LPIPSV 0.03491570493754219 [11/02 17:25:47]\n",
"\n",
- "Training complete. [18/10 12:42:42]\n"
+ "[ITER 10000] Evaluating train: L1 0.002018665326485301 PSNR 41.44515744377585 SSIM 0.9950626492500305 LPIPSA 0.012039102723493296 LPIPSV 0.03365573473274708 [11/02 17:25:51]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 17:25:51]\n",
+ "Training progress: 52% 10500/20000 [09:56<07:53, 20.08it/s, Loss=0.0020278, psnr=43.41, point=26386]\n",
+ "[ITER 10500] Evaluating test: L1 0.0024021503049880266 PSNR 39.00146058026482 SSIM 0.9933052062988281 LPIPSA 0.014744987820877749 LPIPSV 0.03465937330004047 [11/02 17:26:15]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0018889021388638545 PSNR 42.27921407362994 SSIM 0.9954285621643066 LPIPSA 0.011308080188053496 LPIPSV 0.032871129539083034 [11/02 17:26:20]\n",
+ "Training progress: 55% 11000/20000 [10:25<06:10, 24.28it/s, Loss=0.0018204, psnr=44.19, point=26607]\n",
+ "[ITER 11000] Evaluating test: L1 0.002297465826439507 PSNR 39.53458045510685 SSIM 0.9937449097633362 LPIPSA 0.013659962014678647 LPIPSV 0.03364503263112377 [11/02 17:26:43]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.0018650924321264029 PSNR 42.119149151970355 SSIM 0.9953991174697876 LPIPSA 0.010722003137583242 LPIPSV 0.032010440440738905 [11/02 17:26:47]\n",
+ "Training progress: 57% 11500/20000 [10:52<05:45, 24.60it/s, Loss=0.0018155, psnr=46.13, point=26808]\n",
+ "[ITER 11500] Evaluating test: L1 0.002284151255427038 PSNR 39.55042423921473 SSIM 0.9937691688537598 LPIPSA 0.01357002991854268 LPIPSV 0.03319815721581964 [11/02 17:27:11]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.001714855240767493 PSNR 43.1313279095818 SSIM 0.9958688020706177 LPIPSA 0.009897333295906292 LPIPSV 0.031154854990103665 [11/02 17:27:15]\n",
+ "Training progress: 60% 12000/20000 [11:20<05:26, 24.53it/s, Loss=0.0017734, psnr=40.93, point=26988]\n",
+ "[ITER 12000] Evaluating test: L1 0.0022819553269073367 PSNR 39.61295520558077 SSIM 0.9938121438026428 LPIPSA 0.01322529133518829 LPIPSV 0.03277498100172071 [11/02 17:27:38]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.0016934895093607552 PSNR 43.179365943459906 SSIM 0.9959117770195007 LPIPSA 0.009645847987164469 LPIPSV 0.030766778461196843 [11/02 17:27:43]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 17:27:43]\n",
+ "reset opacity [11/02 17:27:43]\n",
+ "Training progress: 62% 12500/20000 [11:48<05:19, 23.49it/s, Loss=0.0016093, psnr=42.86, point=27110]\n",
+ "[ITER 12500] Evaluating test: L1 0.002297610407421256 PSNR 39.376884684843176 SSIM 0.9936938881874084 LPIPSA 0.013048184706884272 LPIPSV 0.033042823895812035 [11/02 17:28:07]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.0018375020556371002 PSNR 42.03203582763672 SSIM 0.9954715371131897 LPIPSA 0.009905017967171529 LPIPSV 0.031979144167374164 [11/02 17:28:11]\n",
+ "Training progress: 65% 13000/20000 [12:16<04:50, 24.11it/s, Loss=0.0017940, psnr=44.12, point=27239]\n",
+ "[ITER 13000] Evaluating test: L1 0.002239770913387046 PSNR 39.81040034574621 SSIM 0.9939890503883362 LPIPSA 0.012464346555883394 LPIPSV 0.03248039139982532 [11/02 17:28:35]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.0016994542538133614 PSNR 43.41728950949276 SSIM 0.9960163235664368 LPIPSA 0.009170782478416668 LPIPSV 0.030773750451557776 [11/02 17:28:39]\n",
+ "Training progress: 68% 13500/20000 [12:44<04:34, 23.72it/s, Loss=0.0018477, psnr=47.21, point=27346]\n",
+ "[ITER 13500] Evaluating test: L1 0.002237993203487028 PSNR 39.75710453706629 SSIM 0.9940112829208374 LPIPSA 0.012188616099164766 LPIPSV 0.03220965044901652 [11/02 17:29:02]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.0016441123683334272 PSNR 43.67578887939453 SSIM 0.9961831569671631 LPIPSA 0.008658237673123093 LPIPSV 0.0301563311368227 [11/02 17:29:06]\n",
+ "Training progress: 70% 14000/20000 [13:12<04:53, 20.42it/s, Loss=0.0015973, psnr=40.79, point=27451]\n",
+ "[ITER 14000] Evaluating test: L1 0.0022060937344041817 PSNR 39.967301537008844 SSIM 0.9941369295120239 LPIPSA 0.012034851071589133 LPIPSV 0.03177252652890542 [11/02 17:29:30]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.0015174680156633258 PSNR 44.3608611611759 SSIM 0.9963355660438538 LPIPSA 0.00825799916706541 LPIPSV 0.029595006585997695 [11/02 17:29:35]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 17:29:35]\n",
+ "Training progress: 72% 14500/20000 [13:40<03:48, 24.07it/s, Loss=0.0015679, psnr=44.37, point=27593]\n",
+ "[ITER 14500] Evaluating test: L1 0.002174684387522147 PSNR 40.02431622673483 SSIM 0.9942036867141724 LPIPSA 0.011624384479706778 LPIPSV 0.03142466670011773 [11/02 17:29:59]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0015449741074596258 PSNR 44.44091886632582 SSIM 0.9963601231575012 LPIPSA 0.008031947401297443 LPIPSV 0.029363312046317494 [11/02 17:30:03]\n",
+ "Training progress: 75% 15000/20000 [14:08<03:27, 24.09it/s, Loss=0.0017082, psnr=45.49, point=27727]\n",
+ "[ITER 15000] Evaluating test: L1 0.002170532414525309 PSNR 40.00325438555549 SSIM 0.9941788911819458 LPIPSA 0.011525094509124756 LPIPSV 0.031147205654312584 [11/02 17:30:26]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0015221275046796482 PSNR 44.62129705092486 SSIM 0.9964356422424316 LPIPSA 0.007844462223789272 LPIPSV 0.02894417443038786 [11/02 17:30:31]\n",
+ "Training progress: 78% 15500/20000 [14:35<03:22, 22.18it/s, Loss=0.0016657, psnr=43.06, point=27727]\n",
+ "[ITER 15500] Evaluating test: L1 0.002165225601535948 PSNR 40.12736017563764 SSIM 0.9942759275436401 LPIPSA 0.011383367565405719 LPIPSV 0.031006063608562246 [11/02 17:30:54]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0015075608663370503 PSNR 44.80114252427045 SSIM 0.9965313076972961 LPIPSA 0.007517649079946911 LPIPSV 0.028709262061645004 [11/02 17:30:58]\n",
+ "Training progress: 80% 16000/20000 [15:04<02:44, 24.26it/s, Loss=0.0017565, psnr=45.40, point=27727]\n",
+ "[ITER 16000] Evaluating test: L1 0.002170003793092773 PSNR 40.082411148968866 SSIM 0.9942787289619446 LPIPSA 0.01120605760747019 LPIPSV 0.03081674567040275 [11/02 17:31:22]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0014748082578401355 PSNR 44.98534483068129 SSIM 0.996578574180603 LPIPSA 0.0072879949858521715 LPIPSV 0.028220255420926738 [11/02 17:31:26]\n",
+ "Training progress: 82% 16500/20000 [15:31<02:23, 24.40it/s, Loss=0.0015242, psnr=43.70, point=27727]\n",
+ "[ITER 16500] Evaluating test: L1 0.002182431655990727 PSNR 40.07672433292164 SSIM 0.9942605495452881 LPIPSA 0.011172443710486679 LPIPSV 0.030940957805689645 [11/02 17:31:50]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0014928774565786999 PSNR 44.92567600923426 SSIM 0.9966106414794922 LPIPSA 0.007257470011930256 LPIPSV 0.02829482636469252 [11/02 17:31:54]\n",
+ "Training progress: 85% 17000/20000 [15:59<02:02, 24.45it/s, Loss=0.0012933, psnr=45.30, point=27727]\n",
+ "[ITER 17000] Evaluating test: L1 0.002100407358204179 PSNR 40.22231180527631 SSIM 0.9943820834159851 LPIPSA 0.010801971862640451 LPIPSV 0.03040830549948356 [11/02 17:32:17]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0014164116685552632 PSNR 45.3301472383387 SSIM 0.9966742396354675 LPIPSA 0.007065797799869496 LPIPSV 0.028034540331538987 [11/02 17:32:21]\n",
+ "Training progress: 88% 17500/20000 [16:26<01:59, 20.87it/s, Loss=0.0015457, psnr=44.62, point=27727]\n",
+ "[ITER 17500] Evaluating test: L1 0.0021685194087159984 PSNR 40.09733491785386 SSIM 0.9943071007728577 LPIPSA 0.01089888597455095 LPIPSV 0.030573993921279907 [11/02 17:32:45]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.0014894306111861678 PSNR 45.19396277034984 SSIM 0.9966732263565063 LPIPSA 0.006857417606036453 LPIPSV 0.027882521424223396 [11/02 17:32:49]\n",
+ "Training progress: 90% 18000/20000 [16:54<01:21, 24.39it/s, Loss=0.0013952, psnr=42.92, point=27727]\n",
+ "[ITER 18000] Evaluating test: L1 0.0021219799733337235 PSNR 40.25679038552677 SSIM 0.9943795204162598 LPIPSA 0.010721617213943425 LPIPSV 0.030239368624546948 [11/02 17:33:13]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.0013718158759943703 PSNR 45.55018570843865 SSIM 0.9967539310455322 LPIPSA 0.006759961656130412 LPIPSV 0.027568239663891932 [11/02 17:33:17]\n",
+ "Training progress: 92% 18500/20000 [17:22<01:01, 24.59it/s, Loss=0.0013161, psnr=42.35, point=27727]\n",
+ "[ITER 18500] Evaluating test: L1 0.0021245930795831714 PSNR 40.30130251716165 SSIM 0.9944049715995789 LPIPSA 0.01053698533488547 LPIPSV 0.030308733201202226 [11/02 17:33:40]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0014047519071027637 PSNR 45.62806858735926 SSIM 0.9967771172523499 LPIPSA 0.006587114322054035 LPIPSV 0.02755963254500838 [11/02 17:33:45]\n",
+ "Training progress: 95% 19000/20000 [17:49<00:40, 24.93it/s, Loss=0.0015127, psnr=45.46, point=27727]\n",
+ "[ITER 19000] Evaluating test: L1 0.0021082099924302276 PSNR 40.328684862922216 SSIM 0.9944429993629456 LPIPSA 0.010381340870962423 LPIPSV 0.03003409444628393 [11/02 17:34:08]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.0013861003715325804 PSNR 45.784013860365924 SSIM 0.996819019317627 LPIPSA 0.006414710297523176 LPIPSV 0.027390278656693065 [11/02 17:34:12]\n",
+ "Training progress: 98% 19500/20000 [18:17<00:22, 22.55it/s, Loss=0.0014379, psnr=44.02, point=27727]\n",
+ "[ITER 19500] Evaluating test: L1 0.002105023348977899 PSNR 40.294590893913714 SSIM 0.9944236874580383 LPIPSA 0.01050973919165485 LPIPSV 0.029871055856347084 [11/02 17:34:36]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.0013478695866925752 PSNR 45.844490948845355 SSIM 0.9968359470367432 LPIPSA 0.006467622759587625 LPIPSV 0.027163771857671878 [11/02 17:34:40]\n",
+ "Training progress: 100% 20000/20000 [18:45<00:00, 17.78it/s, Loss=0.0016553, psnr=45.63, point=27727]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.0021066682289957125 PSNR 40.3482946508071 SSIM 0.9944803714752197 LPIPSA 0.010181955746648944 LPIPSV 0.029882766744669748 [11/02 17:35:03]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.00144800975891378 PSNR 45.57883632884306 SSIM 0.99684077501297 LPIPSA 0.00632692635168924 LPIPSV 0.027194333153174204 [11/02 17:35:07]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 17:35:07]\n",
+ "\n",
+ "Training complete. [11/02 17:35:08]\n"
]
}
],
@@ -456,10 +2167,10 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 6,
"metadata": {
"id": "RTBK98DNl7_W",
- "outputId": "aabf7f5d-6189-4bd0-f81d-a4dc9d7c2acb",
+ "outputId": "0d5a6279-e419-4f40-c1bb-872af2fb1456",
"colab": {
"base_uri": "https://localhost:8080/"
}
@@ -473,31 +2184,28 @@
"Looking for config file in output/dnerf/bouncingballs/cfg_args\n",
"Config file found: output/dnerf/bouncingballs/cfg_args\n",
"Rendering output/dnerf/bouncingballs/\n",
- "feature_dim: 128 [18/10 12:42:48]\n",
- "Loading trained model at iteration 20000 [18/10 12:42:48]\n",
- "Found transforms_train.json file, assuming Blender data set! [18/10 12:42:48]\n",
- "Reading Training Transforms [18/10 12:42:48]\n",
- "Reading Test Transforms [18/10 12:43:00]\n",
- "Generating Video Transforms [18/10 12:43:02]\n",
- "Generating random point cloud (2000)... [18/10 12:43:02]\n",
- "Loading Training Cameras [18/10 12:43:02]\n",
- "Loading Test Cameras [18/10 12:43:02]\n",
- "Loading Video Cameras [18/10 12:43:02]\n",
+ "feature_dim: 64 [11/02 17:35:23]\n",
+ "Loading trained model at iteration 20000 [11/02 17:35:23]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 17:35:23]\n",
+ "Reading Training Transforms [11/02 17:35:23]\n",
+ "Reading Test Transforms [11/02 17:35:36]\n",
+ "Generating Video Transforms [11/02 17:35:37]\n",
+ "hello!!!! [11/02 17:35:37]\n",
+ "Generating random point cloud (2000)... [11/02 17:35:37]\n",
+ "Loading Training Cameras [11/02 17:35:37]\n",
+ "Loading Test Cameras [11/02 17:35:37]\n",
+ "Loading Video Cameras [11/02 17:35:37]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 17:35:37]\n",
"Voxel Plane: set aabb= Parameter containing:\n",
"tensor([[ 1.2998, 1.2999, 1.2999],\n",
- " [-1.2998, -1.2998, -1.2987]], requires_grad=True) [18/10 12:43:03]\n",
- "loading model from existsoutput/dnerf/bouncingballs/point_cloud/iteration_20000 [18/10 12:43:05]\n",
- "Rendering progress: 100% 20/20 [00:01<00:00, 12.51it/s]\n",
- "FPS: 11.988245192586138 [18/10 12:43:07]\n",
- "writing training images. [18/10 12:43:07]\n",
- "100% 20/20 [00:01<00:00, 15.48it/s]\n",
- "writing rendering images. [18/10 12:43:09]\n",
- "100% 20/20 [00:01<00:00, 13.67it/s]\n",
- "Rendering progress: 100% 40/40 [00:00<00:00, 55.85it/s]\n",
- "FPS: 54.44600711288924 [18/10 12:43:12]\n",
- "writing training images. [18/10 12:43:12]\n",
- "writing rendering images. [18/10 12:43:12]\n",
- "100% 40/40 [00:02<00:00, 13.61it/s]\n"
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 17:35:37]\n",
+ "loading model from existsoutput/dnerf/bouncingballs/point_cloud/iteration_20000 [11/02 17:35:37]\n",
+ "point nums: 27727 [11/02 17:35:37]\n",
+ "Rendering progress: 100% 20/20 [00:00<00:00, 22.06it/s]\n",
+ "FPS: 21.132538823445802 [11/02 17:35:38]\n",
+ "point nums: 27727 [11/02 17:35:41]\n",
+ "Rendering progress: 100% 160/160 [00:07<00:00, 22.82it/s]\n",
+ "FPS: 22.706665557597738 [11/02 17:35:49]\n"
]
}
],
@@ -523,7 +2231,7 @@
"metadata": {
"id": "oLyN3bAw0KCI"
},
- "execution_count": 3,
+ "execution_count": 7,
"outputs": []
},
{
@@ -542,9 +2250,9 @@
"height": 1000
},
"id": "nCTKtptS0MrA",
- "outputId": "ad2bdbac-ddeb-4d27-900c-24c96009bc44"
+ "outputId": "b27347ee-774d-4728-ddef-c4f382a4fcf5"
},
- "execution_count": 6,
+ "execution_count": 8,
"outputs": [
{
"output_type": "execute_result",
@@ -555,22 +2263,2747 @@
"text/html": [
"\n",
" \n",
" "
]
},
"metadata": {},
- "execution_count": 6
+ "execution_count": 8
}
]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "gpuType": "T4",
- "provenance": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Hell warrior**"
+ ],
+ "metadata": {
+ "id": "5W8d3nrVPAiL"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "outputId": "b0a3f64b-3d67-4bc1-e527-7f7b2be2056d",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "LMBazODGPO_A"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 17:36:10.214940: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 17:36:10.214993: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 17:36:10.216349: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 17:36:11.729168: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/hellwarrior [11/02 17:36:13]\n",
+ "feature_dim: 64 [11/02 17:36:13]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 17:36:13]\n",
+ "Reading Training Transforms [11/02 17:36:13]\n",
+ "Reading Test Transforms [11/02 17:36:20]\n",
+ "Generating Video Transforms [11/02 17:36:23]\n",
+ "hello!!!! [11/02 17:36:23]\n",
+ "Generating random point cloud (2000)... [11/02 17:36:23]\n",
+ "Loading Training Cameras [11/02 17:36:23]\n",
+ "Loading Test Cameras [11/02 17:36:23]\n",
+ "Loading Video Cameras [11/02 17:36:23]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 17:36:23]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 17:36:23]\n",
+ "Number of points at initialisation : 2000 [11/02 17:36:23]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:36:23]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:36:24]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:36:24]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 17:36:25]\n",
+ "data loading done [11/02 17:36:27]\n",
+ "Training progress: 17% 500/3000 [00:22<01:30, 27.52it/s, Loss=0.0398093, psnr=17.69, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.053061530432280374 PSNR 15.622326177709242 SSIM 0.8943843245506287 LPIPSA 0.23624918390722835 LPIPSV 0.1625049964470022 [11/02 17:36:51]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.05411727025228388 PSNR 15.447214463177849 SSIM 0.8947705626487732 LPIPSA 0.2323621870840297 LPIPSV 0.16016949231133742 [11/02 17:36:54]\n",
+ "Training progress: 33% 1000/3000 [00:47<01:15, 26.49it/s, Loss=0.0433224, psnr=14.79, point=2549]\n",
+ "[ITER 1000] Evaluating test: L1 0.041660731439204776 PSNR 15.484002169440775 SSIM 0.9080678224563599 LPIPSA 0.2059157981592066 LPIPSV 0.1355145117815803 [11/02 17:37:15]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.04180135588873835 PSNR 15.48535161859849 SSIM 0.9086501598358154 LPIPSA 0.20328378940329833 LPIPSV 0.13376268218545354 [11/02 17:37:19]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 17:37:19]\n",
+ "Training progress: 50% 1500/3000 [01:02<00:22, 67.26it/s, Loss=0.0394407, psnr=15.39, point=6870]\n",
+ "[ITER 1500] Evaluating test: L1 0.04171237174202414 PSNR 15.24512560227338 SSIM 0.9093536138534546 LPIPSA 0.18504502755754135 LPIPSV 0.12936155804816415 [11/02 17:37:30]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.04135853698586716 PSNR 15.320942934821634 SSIM 0.911273717880249 LPIPSA 0.18417384081027088 LPIPSV 0.12688856966355266 [11/02 17:37:34]\n",
+ "Training progress: 67% 2000/3000 [01:18<00:14, 68.13it/s, Loss=0.0419616, psnr=15.35, point=12475]\n",
+ "[ITER 2000] Evaluating test: L1 0.04185135158545831 PSNR 15.177439521340762 SSIM 0.9093917608261108 LPIPSA 0.17864827110486872 LPIPSV 0.12727602089152618 [11/02 17:37:45]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.040549912216032255 PSNR 15.368108300601735 SSIM 0.9125891923904419 LPIPSA 0.17678735098418066 LPIPSV 0.12428687775836271 [11/02 17:37:49]\n",
+ "Training progress: 83% 2500/3000 [01:33<00:07, 65.53it/s, Loss=0.0332009, psnr=17.02, point=17513]\n",
+ "[ITER 2500] Evaluating test: L1 0.04182806118007969 PSNR 15.161344472099753 SSIM 0.9093036651611328 LPIPSA 0.1739396111053579 LPIPSV 0.12622530320111444 [11/02 17:38:01]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.04010953995234826 PSNR 15.397430812611299 SSIM 0.9133593440055847 LPIPSA 0.17225883024580338 LPIPSV 0.12271021525649463 [11/02 17:38:05]\n",
+ "Training progress: 100% 3000/3000 [01:48<00:00, 45.79it/s, Loss=0.0399342, psnr=15.96, point=21740]\n",
+ "[ITER 3000] Evaluating test: L1 0.04198518570731668 PSNR 15.138204237994026 SSIM 0.908649742603302 LPIPSA 0.17093920795356526 LPIPSV 0.1259005060967277 [11/02 17:38:17]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.03987970981089508 PSNR 15.418603448306813 SSIM 0.9133381843566895 LPIPSA 0.16887319964521072 LPIPSV 0.12192977964878082 [11/02 17:38:20]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 17:38:21]\n",
+ "reset opacity [11/02 17:38:21]\n",
+ "Training progress: 100% 3000/3000 [01:57<00:00, 25.45it/s, Loss=0.0399342, psnr=15.96, point=21740]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:38:21]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:38:21]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:38:21]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 17:38:22]\n",
+ "data loading done [11/02 17:38:24]\n",
+ "Training progress: 2% 500/20000 [00:34<18:06, 17.95it/s, Loss=0.0190001, psnr=21.10, point=22510]\n",
+ "[ITER 500] Evaluating test: L1 0.019510290749809322 PSNR 20.368475970099954 SSIM 0.9327805042266846 LPIPSA 0.1615376590805895 LPIPSV 0.11137746186817393 [11/02 17:38:59]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.019787851611480993 PSNR 20.11071070502786 SSIM 0.933641254901886 LPIPSA 0.16223573684692383 LPIPSV 0.1108701080083847 [11/02 17:39:03]\n",
+ "Training progress: 5% 1000/20000 [01:10<16:25, 19.29it/s, Loss=0.0137942, psnr=25.13, point=24689]\n",
+ "[ITER 1000] Evaluating test: L1 0.013816328500123584 PSNR 22.406902874217312 SSIM 0.9435330629348755 LPIPSA 0.13057721625356114 LPIPSV 0.09734092346009086 [11/02 17:39:35]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.013253310750074247 PSNR 22.61927660773782 SSIM 0.9462329745292664 LPIPSA 0.13247966810184367 LPIPSV 0.09573191535823486 [11/02 17:39:39]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 17:39:39]\n",
+ "Training progress: 8% 1500/20000 [01:37<12:03, 25.58it/s, Loss=0.0113837, psnr=23.58, point=28160]\n",
+ "[ITER 1500] Evaluating test: L1 0.012605223202091806 PSNR 22.61566723094267 SSIM 0.9475814700126648 LPIPSA 0.11322580628535327 LPIPSV 0.09037238710066851 [11/02 17:40:02]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.011050111258073765 PSNR 23.481477961820715 SSIM 0.9524956941604614 LPIPSA 0.1136124502210056 LPIPSV 0.0868777812403791 [11/02 17:40:06]\n",
+ "Training progress: 10% 2000/20000 [02:03<11:42, 25.63it/s, Loss=0.0111221, psnr=22.45, point=31073]\n",
+ "[ITER 2000] Evaluating test: L1 0.010557885747402906 PSNR 23.743290171903723 SSIM 0.9526512026786804 LPIPSA 0.09739114694735583 LPIPSV 0.08345382463406115 [11/02 17:40:29]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.00999262959093732 PSNR 24.022712595322552 SSIM 0.9564662575721741 LPIPSA 0.09918820901828654 LPIPSV 0.08071953408858355 [11/02 17:40:33]\n",
+ "Training progress: 12% 2500/20000 [02:31<12:36, 23.14it/s, Loss=0.0082677, psnr=25.67, point=33172]\n",
+ "[ITER 2500] Evaluating test: L1 0.009384035817621384 PSNR 24.579626644358914 SSIM 0.9565781950950623 LPIPSA 0.08715757815276876 LPIPSV 0.07861973652068306 [11/02 17:40:56]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.008716956794481067 PSNR 24.98398836921243 SSIM 0.9593548774719238 LPIPSA 0.0896571703693446 LPIPSV 0.07628511418314542 [11/02 17:41:00]\n",
+ "Training progress: 15% 3000/20000 [02:58<11:17, 25.10it/s, Loss=0.0072596, psnr=27.98, point=34750]\n",
+ "[ITER 3000] Evaluating test: L1 0.008834994041963536 PSNR 24.89417120989631 SSIM 0.9587001800537109 LPIPSA 0.0801312246305101 LPIPSV 0.07473123599501218 [11/02 17:41:24]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.007974739487775984 PSNR 25.258783003863165 SSIM 0.9632827639579773 LPIPSA 0.08178588134400985 LPIPSV 0.07144735928844004 [11/02 17:41:28]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 17:41:28]\n",
+ "reset opacity [11/02 17:41:29]\n",
+ "Training progress: 18% 3500/20000 [03:26<10:51, 25.32it/s, Loss=0.0086884, psnr=25.59, point=35763]\n",
+ "[ITER 3500] Evaluating test: L1 0.008997636737630647 PSNR 24.57690362369313 SSIM 0.9580725431442261 LPIPSA 0.07777429613120415 LPIPSV 0.0726696160786292 [11/02 17:41:51]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.006978096166516051 PSNR 26.565622217514935 SSIM 0.9659871459007263 LPIPSA 0.0768828470917309 LPIPSV 0.06798853352665901 [11/02 17:41:55]\n",
+ "Training progress: 20% 4000/20000 [03:52<10:36, 25.15it/s, Loss=0.0076517, psnr=23.79, point=36703]\n",
+ "[ITER 4000] Evaluating test: L1 0.00826258264372454 PSNR 25.304171954884247 SSIM 0.9609381556510925 LPIPSA 0.07215101556742892 LPIPSV 0.06972455583951052 [11/02 17:42:18]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.006862159654059831 PSNR 26.534266303567325 SSIM 0.9668200016021729 LPIPSA 0.07183504652451067 LPIPSV 0.06519799022113576 [11/02 17:42:22]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 17:42:22]\n",
+ "Training progress: 22% 4500/20000 [04:20<11:35, 22.29it/s, Loss=0.0069090, psnr=26.43, point=37532]\n",
+ "[ITER 4500] Evaluating test: L1 0.007508117267314126 PSNR 26.025861740112305 SSIM 0.9638369083404541 LPIPSA 0.06600751815473332 LPIPSV 0.06663869606221423 [11/02 17:42:46]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.006080815185080557 PSNR 27.26292340895709 SSIM 0.9699315428733826 LPIPSA 0.0663041522835984 LPIPSV 0.06236136047279134 [11/02 17:42:50]\n",
+ "Training progress: 25% 5000/20000 [04:47<10:00, 24.97it/s, Loss=0.0065968, psnr=25.38, point=38146]\n",
+ "[ITER 5000] Evaluating test: L1 0.006971459205755416 PSNR 26.4929760203642 SSIM 0.9654569029808044 LPIPSA 0.06042062754140181 LPIPSV 0.06412481998696047 [11/02 17:43:13]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.005154943367575898 PSNR 28.587409412159637 SSIM 0.9739391207695007 LPIPSA 0.059154869440723866 LPIPSV 0.05798327155849513 [11/02 17:43:17]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 17:43:17]\n",
+ "Training progress: 28% 5500/20000 [05:14<09:35, 25.19it/s, Loss=0.0046743, psnr=30.93, point=38604]\n",
+ "[ITER 5500] Evaluating test: L1 0.006726038258741884 PSNR 26.71849822998047 SSIM 0.9669376015663147 LPIPSA 0.05750261225244578 LPIPSV 0.06233591411043616 [11/02 17:43:40]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.005004184909493607 PSNR 28.77884786269244 SSIM 0.9750209450721741 LPIPSA 0.056074153193656134 LPIPSV 0.05618460629792774 [11/02 17:43:44]\n",
+ "Training progress: 30% 6000/20000 [05:41<09:16, 25.14it/s, Loss=0.0047543, psnr=27.74, point=39026]\n",
+ "[ITER 6000] Evaluating test: L1 0.0068948172608061745 PSNR 26.485538819256952 SSIM 0.9669446349143982 LPIPSA 0.055062287651440674 LPIPSV 0.0612230175996528 [11/02 17:44:07]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.004862954840064049 PSNR 28.9272626989028 SSIM 0.976181149482727 LPIPSA 0.05216394189526053 LPIPSV 0.05354234335177085 [11/02 17:44:11]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 17:44:11]\n",
+ "reset opacity [11/02 17:44:12]\n",
+ "Training progress: 32% 6500/20000 [06:09<10:18, 21.82it/s, Loss=0.0048688, psnr=29.97, point=39252]\n",
+ "[ITER 6500] Evaluating test: L1 0.006161784903858514 PSNR 27.522311042336856 SSIM 0.9695550203323364 LPIPSA 0.053324237246723735 LPIPSV 0.05875903565217467 [11/02 17:44:35]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.004353061140350559 PSNR 29.920637803919174 SSIM 0.9787791967391968 LPIPSA 0.05035701700869728 LPIPSV 0.05154937768683714 [11/02 17:44:39]\n",
+ "Training progress: 35% 7000/20000 [06:37<08:45, 24.75it/s, Loss=0.0039145, psnr=30.60, point=39450]\n",
+ "[ITER 7000] Evaluating test: L1 0.0058161574141944155 PSNR 27.877397537231445 SSIM 0.9710946083068848 LPIPSA 0.04922273557852296 LPIPSV 0.056760410394738704 [11/02 17:45:02]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.004166377886362812 PSNR 30.060681286980124 SSIM 0.9796577095985413 LPIPSA 0.04614816036294488 LPIPSV 0.04895017953479991 [11/02 17:45:06]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 17:45:06]\n",
+ "Training progress: 38% 7500/20000 [07:04<08:22, 24.89it/s, Loss=0.0034413, psnr=32.40, point=39601]\n",
+ "[ITER 7500] Evaluating test: L1 0.005750063491766067 PSNR 27.885761934168197 SSIM 0.9713096618652344 LPIPSA 0.04624199494719505 LPIPSV 0.05557648574604707 [11/02 17:45:30]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.003883334479349501 PSNR 30.48431396484375 SSIM 0.9811912178993225 LPIPSA 0.04230729161816485 LPIPSV 0.04694490572985481 [11/02 17:45:34]\n",
+ "Training progress: 40% 8000/20000 [07:31<08:04, 24.77it/s, Loss=0.0038637, psnr=30.17, point=39757]\n",
+ "[ITER 8000] Evaluating test: L1 0.0056093919556587934 PSNR 28.099266725427963 SSIM 0.9718172550201416 LPIPSA 0.044265235829002714 LPIPSV 0.054416938520529694 [11/02 17:45:57]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.003799120443599189 PSNR 30.67918665268842 SSIM 0.9816905856132507 LPIPSA 0.04003017023205757 LPIPSV 0.04509362052468693 [11/02 17:46:01]\n",
+ "Training progress: 42% 8500/20000 [07:59<08:36, 22.25it/s, Loss=0.0033769, psnr=33.18, point=39927]\n",
+ "[ITER 8500] Evaluating test: L1 0.0057124621782671005 PSNR 27.886958178351907 SSIM 0.9713752865791321 LPIPSA 0.04260802904472632 LPIPSV 0.05384282088455032 [11/02 17:46:24]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.003390101606354994 PSNR 31.73609587725471 SSIM 0.9837040901184082 LPIPSA 0.03717026399338946 LPIPSV 0.043409503558102774 [11/02 17:46:28]\n",
+ "Training progress: 45% 9000/20000 [08:26<07:27, 24.57it/s, Loss=0.0031907, psnr=32.43, point=40091]\n",
+ "[ITER 9000] Evaluating test: L1 0.005376302760423106 PSNR 28.364555583280676 SSIM 0.9728959202766418 LPIPSA 0.040584286455722415 LPIPSV 0.052616683875813204 [11/02 17:46:52]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.0031869228354052584 PSNR 32.21646982080796 SSIM 0.9847837090492249 LPIPSA 0.035034142872866464 LPIPSV 0.04182265435948091 [11/02 17:46:56]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 17:46:56]\n",
+ "reset opacity [11/02 17:46:56]\n",
+ "Training progress: 48% 9500/20000 [08:54<07:07, 24.58it/s, Loss=0.0034486, psnr=33.97, point=40160]\n",
+ "[ITER 9500] Evaluating test: L1 0.005501350675545195 PSNR 28.184502433328067 SSIM 0.9724866151809692 LPIPSA 0.04059842743856065 LPIPSV 0.05207572811666657 [11/02 17:47:20]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.0031213218428413655 PSNR 32.570650437298944 SSIM 0.9851880073547363 LPIPSA 0.03397759378832929 LPIPSV 0.040889430352870154 [11/02 17:47:24]\n",
+ "Training progress: 50% 10000/20000 [09:21<06:45, 24.67it/s, Loss=0.0029534, psnr=34.20, point=40243]\n",
+ "[ITER 10000] Evaluating test: L1 0.00520073708749431 PSNR 28.593639261582318 SSIM 0.9737080931663513 LPIPSA 0.03803241680211881 LPIPSV 0.05095019673599916 [11/02 17:47:47]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.002817357931395664 PSNR 33.29722325942095 SSIM 0.9867551326751709 LPIPSA 0.03101441417546833 LPIPSV 0.039057252380777806 [11/02 17:47:51]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 17:47:51]\n",
+ "Training progress: 52% 10500/20000 [09:49<06:41, 23.65it/s, Loss=0.0030416, psnr=32.72, point=40314]\n",
+ "[ITER 10500] Evaluating test: L1 0.005343444672796656 PSNR 28.347119275261374 SSIM 0.9730339050292969 LPIPSA 0.03723313473165035 LPIPSV 0.05065901358337963 [11/02 17:48:15]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0030883328222176608 PSNR 32.206464655259076 SSIM 0.985573947429657 LPIPSA 0.03018271747757407 LPIPSV 0.0384647029945079 [11/02 17:48:19]\n",
+ "Training progress: 55% 11000/20000 [10:17<06:05, 24.60it/s, Loss=0.0030158, psnr=32.48, point=40377]\n",
+ "[ITER 11000] Evaluating test: L1 0.005185516920926816 PSNR 28.61417994779699 SSIM 0.9735556244850159 LPIPSA 0.03603022063479704 LPIPSV 0.04998588890713804 [11/02 17:48:42]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.0025193503003238756 PSNR 34.39987362132353 SSIM 0.9882849454879761 LPIPSA 0.027483384837122524 LPIPSV 0.036844500176170296 [11/02 17:48:46]\n",
+ "Training progress: 57% 11500/20000 [10:44<05:44, 24.70it/s, Loss=0.0026152, psnr=35.51, point=40420]\n",
+ "[ITER 11500] Evaluating test: L1 0.005156602957012022 PSNR 28.6768897561466 SSIM 0.9736542701721191 LPIPSA 0.03493907686103793 LPIPSV 0.049648512373952305 [11/02 17:49:09]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.002478226293426226 PSNR 34.46981272977941 SSIM 0.9885453581809998 LPIPSA 0.026082396507263184 LPIPSV 0.03577986381509725 [11/02 17:49:14]\n",
+ "Training progress: 60% 12000/20000 [11:11<05:34, 23.91it/s, Loss=0.0029385, psnr=33.87, point=40471]\n",
+ "[ITER 12000] Evaluating test: L1 0.005179981071063701 PSNR 28.607513652128333 SSIM 0.9733569025993347 LPIPSA 0.03426444804405465 LPIPSV 0.0493441760978278 [11/02 17:49:37]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.0024233265601865507 PSNR 34.74583042369169 SSIM 0.9888172149658203 LPIPSA 0.02474510801189086 LPIPSV 0.03502880858586115 [11/02 17:49:41]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 17:49:41]\n",
+ "reset opacity [11/02 17:49:42]\n",
+ "Training progress: 62% 12500/20000 [11:39<05:14, 23.83it/s, Loss=0.0021604, psnr=37.61, point=40496]\n",
+ "[ITER 12500] Evaluating test: L1 0.005040461060536259 PSNR 28.857556960161993 SSIM 0.9742862582206726 LPIPSA 0.0339877363294363 LPIPSV 0.04849091961103327 [11/02 17:50:05]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.0023284276317366783 PSNR 34.98167598948759 SSIM 0.9893122315406799 LPIPSA 0.02449406497180462 LPIPSV 0.034593210939098805 [11/02 17:50:09]\n",
+ "Training progress: 65% 13000/20000 [12:07<04:47, 24.39it/s, Loss=0.0023163, psnr=35.63, point=40532]\n",
+ "[ITER 13000] Evaluating test: L1 0.005038686480154009 PSNR 28.82899576074937 SSIM 0.9741299748420715 LPIPSA 0.03301518892540651 LPIPSV 0.04834653393310659 [11/02 17:50:33]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.0022580893025459614 PSNR 35.25124269373276 SSIM 0.9896680116653442 LPIPSA 0.023134025361608055 LPIPSV 0.03362704561475445 [11/02 17:50:37]\n",
+ "Training progress: 68% 13500/20000 [12:34<04:22, 24.79it/s, Loss=0.0023738, psnr=36.32, point=40568]\n",
+ "[ITER 13500] Evaluating test: L1 0.005004611167618457 PSNR 28.895904765409583 SSIM 0.9741354584693909 LPIPSA 0.0323327280142728 LPIPSV 0.04814732118564494 [11/02 17:51:00]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.002176491468323066 PSNR 35.55222432753619 SSIM 0.9900223016738892 LPIPSA 0.022187893114545765 LPIPSV 0.032981410403462016 [11/02 17:51:04]\n",
+ "Training progress: 70% 14000/20000 [13:02<04:35, 21.76it/s, Loss=0.0022903, psnr=37.89, point=40594]\n",
+ "[ITER 14000] Evaluating test: L1 0.005022229955476873 PSNR 28.864545822143555 SSIM 0.9740432500839233 LPIPSA 0.031788900275440776 LPIPSV 0.04807555237237145 [11/02 17:51:28]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.0020894555662594296 PSNR 35.93636546415441 SSIM 0.9904029965400696 LPIPSA 0.021149590227972057 LPIPSV 0.03234791021574946 [11/02 17:51:32]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 17:51:32]\n",
+ "Training progress: 72% 14500/20000 [13:30<03:47, 24.19it/s, Loss=0.0019172, psnr=41.39, point=40630]\n",
+ "[ITER 14500] Evaluating test: L1 0.004977686755249605 PSNR 28.92402592827292 SSIM 0.9741551876068115 LPIPSA 0.031359371783978796 LPIPSV 0.047894890255787796 [11/02 17:51:56]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0020348808919901356 PSNR 36.20766538732192 SSIM 0.9906772375106812 LPIPSA 0.020444087355452424 LPIPSV 0.03179208517951124 [11/02 17:52:00]\n",
+ "Training progress: 75% 15000/20000 [13:58<03:24, 24.49it/s, Loss=0.0019914, psnr=38.35, point=40670]\n",
+ "[ITER 15000] Evaluating test: L1 0.004955990815206485 PSNR 28.971073936013614 SSIM 0.9742005467414856 LPIPSA 0.030827885274501407 LPIPSV 0.047748466844067854 [11/02 17:52:23]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0019453035270356957 PSNR 36.6184295205509 SSIM 0.9909923672676086 LPIPSA 0.019531751928084037 LPIPSV 0.0313120060326422 [11/02 17:52:27]\n",
+ "Training progress: 78% 15500/20000 [14:24<02:58, 25.27it/s, Loss=0.0018380, psnr=38.00, point=40670]\n",
+ "[ITER 15500] Evaluating test: L1 0.004981027025838985 PSNR 28.897165971643783 SSIM 0.974001407623291 LPIPSA 0.030653373721767876 LPIPSV 0.04770749237607507 [11/02 17:52:50]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0019653375972719755 PSNR 36.457474428064685 SSIM 0.9909541010856628 LPIPSA 0.01909405184800134 LPIPSV 0.031045351944425526 [11/02 17:52:54]\n",
+ "Training progress: 80% 16000/20000 [14:52<03:09, 21.11it/s, Loss=0.0018209, psnr=35.90, point=40670]\n",
+ "[ITER 16000] Evaluating test: L1 0.00495578463682357 PSNR 28.959714216344498 SSIM 0.9740849733352661 LPIPSA 0.030172926428563455 LPIPSV 0.04754199734067216 [11/02 17:53:18]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0018703230273197679 PSNR 36.9004565968233 SSIM 0.9913653135299683 LPIPSA 0.018330169786863467 LPIPSV 0.03044148162007332 [11/02 17:53:22]\n",
+ "Training progress: 82% 16500/20000 [15:19<02:23, 24.31it/s, Loss=0.0017665, psnr=39.47, point=40670]\n",
+ "[ITER 16500] Evaluating test: L1 0.004957363803816193 PSNR 28.959335663739374 SSIM 0.9740025997161865 LPIPSA 0.029907955185455436 LPIPSV 0.047468954368549234 [11/02 17:53:45]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0018156531869488604 PSNR 37.14188542085535 SSIM 0.9915803074836731 LPIPSA 0.01779217875617392 LPIPSV 0.03002164260867764 [11/02 17:53:49]\n",
+ "Training progress: 85% 17000/20000 [15:46<01:59, 25.19it/s, Loss=0.0019556, psnr=38.10, point=40670]\n",
+ "[ITER 17000] Evaluating test: L1 0.004960857003050692 PSNR 28.94343993243049 SSIM 0.9739284515380859 LPIPSA 0.029696416767204508 LPIPSV 0.047453385722987795 [11/02 17:54:12]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0018124961472280761 PSNR 37.20202434764189 SSIM 0.9916553497314453 LPIPSA 0.017440063331057045 LPIPSV 0.02978756048661821 [11/02 17:54:16]\n",
+ "Training progress: 88% 17500/20000 [16:13<01:39, 25.16it/s, Loss=0.0017481, psnr=40.76, point=40670]\n",
+ "[ITER 17500] Evaluating test: L1 0.004945539307835347 PSNR 28.972298117244943 SSIM 0.9739798307418823 LPIPSA 0.02949621802305474 LPIPSV 0.04735815974281115 [11/02 17:54:39]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.0017608945559271995 PSNR 37.50276632869945 SSIM 0.9918379187583923 LPIPSA 0.017020462464322064 LPIPSV 0.02943839691579342 [11/02 17:54:43]\n",
+ "Training progress: 90% 18000/20000 [16:40<01:19, 25.07it/s, Loss=0.0019518, psnr=37.74, point=40670]\n",
+ "[ITER 18000] Evaluating test: L1 0.004941555644002031 PSNR 28.98133580824908 SSIM 0.9739413857460022 LPIPSA 0.029312003403902054 LPIPSV 0.047312471029513026 [11/02 17:55:06]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.0017222190029261744 PSNR 37.648078469669116 SSIM 0.9919726252555847 LPIPSA 0.016682943110080325 LPIPSV 0.02922918844748946 [11/02 17:55:10]\n",
+ "Training progress: 92% 18500/20000 [17:07<01:05, 22.78it/s, Loss=0.0017271, psnr=39.34, point=40670]\n",
+ "[ITER 18500] Evaluating test: L1 0.004930976970011697 PSNR 29.00258726232192 SSIM 0.9739344120025635 LPIPSA 0.029157735955189255 LPIPSV 0.04731736156870337 [11/02 17:55:33]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0017008070203968708 PSNR 37.75354475133559 SSIM 0.9920654296875 LPIPSA 0.016344932401004958 LPIPSV 0.028947002747479605 [11/02 17:55:37]\n",
+ "Training progress: 95% 19000/20000 [17:34<00:39, 25.06it/s, Loss=0.0017880, psnr=35.35, point=40670]\n",
+ "[ITER 19000] Evaluating test: L1 0.004938534091171973 PSNR 28.993863161872415 SSIM 0.973863959312439 LPIPSA 0.028964284697876257 LPIPSV 0.04727691050399752 [11/02 17:56:00]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.0016997870304347837 PSNR 37.82240632001091 SSIM 0.9921036958694458 LPIPSA 0.016118674977299047 LPIPSV 0.028902051312958494 [11/02 17:56:04]\n",
+ "Training progress: 98% 19500/20000 [18:01<00:19, 25.17it/s, Loss=0.0015812, psnr=42.70, point=40670]\n",
+ "[ITER 19500] Evaluating test: L1 0.004935049219056964 PSNR 28.98192809609806 SSIM 0.9738686680793762 LPIPSA 0.028845788363148186 LPIPSV 0.047188554178265965 [11/02 17:56:27]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.00165792548453764 PSNR 37.93830490112305 SSIM 0.9922850131988525 LPIPSA 0.01577754636459491 LPIPSV 0.02848746145472807 [11/02 17:56:31]\n",
+ "Training progress: 100% 20000/20000 [18:28<00:00, 18.04it/s, Loss=0.0017254, psnr=38.33, point=40670]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.004944422131623416 PSNR 28.9826265222886 SSIM 0.9737628698348999 LPIPSA 0.02883651841650991 LPIPSV 0.04729766069966204 [11/02 17:56:54]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0016545113039148204 PSNR 38.0341137156767 SSIM 0.9923098087310791 LPIPSA 0.015564682455185582 LPIPSV 0.028479935491786283 [11/02 17:56:58]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 17:56:58]\n",
+ "\n",
+ "Training complete. [11/02 17:56:58]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/hellwarrior/ --port 6017 --expname \"dnerf/hellwarrior\" --configs arguments/dnerf/hellwarrior.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {
+ "outputId": "253cda64-36c6-4e1a-c4ff-fa95784cfaa2",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "xP5ryw8kPO_A"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/hellwarrior/cfg_args\n",
+ "Config file found: output/dnerf/hellwarrior/cfg_args\n",
+ "Rendering output/dnerf/hellwarrior/\n",
+ "feature_dim: 64 [11/02 17:57:07]\n",
+ "Loading trained model at iteration 20000 [11/02 17:57:07]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 17:57:07]\n",
+ "Reading Training Transforms [11/02 17:57:07]\n",
+ "Reading Test Transforms [11/02 17:57:14]\n",
+ "Generating Video Transforms [11/02 17:57:16]\n",
+ "hello!!!! [11/02 17:57:16]\n",
+ "Generating random point cloud (2000)... [11/02 17:57:16]\n",
+ "Loading Training Cameras [11/02 17:57:16]\n",
+ "Loading Test Cameras [11/02 17:57:16]\n",
+ "Loading Video Cameras [11/02 17:57:16]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 17:57:16]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 17:57:16]\n",
+ "loading model from existsoutput/dnerf/hellwarrior/point_cloud/iteration_20000 [11/02 17:57:16]\n",
+ "point nums: 40670 [11/02 17:57:16]\n",
+ "Rendering progress: 100% 20/20 [00:01<00:00, 14.19it/s]\n",
+ "FPS: 13.586990605766117 [11/02 17:57:18]\n",
+ "point nums: 40670 [11/02 17:57:21]\n",
+ "Rendering progress: 100% 160/160 [00:05<00:00, 28.82it/s]\n",
+ "FPS: 28.70296636421916 [11/02 17:57:26]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/hellwarrior/\" --skip_train --configs arguments/dnerf/hellwarrior.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ " \n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "ZZ-Qy6U6PO_A"
+ },
+ "execution_count": 11,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/hellwarrior/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "131ea499-00c3-42c1-8df7-b9e328cc47ce",
+ "id": "Cg-VOdltPO_A"
+ },
+ "execution_count": 12,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 12
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Hook**"
+ ],
+ "metadata": {
+ "id": "9XtQzIqSRx_b"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "outputId": "abe52166-ac92-49d3-b08a-74ba007174f1",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "JSeO1CdSRx_b"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 17:57:47.215799: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 17:57:47.215851: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 17:57:47.217256: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 17:57:48.531581: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/hook [11/02 17:57:49]\n",
+ "feature_dim: 64 [11/02 17:57:49]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 17:57:49]\n",
+ "Reading Training Transforms [11/02 17:57:49]\n",
+ "Reading Test Transforms [11/02 17:57:58]\n",
+ "Generating Video Transforms [11/02 17:57:59]\n",
+ "hello!!!! [11/02 17:57:59]\n",
+ "Generating random point cloud (2000)... [11/02 17:58:00]\n",
+ "Loading Training Cameras [11/02 17:58:00]\n",
+ "Loading Test Cameras [11/02 17:58:00]\n",
+ "Loading Video Cameras [11/02 17:58:00]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 17:58:00]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 17:58:00]\n",
+ "Number of points at initialisation : 2000 [11/02 17:58:00]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:58:00]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:58:00]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:58:00]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 17:58:02]\n",
+ "data loading done [11/02 17:58:04]\n",
+ "Training progress: 17% 500/3000 [00:23<01:19, 31.40it/s, Loss=0.0363756, psnr=19.63, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.04125666344428763 PSNR 18.058483684764187 SSIM 0.895937442779541 LPIPSA 0.2081648777512943 LPIPSV 0.14190128533279195 [11/02 17:58:27]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.04995431466137661 PSNR 16.829677750082578 SSIM 0.8892808556556702 LPIPSA 0.2189500209163217 LPIPSV 0.15209849354098826 [11/02 17:58:31]\n",
+ "Training progress: 33% 1000/3000 [00:47<01:03, 31.64it/s, Loss=0.0316190, psnr=22.34, point=2686]\n",
+ "[ITER 1000] Evaluating test: L1 0.034405323841116005 PSNR 18.531339645385742 SSIM 0.897787868976593 LPIPSA 0.17607162016279557 LPIPSV 0.1250098119763767 [11/02 17:58:52]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.04687396436929703 PSNR 16.494169066934024 SSIM 0.8864626288414001 LPIPSA 0.19846449617077322 LPIPSV 0.1401104523855097 [11/02 17:58:56]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 17:58:56]\n",
+ "Training progress: 50% 1500/3000 [01:02<00:21, 68.53it/s, Loss=0.0313449, psnr=17.99, point=7217]\n",
+ "[ITER 1500] Evaluating test: L1 0.03358306034522898 PSNR 18.49999158522662 SSIM 0.8981430530548096 LPIPSA 0.15958185756907745 LPIPSV 0.11920136300956502 [11/02 17:59:07]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.045812307583058584 PSNR 16.541005415074967 SSIM 0.8867713212966919 LPIPSA 0.1872968117103857 LPIPSV 0.1366551952326999 [11/02 17:59:11]\n",
+ "Training progress: 67% 2000/3000 [01:17<00:14, 67.13it/s, Loss=0.0386316, psnr=15.18, point=12227]\n",
+ "[ITER 2000] Evaluating test: L1 0.033518327783574074 PSNR 18.48017131581026 SSIM 0.8971757888793945 LPIPSA 0.15515480935573578 LPIPSV 0.11841768596102209 [11/02 17:59:22]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.045430370222996265 PSNR 16.56129881914924 SSIM 0.8863839507102966 LPIPSA 0.18467421873527415 LPIPSV 0.13626358044498108 [11/02 17:59:26]\n",
+ "Training progress: 83% 2500/3000 [01:33<00:09, 50.38it/s, Loss=0.0377900, psnr=17.15, point=16597]\n",
+ "[ITER 2500] Evaluating test: L1 0.033435635542606604 PSNR 18.51737100937787 SSIM 0.8964686393737793 LPIPSA 0.15290277056834278 LPIPSV 0.11834460847518023 [11/02 17:59:38]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.04448069632053375 PSNR 16.659716045155246 SSIM 0.8865996599197388 LPIPSA 0.1814904708196135 LPIPSV 0.13594782659236124 [11/02 17:59:42]\n",
+ "Training progress: 100% 3000/3000 [01:49<00:00, 63.71it/s, Loss=0.0393546, psnr=15.12, point=20411]\n",
+ "[ITER 3000] Evaluating test: L1 0.03335073672454147 PSNR 18.520282857558307 SSIM 0.8960763812065125 LPIPSA 0.15098830663106022 LPIPSV 0.11824281732825671 [11/02 17:59:53]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.04396200552582741 PSNR 16.705122611101935 SSIM 0.8870652914047241 LPIPSA 0.17939049882047317 LPIPSV 0.1356947974247091 [11/02 17:59:57]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 17:59:57]\n",
+ "reset opacity [11/02 17:59:58]\n",
+ "Training progress: 100% 3000/3000 [01:58<00:00, 25.41it/s, Loss=0.0393546, psnr=15.12, point=20411]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 17:59:58]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 17:59:59]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 17:59:59]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 18:00:00]\n",
+ "data loading done [11/02 18:00:01]\n",
+ "Training progress: 2% 500/20000 [00:33<17:48, 18.25it/s, Loss=0.0187687, psnr=20.45, point=21146]\n",
+ "[ITER 500] Evaluating test: L1 0.017355317304677823 PSNR 22.877246968886432 SSIM 0.9185815453529358 LPIPSA 0.13524442867321126 LPIPSV 0.09983867713633705 [11/02 18:00:36]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.018705836661598262 PSNR 22.51610834458295 SSIM 0.91542649269104 LPIPSA 0.1402525296982597 LPIPSV 0.10395137834198334 [11/02 18:00:40]\n",
+ "Training progress: 5% 1000/20000 [01:09<16:14, 19.49it/s, Loss=0.0128374, psnr=25.18, point=22395]\n",
+ "[ITER 1000] Evaluating test: L1 0.014079422580406946 PSNR 24.287266114178827 SSIM 0.9268055558204651 LPIPSA 0.1178001673782573 LPIPSV 0.08933524612118215 [11/02 18:01:11]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.015511586002128966 PSNR 23.767571505378275 SSIM 0.921700656414032 LPIPSA 0.12157854832270566 LPIPSV 0.09359476277056862 [11/02 18:01:16]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 18:01:16]\n",
+ "Training progress: 8% 1500/20000 [01:35<11:25, 26.98it/s, Loss=0.0095534, psnr=27.12, point=24395]\n",
+ "[ITER 1500] Evaluating test: L1 0.011387635405887575 PSNR 25.757115307976218 SSIM 0.9357467889785767 LPIPSA 0.09929569065570831 LPIPSV 0.07947556542999604 [11/02 18:01:38]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.011835734414703706 PSNR 25.51964064205394 SSIM 0.9337974190711975 LPIPSA 0.10254274308681488 LPIPSV 0.08234886781257741 [11/02 18:01:42]\n",
+ "Training progress: 10% 2000/20000 [02:02<12:58, 23.12it/s, Loss=0.0095856, psnr=26.95, point=26171]\n",
+ "[ITER 2000] Evaluating test: L1 0.009665363577797133 PSNR 26.918203802669748 SSIM 0.9432687163352966 LPIPSA 0.08828866306473226 LPIPSV 0.0734215795117266 [11/02 18:02:05]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.00979611647370107 PSNR 26.876566718606387 SSIM 0.9421588778495789 LPIPSA 0.09120743502588834 LPIPSV 0.07590916818555664 [11/02 18:02:09]\n",
+ "Training progress: 12% 2500/20000 [02:28<11:27, 25.47it/s, Loss=0.0098734, psnr=26.94, point=27469]\n",
+ "[ITER 2500] Evaluating test: L1 0.009412556557970889 PSNR 26.971386628992416 SSIM 0.9442901611328125 LPIPSA 0.0801679724279572 LPIPSV 0.06960538292632383 [11/02 18:02:31]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.009862460305585581 PSNR 26.583632637472714 SSIM 0.9417772889137268 LPIPSA 0.08370918429949704 LPIPSV 0.07287316418745939 [11/02 18:02:35]\n",
+ "Training progress: 15% 3000/20000 [02:55<10:34, 26.77it/s, Loss=0.0079914, psnr=27.15, point=28958]\n",
+ "[ITER 3000] Evaluating test: L1 0.00887526955236407 PSNR 27.27536526848288 SSIM 0.9474430084228516 LPIPSA 0.07218033688909867 LPIPSV 0.06513993946068428 [11/02 18:02:58]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.008394885726053925 PSNR 27.807607762953815 SSIM 0.9493540525436401 LPIPSA 0.07380872899118592 LPIPSV 0.06637055887018933 [11/02 18:03:02]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 18:03:02]\n",
+ "reset opacity [11/02 18:03:02]\n",
+ "Training progress: 18% 3500/20000 [03:20<10:03, 27.35it/s, Loss=0.0090490, psnr=25.16, point=30013]\n",
+ "[ITER 3500] Evaluating test: L1 0.008157603743979159 PSNR 28.008056079640106 SSIM 0.9511962532997131 LPIPSA 0.06636936458594658 LPIPSV 0.06207774601438466 [11/02 18:03:23]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.008305138168746935 PSNR 27.930437761194565 SSIM 0.94999760389328 LPIPSA 0.06791547400986447 LPIPSV 0.06408567266429172 [11/02 18:03:27]\n",
+ "Training progress: 20% 4000/20000 [03:46<09:57, 26.78it/s, Loss=0.0095910, psnr=26.55, point=31219]\n",
+ "[ITER 4000] Evaluating test: L1 0.0086357723647619 PSNR 27.415707195506375 SSIM 0.9482667446136475 LPIPSA 0.061919847612871844 LPIPSV 0.0605593159356538 [11/02 18:03:48]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.008756752564188312 PSNR 27.315310085521023 SSIM 0.9475423097610474 LPIPSA 0.06313602735891062 LPIPSV 0.062256303122814965 [11/02 18:03:53]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 18:03:53]\n",
+ "Training progress: 22% 4500/20000 [04:12<09:48, 26.35it/s, Loss=0.0067605, psnr=29.38, point=32236]\n",
+ "[ITER 4500] Evaluating test: L1 0.006866894224110772 PSNR 29.39057798946605 SSIM 0.9591644406318665 LPIPSA 0.054697918103021735 LPIPSV 0.05503992057021927 [11/02 18:04:14]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.006234833148910718 PSNR 30.075527303359088 SSIM 0.9623447060585022 LPIPSA 0.05463901403195718 LPIPSV 0.0551022144363207 [11/02 18:04:18]\n",
+ "Training progress: 25% 5000/20000 [04:37<09:21, 26.71it/s, Loss=0.0084570, psnr=25.96, point=33051]\n",
+ "[ITER 5000] Evaluating test: L1 0.007587340496042196 PSNR 28.364827324362363 SSIM 0.9550939202308655 LPIPSA 0.05291073615936672 LPIPSV 0.054681758231976456 [11/02 18:04:40]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.00728274742141366 PSNR 28.5517100165872 SSIM 0.9562844634056091 LPIPSA 0.05310694391236586 LPIPSV 0.05494892619111959 [11/02 18:04:44]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 18:04:44]\n",
+ "Training progress: 28% 5500/20000 [05:03<10:39, 22.67it/s, Loss=0.0044018, psnr=32.16, point=33844]\n",
+ "[ITER 5500] Evaluating test: L1 0.006344823783044429 PSNR 29.878510194666244 SSIM 0.9628403782844543 LPIPSA 0.04645637554280898 LPIPSV 0.050096904530244714 [11/02 18:05:06]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.005829113480799338 PSNR 30.42428476670209 SSIM 0.9655705690383911 LPIPSA 0.046090748380212224 LPIPSV 0.049355939468916726 [11/02 18:05:10]\n",
+ "Training progress: 30% 6000/20000 [05:30<09:44, 23.95it/s, Loss=0.0053930, psnr=30.28, point=34513]\n",
+ "[ITER 6000] Evaluating test: L1 0.006577562883167582 PSNR 29.52352445265826 SSIM 0.961412787437439 LPIPSA 0.04291114747962531 LPIPSV 0.048145251458182055 [11/02 18:05:33]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.00619866545586025 PSNR 29.830154194551355 SSIM 0.9636001586914062 LPIPSA 0.04207752557361827 LPIPSV 0.04746377599589965 [11/02 18:05:37]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 18:05:37]\n",
+ "reset opacity [11/02 18:05:37]\n",
+ "Training progress: 32% 6500/20000 [05:57<08:38, 26.05it/s, Loss=0.0055748, psnr=30.31, point=35032]\n",
+ "[ITER 6500] Evaluating test: L1 0.006392392370959415 PSNR 29.61504083521226 SSIM 0.9629237055778503 LPIPSA 0.04230749366037986 LPIPSV 0.04723224854644607 [11/02 18:05:59]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.005474197459133232 PSNR 30.948370204252356 SSIM 0.9681630730628967 LPIPSA 0.040330214206786716 LPIPSV 0.045678078032591767 [11/02 18:06:03]\n",
+ "Training progress: 35% 7000/20000 [06:23<08:28, 25.56it/s, Loss=0.0057558, psnr=29.39, point=35618]\n",
+ "[ITER 7000] Evaluating test: L1 0.005978950737592052 PSNR 30.150998396031998 SSIM 0.965756356716156 LPIPSA 0.038551125675439835 LPIPSV 0.04478955115465557 [11/02 18:06:25]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.0058394019108484775 PSNR 30.31564499350155 SSIM 0.9664899110794067 LPIPSA 0.0371948732611011 LPIPSV 0.04374612867832184 [11/02 18:06:30]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 18:06:30]\n",
+ "Training progress: 38% 7500/20000 [06:49<08:00, 26.02it/s, Loss=0.0048222, psnr=31.14, point=36190]\n",
+ "[ITER 7500] Evaluating test: L1 0.005651544176918618 PSNR 30.681758095236386 SSIM 0.9678158760070801 LPIPSA 0.035816452411167765 LPIPSV 0.0427349626141436 [11/02 18:06:52]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.004807767334996777 PSNR 31.993544746847714 SSIM 0.9733189940452576 LPIPSA 0.033590525498285013 LPIPSV 0.04039300518000827 [11/02 18:06:56]\n",
+ "Training progress: 40% 8000/20000 [07:16<07:45, 25.79it/s, Loss=0.0043466, psnr=34.24, point=36689]\n",
+ "[ITER 8000] Evaluating test: L1 0.005368371836512404 PSNR 31.146766886991614 SSIM 0.9698240756988525 LPIPSA 0.033301023547263706 LPIPSV 0.040828038773992485 [11/02 18:07:18]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.0043726656448972575 PSNR 32.82287249845617 SSIM 0.9761500954627991 LPIPSA 0.030599506133619475 LPIPSV 0.037856346982366895 [11/02 18:07:22]\n",
+ "Training progress: 42% 8500/20000 [07:42<08:43, 21.98it/s, Loss=0.0039893, psnr=33.36, point=37142]\n",
+ "[ITER 8500] Evaluating test: L1 0.005285397911553874 PSNR 31.29989792318905 SSIM 0.970458984375 LPIPSA 0.03133533818318563 LPIPSV 0.039549575132482195 [11/02 18:07:45]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.00392191663987058 PSNR 33.822159486658435 SSIM 0.9793525338172913 LPIPSA 0.027707071317469373 LPIPSV 0.03547880513703122 [11/02 18:07:49]\n",
+ "Training progress: 45% 9000/20000 [08:09<07:18, 25.11it/s, Loss=0.0037850, psnr=32.84, point=37651]\n",
+ "[ITER 9000] Evaluating test: L1 0.00514117003801991 PSNR 31.583253636079675 SSIM 0.9714398384094238 LPIPSA 0.03014577892335022 LPIPSV 0.038584621075321644 [11/02 18:08:12]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.004075838252902031 PSNR 33.40688413732192 SSIM 0.9785079956054688 LPIPSA 0.026530251016511637 LPIPSV 0.03473668457830653 [11/02 18:08:16]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 18:08:16]\n",
+ "reset opacity [11/02 18:08:17]\n",
+ "Training progress: 48% 9500/20000 [08:37<06:56, 25.23it/s, Loss=0.0035261, psnr=34.79, point=37806]\n",
+ "[ITER 9500] Evaluating test: L1 0.005119101048501975 PSNR 31.65118183809168 SSIM 0.9717811346054077 LPIPSA 0.029657370465643266 LPIPSV 0.037846845958162754 [11/02 18:08:39]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.0037085778518196415 PSNR 34.22278538872214 SSIM 0.9810218811035156 LPIPSA 0.025413589442477506 LPIPSV 0.0333847584312453 [11/02 18:08:43]\n",
+ "Training progress: 50% 10000/20000 [09:03<06:32, 25.51it/s, Loss=0.0038496, psnr=33.58, point=38080]\n",
+ "[ITER 10000] Evaluating test: L1 0.004913311081883662 PSNR 31.939671684713925 SSIM 0.9731882810592651 LPIPSA 0.027699884575079468 LPIPSV 0.03656587333363645 [11/02 18:09:06]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.0033830007099929977 PSNR 35.0988125520594 SSIM 0.9829714298248291 LPIPSA 0.022974921719116324 LPIPSV 0.031669218640993625 [11/02 18:09:10]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 18:09:10]\n",
+ "Training progress: 52% 10500/20000 [09:30<06:48, 23.26it/s, Loss=0.0031380, psnr=35.25, point=38316]\n",
+ "[ITER 10500] Evaluating test: L1 0.004858247976859703 PSNR 32.05097209706026 SSIM 0.9736260771751404 LPIPSA 0.02648219181334271 LPIPSV 0.035736057030804014 [11/02 18:09:34]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0034129406352910925 PSNR 34.97601767147289 SSIM 0.9831058382987976 LPIPSA 0.021697207408792833 LPIPSV 0.030588837559608853 [11/02 18:09:38]\n",
+ "Training progress: 55% 11000/20000 [09:58<06:14, 24.06it/s, Loss=0.0031231, psnr=35.78, point=38564]\n",
+ "[ITER 11000] Evaluating test: L1 0.0048383147565319255 PSNR 32.10052636090447 SSIM 0.9737254977226257 LPIPSA 0.02546028310761732 LPIPSV 0.03512296529815478 [11/02 18:10:01]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.003170539501725751 PSNR 35.66833742927103 SSIM 0.9846951961517334 LPIPSA 0.020195515607209766 LPIPSV 0.02941603881909567 [11/02 18:10:05]\n",
+ "Training progress: 57% 11500/20000 [10:25<05:37, 25.17it/s, Loss=0.0031424, psnr=34.72, point=38798]\n",
+ "[ITER 11500] Evaluating test: L1 0.004702114335754339 PSNR 32.30085148530848 SSIM 0.9746522307395935 LPIPSA 0.02459597894374062 LPIPSV 0.034414063372156196 [11/02 18:10:28]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.0029573896351982563 PSNR 36.265638912425324 SSIM 0.9858790040016174 LPIPSA 0.01884680345435353 LPIPSV 0.02823771645917612 [11/02 18:10:32]\n",
+ "Training progress: 60% 12000/20000 [10:51<05:14, 25.43it/s, Loss=0.0026531, psnr=38.94, point=38987]\n",
+ "[ITER 12000] Evaluating test: L1 0.004629391314023558 PSNR 32.448045281802905 SSIM 0.9751397371292114 LPIPSA 0.023814774830551708 LPIPSV 0.033843672012581545 [11/02 18:10:54]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.0028651014299077146 PSNR 36.62410534129423 SSIM 0.9865437746047974 LPIPSA 0.018079739776166046 LPIPSV 0.02744755126974162 [11/02 18:10:58]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 18:10:58]\n",
+ "reset opacity [11/02 18:10:59]\n",
+ "Training progress: 62% 12500/20000 [11:19<05:09, 24.23it/s, Loss=0.0027483, psnr=36.30, point=39084]\n",
+ "[ITER 12500] Evaluating test: L1 0.004568642662728534 PSNR 32.605551326976105 SSIM 0.9755740761756897 LPIPSA 0.023820564260377604 LPIPSV 0.03352845153387855 [11/02 18:11:22]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.0027966723188429196 PSNR 36.77877964692957 SSIM 0.9869693517684937 LPIPSA 0.017889395410961965 LPIPSV 0.027054845738936874 [11/02 18:11:26]\n",
+ "Training progress: 65% 13000/20000 [11:46<05:08, 22.66it/s, Loss=0.0029271, psnr=36.46, point=39175]\n",
+ "[ITER 13000] Evaluating test: L1 0.004597263040897601 PSNR 32.51923056209789 SSIM 0.9754272103309631 LPIPSA 0.022933347808087572 LPIPSV 0.033179299358059376 [11/02 18:11:49]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.002738131771740668 PSNR 37.03037441478056 SSIM 0.9874515533447266 LPIPSA 0.016770821119494298 LPIPSV 0.026295396608903128 [11/02 18:11:53]\n",
+ "Training progress: 68% 13500/20000 [12:13<04:20, 24.99it/s, Loss=0.0023418, psnr=40.48, point=39273]\n",
+ "[ITER 13500] Evaluating test: L1 0.004528915509581566 PSNR 32.64909688164206 SSIM 0.9758092164993286 LPIPSA 0.02237814588143545 LPIPSV 0.03276322420467349 [11/02 18:12:16]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.002582596053424127 PSNR 37.54177542293773 SSIM 0.9882064461708069 LPIPSA 0.015931161942289156 LPIPSV 0.02556094658725402 [11/02 18:12:20]\n",
+ "Training progress: 70% 14000/20000 [12:40<03:58, 25.18it/s, Loss=0.0026724, psnr=36.74, point=39360]\n",
+ "[ITER 14000] Evaluating test: L1 0.004520180306452162 PSNR 32.65037491742302 SSIM 0.9758769869804382 LPIPSA 0.02198992932544035 LPIPSV 0.032487275517162156 [11/02 18:12:42]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.002527153167380568 PSNR 37.659365710090185 SSIM 0.9885650277137756 LPIPSA 0.015329277690719156 LPIPSV 0.024971433093442637 [11/02 18:12:47]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 18:12:47]\n",
+ "Training progress: 72% 14500/20000 [13:07<03:44, 24.54it/s, Loss=0.0025352, psnr=38.10, point=39449]\n",
+ "[ITER 14500] Evaluating test: L1 0.004493263550102711 PSNR 32.72252094044405 SSIM 0.9760388731956482 LPIPSA 0.02156905287548023 LPIPSV 0.03224609704578624 [11/02 18:13:10]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0024866254671531566 PSNR 37.8453005622415 SSIM 0.9888366460800171 LPIPSA 0.01488524565801901 LPIPSV 0.024684986976139685 [11/02 18:13:14]\n",
+ "Training progress: 75% 15000/20000 [13:34<03:53, 21.45it/s, Loss=0.0024503, psnr=39.15, point=39519]\n",
+ "[ITER 15000] Evaluating test: L1 0.004471481024451992 PSNR 32.756254757151886 SSIM 0.9762061834335327 LPIPSA 0.021248168257229468 LPIPSV 0.03194659323815037 [11/02 18:13:37]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0024259548346676372 PSNR 38.06902088838465 SSIM 0.9891737699508667 LPIPSA 0.014375785036998637 LPIPSV 0.024212910738937995 [11/02 18:13:41]\n",
+ "Training progress: 78% 15500/20000 [14:01<03:01, 24.79it/s, Loss=0.0025803, psnr=38.01, point=39519]\n",
+ "[ITER 15500] Evaluating test: L1 0.004455615013070843 PSNR 32.790670955882355 SSIM 0.9762925505638123 LPIPSA 0.02093277679865851 LPIPSV 0.03172055347000852 [11/02 18:14:03]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0023655578603639323 PSNR 38.30636753755457 SSIM 0.9894798398017883 LPIPSA 0.013882617740070118 LPIPSV 0.02373189095626859 [11/02 18:14:07]\n",
+ "Training progress: 80% 16000/20000 [14:27<02:37, 25.45it/s, Loss=0.0024385, psnr=38.75, point=39519]\n",
+ "[ITER 16000] Evaluating test: L1 0.004437510622665286 PSNR 32.840532302856445 SSIM 0.9763898253440857 LPIPSA 0.020574140143306816 LPIPSV 0.03161009554477299 [11/02 18:14:30]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0023473449023988318 PSNR 38.36010629990522 SSIM 0.9896154999732971 LPIPSA 0.013566321967279209 LPIPSV 0.023468229481402564 [11/02 18:14:34]\n",
+ "Training progress: 82% 16500/20000 [14:54<02:15, 25.90it/s, Loss=0.0024966, psnr=39.44, point=39519]\n",
+ "[ITER 16500] Evaluating test: L1 0.004455152679892147 PSNR 32.792140511905444 SSIM 0.9763392210006714 LPIPSA 0.020430106614880702 LPIPSV 0.03151435251621639 [11/02 18:14:56]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0023033170385614913 PSNR 38.50364505543428 SSIM 0.989878237247467 LPIPSA 0.013217487208106938 LPIPSV 0.02311458537245498 [11/02 18:15:00]\n",
+ "Training progress: 85% 17000/20000 [15:20<01:56, 25.70it/s, Loss=0.0025038, psnr=38.19, point=39519]\n",
+ "[ITER 17000] Evaluating test: L1 0.004421865909962969 PSNR 32.85810784732594 SSIM 0.9764929413795471 LPIPSA 0.020161639022476533 LPIPSV 0.03133493968669106 [11/02 18:15:22]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.002236457144403282 PSNR 38.75170606725356 SSIM 0.9901599884033203 LPIPSA 0.012932778226540369 LPIPSV 0.02280734441078761 [11/02 18:15:27]\n",
+ "Training progress: 88% 17500/20000 [15:46<01:48, 22.96it/s, Loss=0.0021583, psnr=39.39, point=39519]\n",
+ "[ITER 17500] Evaluating test: L1 0.004393898142392144 PSNR 32.90949597078211 SSIM 0.9766743779182434 LPIPSA 0.019961257355616373 LPIPSV 0.031166120377533576 [11/02 18:15:49]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.0022350720965358265 PSNR 38.77663444070255 SSIM 0.990222692489624 LPIPSA 0.01273247692734003 LPIPSV 0.022594462970600408 [11/02 18:15:53]\n",
+ "Training progress: 90% 18000/20000 [16:13<01:20, 24.83it/s, Loss=0.0023350, psnr=39.78, point=39519]\n",
+ "[ITER 18000] Evaluating test: L1 0.004394845776807736 PSNR 32.928970224717084 SSIM 0.9766545295715332 LPIPSA 0.019786313450073496 LPIPSV 0.031139766906990725 [11/02 18:16:16]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.0021786029751905624 PSNR 38.98536928962259 SSIM 0.990453839302063 LPIPSA 0.012469124322866692 LPIPSV 0.02237203821320744 [11/02 18:16:20]\n",
+ "Training progress: 92% 18500/20000 [16:40<00:58, 25.46it/s, Loss=0.0020580, psnr=41.33, point=39519]\n",
+ "[ITER 18500] Evaluating test: L1 0.004405585172421792 PSNR 32.90545429902918 SSIM 0.9765904545783997 LPIPSA 0.01972682259100325 LPIPSV 0.031074097380042076 [11/02 18:16:42]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0021602416630176935 PSNR 39.059151593376605 SSIM 0.9905562996864319 LPIPSA 0.012346417533562463 LPIPSV 0.022167014725068036 [11/02 18:16:46]\n",
+ "Training progress: 95% 19000/20000 [17:06<00:38, 25.90it/s, Loss=0.0021138, psnr=39.63, point=39519]\n",
+ "[ITER 19000] Evaluating test: L1 0.00438943865489872 PSNR 32.94438732371611 SSIM 0.9766784310340881 LPIPSA 0.01952850100967814 LPIPSV 0.030981780303751722 [11/02 18:17:09]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.0021385650821577977 PSNR 39.16545172298656 SSIM 0.9906765222549438 LPIPSA 0.012092018280835712 LPIPSV 0.022071363240042153 [11/02 18:17:13]\n",
+ "Training progress: 98% 19500/20000 [17:32<00:19, 25.75it/s, Loss=0.0021546, psnr=38.53, point=39519]\n",
+ "[ITER 19500] Evaluating test: L1 0.004378597827299552 PSNR 32.9598123887006 SSIM 0.9767182469367981 LPIPSA 0.019436391837456646 LPIPSV 0.030908426160321516 [11/02 18:17:35]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.0021240437910070315 PSNR 39.2304801940918 SSIM 0.9908209443092346 LPIPSA 0.01191437488202663 LPIPSV 0.021773624507819906 [11/02 18:17:39]\n",
+ "Training progress: 100% 20000/20000 [17:59<00:00, 18.53it/s, Loss=0.0023264, psnr=39.60, point=39519]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.0043757758240270265 PSNR 32.97477710948271 SSIM 0.9767575263977051 LPIPSA 0.019354190239134955 LPIPSV 0.030843206297825363 [11/02 18:18:02]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0020984484992154382 PSNR 39.299454857321344 SSIM 0.9908888339996338 LPIPSA 0.011814976604107548 LPIPSV 0.02169408057542408 [11/02 18:18:06]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 18:18:06]\n",
+ "\n",
+ "Training complete. [11/02 18:18:06]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/hook --port 6017 --expname \"dnerf/hook\" --configs arguments/dnerf/hook.py"
+ ]
+ },
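+ {
+ "cell_type": "markdown",
+ "source": [
+ "*Note (sketch, not part of the original run):* the training cell above prints evaluation lines of the form `[ITER n] Evaluating test/train: L1 ... PSNR ... SSIM ... LPIPSA ... LPIPSV ...`. The hypothetical cell below shows one way to collect those metrics from a captured copy of that log; the `/content/train_log.txt` path is an assumption, since the notebook itself does not save the console output to a file."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only: gather (iteration, split, PSNR) triples from a captured training log.\n",
+ "# The line format is the one printed by the cell above; the log path is hypothetical.\n",
+ "rows = []\n",
+ "with open('/content/train_log.txt') as f:\n",
+ "    for line in f:\n",
+ "        if '] Evaluating ' in line and ' PSNR ' in line:\n",
+ "            tokens = line.split()\n",
+ "            iteration = int(tokens[1].rstrip(']'))\n",
+ "            split = tokens[3].rstrip(':')\n",
+ "            psnr = float(tokens[7])\n",
+ "            rows.append((iteration, split, psnr))\n",
+ "print(rows[-2:])"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },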
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "outputId": "5627ee41-1ed4-42a9-afa6-2244d9bea9d7",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "w5-J_Y19Rx_b"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/hook/cfg_args\n",
+ "Config file found: output/dnerf/hook/cfg_args\n",
+ "Rendering output/dnerf/hook/\n",
+ "feature_dim: 64 [11/02 18:18:14]\n",
+ "Loading trained model at iteration 20000 [11/02 18:18:14]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 18:18:14]\n",
+ "Reading Training Transforms [11/02 18:18:14]\n",
+ "Reading Test Transforms [11/02 18:18:22]\n",
+ "Generating Video Transforms [11/02 18:18:24]\n",
+ "hello!!!! [11/02 18:18:24]\n",
+ "Generating random point cloud (2000)... [11/02 18:18:24]\n",
+ "Loading Training Cameras [11/02 18:18:24]\n",
+ "Loading Test Cameras [11/02 18:18:24]\n",
+ "Loading Video Cameras [11/02 18:18:24]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 18:18:24]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 18:18:24]\n",
+ "loading model from existsoutput/dnerf/hook/point_cloud/iteration_20000 [11/02 18:18:24]\n",
+ "point nums: 39519 [11/02 18:18:25]\n",
+ "Rendering progress: 100% 20/20 [00:00<00:00, 22.68it/s]\n",
+ "FPS: 21.71064756985924 [11/02 18:18:25]\n",
+ "point nums: 39519 [11/02 18:18:28]\n",
+ "Rendering progress: 100% 160/160 [00:05<00:00, 27.87it/s]\n",
+ "FPS: 27.745629370993043 [11/02 18:18:34]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/hook/\" --skip_train --configs arguments/dnerf/hook.py"
+ ]
+ },
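+ {
+ "cell_type": "markdown",
+ "source": [
+ "*Note (sketch, not part of the original run):* `render.py` above is invoked with `--skip_train`, so only the test and video cameras are rendered. If that flag simply skips the training split (assumed semantics), dropping it should also render the training views, as in the hypothetical cell below."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only (assumed flag semantics): also render the training split by dropping --skip_train.\n",
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/hook/\" --configs arguments/dnerf/hook.py"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },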
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ " \n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "ZsX0kdxZRx_c"
+ },
+ "execution_count": 15,
+ "outputs": []
+ },
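+ {
+ "cell_type": "markdown",
+ "source": [
+ "*Note (sketch):* `display_video` reads the MP4, base64-encodes it, and returns an HTML `<video>` element whose source is a data URL, so the clip plays inline in Colab. The following cell performs the actual display; the hypothetical snippet below only adds a guard for a missing file, using the same output path that that cell hard-codes."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Sketch only: check that the render exists before embedding it inline.\n",
+ "import os\n",
+ "candidate = '/content/4DGaussians/output/dnerf/hook/video/ours_20000/video_rgb.mp4'\n",
+ "if os.path.exists(candidate):\n",
+ "    display_video(candidate)\n",
+ "else:\n",
+ "    print('render not found:', candidate)"
+ ],
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },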
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/hook/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "a6cf10a8-6e50-40a7-d3fa-7ff233ce0a2a",
+ "id": "kiLF7f7IRx_c"
+ },
+ "execution_count": 16,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 16
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Jumping Jacks**"
+ ],
+ "metadata": {
+ "id": "yLZDDHL1SGYU"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "outputId": "cb69dc08-e71f-4935-ed51-fafa37cb16ef",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Lj7Xp_GvSGYV"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 18:18:53.938585: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 18:18:53.938637: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 18:18:53.939973: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 18:18:55.205681: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/jumpingjacks [11/02 18:18:56]\n",
+ "feature_dim: 64 [11/02 18:18:56]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 18:18:56]\n",
+ "Reading Training Transforms [11/02 18:18:56]\n",
+ "Reading Test Transforms [11/02 18:19:11]\n",
+ "Generating Video Transforms [11/02 18:19:14]\n",
+ "hello!!!! [11/02 18:19:14]\n",
+ "Generating random point cloud (2000)... [11/02 18:19:14]\n",
+ "Loading Training Cameras [11/02 18:19:14]\n",
+ "Loading Test Cameras [11/02 18:19:14]\n",
+ "Loading Video Cameras [11/02 18:19:14]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 18:19:14]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 18:19:14]\n",
+ "Number of points at initialisation : 2000 [11/02 18:19:14]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 18:19:14]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 18:19:15]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 18:19:15]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 18:19:17]\n",
+ "data loading done [11/02 18:19:21]\n",
+ "Training progress: 17% 500/3000 [00:25<01:39, 25.18it/s, Loss=0.0241143, psnr=20.94, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.02710624214480905 PSNR 20.312154657700482 SSIM 0.9371089935302734 LPIPSA 0.1671809874036733 LPIPSV 0.10731351375579834 [11/02 18:19:44]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.029899073107277647 PSNR 19.382957570693073 SSIM 0.9330521821975708 LPIPSA 0.1704596048768829 LPIPSV 0.11282826609471265 [11/02 18:19:47]\n",
+ "Training progress: 33% 1000/3000 [00:48<01:14, 26.78it/s, Loss=0.0212415, psnr=18.26, point=2364]\n",
+ "[ITER 1000] Evaluating test: L1 0.023258380701436716 PSNR 20.56044028787052 SSIM 0.9378436803817749 LPIPSA 0.142013828324921 LPIPSV 0.09627915151855525 [11/02 18:20:07]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.029074055307051715 PSNR 18.789797951193417 SSIM 0.9297546744346619 LPIPSA 0.15191367969793432 LPIPSV 0.10538694963735692 [11/02 18:20:11]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 18:20:11]\n",
+ "Training progress: 50% 1490/3000 [01:03<00:16, 90.64it/s, Loss=0.0183983, psnr=20.74, point=5031]\n",
+ "[ITER 1500] Evaluating test: L1 0.022526897298281685 PSNR 20.643070108750287 SSIM 0.9388591647148132 LPIPSA 0.1324200020993457 LPIPSV 0.09272175786249778 [11/02 18:20:22]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.028837918906527406 PSNR 18.71863342733944 SSIM 0.9301009774208069 LPIPSA 0.14509287476539612 LPIPSV 0.10407668045338463 [11/02 18:20:26]\n",
+ "Training progress: 67% 2000/3000 [01:18<00:14, 69.60it/s, Loss=0.0239293, psnr=21.77, point=8047]\n",
+ "[ITER 2000] Evaluating test: L1 0.02254987308098113 PSNR 20.682474248549518 SSIM 0.9385100603103638 LPIPSA 0.13055872435078902 LPIPSV 0.09237229188575465 [11/02 18:20:37]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.028475023477393037 PSNR 18.827850229599896 SSIM 0.9300740361213684 LPIPSA 0.1442501623840893 LPIPSV 0.1039102686678662 [11/02 18:20:41]\n",
+ "Training progress: 83% 2500/3000 [01:32<00:07, 69.17it/s, Loss=0.0199539, psnr=22.56, point=10655]\n",
+ "[ITER 2500] Evaluating test: L1 0.022247892283998868 PSNR 20.6953041974236 SSIM 0.9391475319862366 LPIPSA 0.12873758748173714 LPIPSV 0.09156651707256541 [11/02 18:20:51]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.028361164690817103 PSNR 18.832933089312384 SSIM 0.9305316805839539 LPIPSA 0.14296499157653136 LPIPSV 0.10346011992763071 [11/02 18:20:55]\n",
+ "Training progress: 100% 3000/3000 [01:47<00:00, 69.63it/s, Loss=0.0222180, psnr=22.08, point=12893]\n",
+ "[ITER 3000] Evaluating test: L1 0.0222608172608649 PSNR 20.715256298289578 SSIM 0.9391190409660339 LPIPSA 0.12749284878373146 LPIPSV 0.0911806427380618 [11/02 18:21:06]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.0284152426483 PSNR 18.797605851117304 SSIM 0.9305304288864136 LPIPSA 0.14226555561318116 LPIPSV 0.10335023613537059 [11/02 18:21:10]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 18:21:10]\n",
+ "reset opacity [11/02 18:21:10]\n",
+ "Training progress: 100% 3000/3000 [01:55<00:00, 25.89it/s, Loss=0.0222180, psnr=22.08, point=12893]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 18:21:10]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 18:21:10]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 18:21:10]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 18:21:12]\n",
+ "data loading done [11/02 18:21:14]\n",
+ "Training progress: 2% 500/20000 [00:31<18:40, 17.40it/s, Loss=0.0077003, psnr=29.15, point=13314]\n",
+ "[ITER 500] Evaluating test: L1 0.014371544232263285 PSNR 23.92514217601103 SSIM 0.9466734528541565 LPIPSA 0.11041330765275394 LPIPSV 0.08198071643710136 [11/02 18:21:46]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.01603389120496371 PSNR 23.568682165706857 SSIM 0.9435392022132874 LPIPSA 0.11354073093217962 LPIPSV 0.08481246013851727 [11/02 18:21:50]\n",
+ "Training progress: 5% 1000/20000 [01:04<14:45, 21.46it/s, Loss=0.0089631, psnr=25.70, point=14398]\n",
+ "[ITER 1000] Evaluating test: L1 0.010306531494921622 PSNR 25.885637395522174 SSIM 0.9545285701751709 LPIPSA 0.08879655654377797 LPIPSV 0.07218663164359682 [11/02 18:22:19]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.011295803986928043 PSNR 25.355659821454218 SSIM 0.9504712224006653 LPIPSA 0.08784203691517606 LPIPSV 0.07339959613540593 [11/02 18:22:23]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 18:22:23]\n",
+ "Training progress: 8% 1500/20000 [01:28<10:00, 30.82it/s, Loss=0.0122351, psnr=22.85, point=16155]\n",
+ "[ITER 1500] Evaluating test: L1 0.009166255976785631 PSNR 26.387819065767175 SSIM 0.9560515284538269 LPIPSA 0.07606648894793847 LPIPSV 0.06625922133817393 [11/02 18:22:43]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.009902930161093964 PSNR 26.0817788067986 SSIM 0.952022910118103 LPIPSA 0.07586595328415141 LPIPSV 0.06846592860186801 [11/02 18:22:47]\n",
+ "Training progress: 10% 2000/20000 [01:52<10:06, 29.68it/s, Loss=0.0078661, psnr=25.15, point=17847]\n",
+ "[ITER 2000] Evaluating test: L1 0.008158379165893969 PSNR 27.092139636769012 SSIM 0.959454357624054 LPIPSA 0.0667896625750205 LPIPSV 0.06016934838365106 [11/02 18:23:06]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.008944218156530577 PSNR 26.369252373190488 SSIM 0.9553579688072205 LPIPSA 0.06690391427015557 LPIPSV 0.06274166955229114 [11/02 18:23:10]\n",
+ "Training progress: 12% 2500/20000 [02:15<09:42, 30.04it/s, Loss=0.0047407, psnr=31.97, point=19131]\n",
+ "[ITER 2500] Evaluating test: L1 0.006926961441743462 PSNR 28.202732422772577 SSIM 0.9631562232971191 LPIPSA 0.059377031619934476 LPIPSV 0.055852832930052984 [11/02 18:23:30]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.00719194977051195 PSNR 28.088910158942728 SSIM 0.9613397121429443 LPIPSA 0.05772425245274516 LPIPSV 0.056826754637500816 [11/02 18:23:34]\n",
+ "Training progress: 15% 3000/20000 [02:39<09:30, 29.82it/s, Loss=0.0069372, psnr=28.30, point=20182]\n",
+ "[ITER 3000] Evaluating test: L1 0.006203596192576429 PSNR 29.0753116607666 SSIM 0.9666063785552979 LPIPSA 0.05544819069259307 LPIPSV 0.052198945928145855 [11/02 18:23:54]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.006153660155284931 PSNR 29.087464613073013 SSIM 0.966189444065094 LPIPSA 0.053572231794104856 LPIPSV 0.052217005587675995 [11/02 18:23:58]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 18:23:58]\n",
+ "reset opacity [11/02 18:23:58]\n",
+ "Training progress: 18% 3500/20000 [03:03<09:01, 30.49it/s, Loss=0.0048800, psnr=29.36, point=20852]\n",
+ "[ITER 3500] Evaluating test: L1 0.006038734189453809 PSNR 29.148491130155676 SSIM 0.9681472778320312 LPIPSA 0.05219548272297663 LPIPSV 0.050380331628462845 [11/02 18:24:17]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.006315803853794932 PSNR 28.904376534854663 SSIM 0.9656388759613037 LPIPSA 0.05096843422335737 LPIPSV 0.05082652893136529 [11/02 18:24:21]\n",
+ "Training progress: 20% 4000/20000 [03:26<08:55, 29.87it/s, Loss=0.0053586, psnr=27.59, point=21542]\n",
+ "[ITER 4000] Evaluating test: L1 0.005153235945138423 PSNR 30.189102621639474 SSIM 0.9716442227363586 LPIPSA 0.046942523844978386 LPIPSV 0.04612090241383104 [11/02 18:24:41]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.0055427331860889405 PSNR 29.517062243293314 SSIM 0.969357430934906 LPIPSA 0.045693245113772506 LPIPSV 0.04634571119266398 [11/02 18:24:44]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 18:24:45]\n",
+ "Training progress: 22% 4500/20000 [03:49<08:30, 30.37it/s, Loss=0.0045442, psnr=29.46, point=22088]\n",
+ "[ITER 4500] Evaluating test: L1 0.005260773324955474 PSNR 30.015955532298367 SSIM 0.9697749018669128 LPIPSA 0.045138358412420046 LPIPSV 0.04544217303833541 [11/02 18:25:04]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.005686213440426132 PSNR 29.521374646355124 SSIM 0.9672065377235413 LPIPSA 0.044006500776638 LPIPSV 0.04618562023867579 [11/02 18:25:08]\n",
+ "Training progress: 25% 5000/20000 [04:12<08:25, 29.67it/s, Loss=0.0042615, psnr=30.62, point=22584]\n",
+ "[ITER 5000] Evaluating test: L1 0.004522933946538936 PSNR 31.210225049187155 SSIM 0.9747005105018616 LPIPSA 0.04147335147375569 LPIPSV 0.04195819126770777 [11/02 18:25:27]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.004638302605599165 PSNR 30.65725651909323 SSIM 0.9733245968818665 LPIPSA 0.039187411284622026 LPIPSV 0.04196020062355434 [11/02 18:25:31]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 18:25:31]\n",
+ "Training progress: 28% 5500/20000 [04:36<08:08, 29.70it/s, Loss=0.0029166, psnr=32.93, point=23021]\n",
+ "[ITER 5500] Evaluating test: L1 0.004250064618912909 PSNR 31.66934652889476 SSIM 0.9758867621421814 LPIPSA 0.039180763887570184 LPIPSV 0.04006083167212851 [11/02 18:25:51]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.004200174076044384 PSNR 31.608665129717657 SSIM 0.9758791327476501 LPIPSA 0.03591713774949312 LPIPSV 0.03951402654980912 [11/02 18:25:55]\n",
+ "Training progress: 30% 6000/20000 [05:00<07:56, 29.41it/s, Loss=0.0037385, psnr=30.80, point=23370]\n",
+ "[ITER 6000] Evaluating test: L1 0.00424037131426089 PSNR 31.33752721898696 SSIM 0.9756608009338379 LPIPSA 0.03750892290297676 LPIPSV 0.038872886339531225 [11/02 18:26:14]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.004203968571827692 PSNR 31.582007127649643 SSIM 0.9757416844367981 LPIPSA 0.03439526380423237 LPIPSV 0.03835123284336399 [11/02 18:26:18]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 18:26:18]\n",
+ "reset opacity [11/02 18:26:19]\n",
+ "Training progress: 32% 6500/20000 [05:24<07:48, 28.79it/s, Loss=0.0036734, psnr=30.45, point=23548]\n",
+ "[ITER 6500] Evaluating test: L1 0.004074710791054017 PSNR 31.944759593290442 SSIM 0.9767956733703613 LPIPSA 0.03626057363170035 LPIPSV 0.03749223349287229 [11/02 18:26:39]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.003980318419909214 PSNR 32.128413144279925 SSIM 0.977087140083313 LPIPSA 0.033133078585652745 LPIPSV 0.03676721628974466 [11/02 18:26:42]\n",
+ "Training progress: 35% 7000/20000 [05:48<08:03, 26.86it/s, Loss=0.0020759, psnr=39.56, point=23764]\n",
+ "[ITER 7000] Evaluating test: L1 0.0037789968979161453 PSNR 32.22492442411535 SSIM 0.9782325625419617 LPIPSA 0.03383819747935323 LPIPSV 0.0362699057578164 [11/02 18:27:02]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.003991140942911015 PSNR 31.944228340597714 SSIM 0.9768080115318298 LPIPSA 0.03125307024182642 LPIPSV 0.0356675626929192 [11/02 18:27:06]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 18:27:06]\n",
+ "Training progress: 38% 7500/20000 [06:12<08:02, 25.93it/s, Loss=0.0032961, psnr=31.46, point=23933]\n",
+ "[ITER 7500] Evaluating test: L1 0.0037251527606071357 PSNR 32.77735811121323 SSIM 0.9789029359817505 LPIPSA 0.032343049150179416 LPIPSV 0.034920737037763876 [11/02 18:27:26]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.0034567788517212168 PSNR 33.44094332526712 SSIM 0.9799153804779053 LPIPSA 0.02871931760626681 LPIPSV 0.03346619031885091 [11/02 18:27:30]\n",
+ "Training progress: 40% 8000/20000 [06:35<08:35, 23.27it/s, Loss=0.0024295, psnr=37.17, point=24084]\n",
+ "[ITER 8000] Evaluating test: L1 0.0035220249795683604 PSNR 33.07514134575339 SSIM 0.9798880219459534 LPIPSA 0.030461773495463765 LPIPSV 0.03359439629403984 [11/02 18:27:50]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.0032528147430104367 PSNR 33.96747589111328 SSIM 0.9810803532600403 LPIPSA 0.026826014906606254 LPIPSV 0.032145552391953325 [11/02 18:27:54]\n",
+ "Training progress: 42% 8500/20000 [06:59<07:34, 25.28it/s, Loss=0.0036363, psnr=31.16, point=24214]\n",
+ "[ITER 8500] Evaluating test: L1 0.0032748170573647845 PSNR 33.7009822621065 SSIM 0.9812740087509155 LPIPSA 0.028727128526524585 LPIPSV 0.03211115103434114 [11/02 18:28:14]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.0032516251427724082 PSNR 33.800359950346106 SSIM 0.9810361266136169 LPIPSA 0.02574104136403869 LPIPSV 0.031329532756524926 [11/02 18:28:18]\n",
+ "Training progress: 45% 9000/20000 [07:23<06:51, 26.72it/s, Loss=0.0020959, psnr=40.88, point=24306]\n",
+ "[ITER 9000] Evaluating test: L1 0.0031649012269233077 PSNR 33.87025272144991 SSIM 0.981970489025116 LPIPSA 0.027622271356556344 LPIPSV 0.03151215977199814 [11/02 18:28:38]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.002793642569004613 PSNR 34.92502358380486 SSIM 0.983968198299408 LPIPSA 0.0236304008456714 LPIPSV 0.029542722465360865 [11/02 18:28:42]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 18:28:42]\n",
+ "reset opacity [11/02 18:28:42]\n",
+ "Training progress: 48% 9500/20000 [07:47<06:43, 26.01it/s, Loss=0.0027470, psnr=33.97, point=24368]\n",
+ "[ITER 9500] Evaluating test: L1 0.0031569887753849957 PSNR 33.96403009751264 SSIM 0.9820945858955383 LPIPSA 0.027360702399164438 LPIPSV 0.03105309112545322 [11/02 18:29:02]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.0028558048241607405 PSNR 34.851489010979144 SSIM 0.983700156211853 LPIPSA 0.023630955968709552 LPIPSV 0.02925186061902958 [11/02 18:29:06]\n",
+ "Training progress: 50% 10000/20000 [08:11<06:07, 27.19it/s, Loss=0.0019086, psnr=36.03, point=24442]\n",
+ "[ITER 10000] Evaluating test: L1 0.0030327317791114397 PSNR 34.147565729477826 SSIM 0.9828214049339294 LPIPSA 0.025493967127712333 LPIPSV 0.029985568951815367 [11/02 18:29:26]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.002679194036104223 PSNR 35.13378053552964 SSIM 0.9848082065582275 LPIPSA 0.021619555732125744 LPIPSV 0.027824260897057897 [11/02 18:29:30]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 18:29:30]\n",
+ "Training progress: 52% 10500/20000 [08:35<05:21, 29.52it/s, Loss=0.0018865, psnr=38.16, point=24514]\n",
+ "[ITER 10500] Evaluating test: L1 0.00294649536269443 PSNR 34.39047835854923 SSIM 0.9832819700241089 LPIPSA 0.024591245359795934 LPIPSV 0.02925098260097644 [11/02 18:29:50]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0024808338699478876 PSNR 35.8442580279182 SSIM 0.9859755039215088 LPIPSA 0.02028107856783797 LPIPSV 0.026838390211410382 [11/02 18:29:54]\n",
+ "Training progress: 55% 11000/20000 [08:59<05:09, 29.09it/s, Loss=0.0026960, psnr=35.41, point=24564]\n",
+ "[ITER 11000] Evaluating test: L1 0.0028649115907576155 PSNR 34.55718365837546 SSIM 0.9838062524795532 LPIPSA 0.023507550139637554 LPIPSV 0.028543090940836596 [11/02 18:30:14]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.0023641814957098928 PSNR 36.256965412813074 SSIM 0.9867919683456421 LPIPSA 0.019234513069557792 LPIPSV 0.026302899453131592 [11/02 18:30:17]\n",
+ "Training progress: 57% 11500/20000 [09:22<04:44, 29.87it/s, Loss=0.0030781, psnr=33.43, point=24603]\n",
+ "[ITER 11500] Evaluating test: L1 0.0028679188724388092 PSNR 34.59598507600672 SSIM 0.9839022159576416 LPIPSA 0.02258559171219959 LPIPSV 0.028210934361114222 [11/02 18:30:37]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.0022428183412343703 PSNR 36.768378987031824 SSIM 0.9874865412712097 LPIPSA 0.018137759583837846 LPIPSV 0.02529228270492133 [11/02 18:30:41]\n",
+ "Training progress: 60% 12000/20000 [09:46<04:31, 29.43it/s, Loss=0.0023997, psnr=33.98, point=24618]\n",
+ "[ITER 12000] Evaluating test: L1 0.002812561216106748 PSNR 34.72661512038287 SSIM 0.9841364622116089 LPIPSA 0.02176102050854003 LPIPSV 0.027820941855144853 [11/02 18:31:01]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.0021894177120617207 PSNR 36.751340978285846 SSIM 0.9879097938537598 LPIPSA 0.017442922053091666 LPIPSV 0.024939138716196314 [11/02 18:31:04]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 18:31:04]\n",
+ "reset opacity [11/02 18:31:05]\n",
+ "Training progress: 62% 12500/20000 [10:10<04:14, 29.46it/s, Loss=0.0021355, psnr=35.59, point=24626]\n",
+ "[ITER 12500] Evaluating test: L1 0.002776315326199812 PSNR 34.847051957074335 SSIM 0.9844439029693604 LPIPSA 0.02170181194977725 LPIPSV 0.027479648809222615 [11/02 18:31:24]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.002121602198766435 PSNR 37.22243522195255 SSIM 0.9883207678794861 LPIPSA 0.017209080020513606 LPIPSV 0.024574685403529334 [11/02 18:31:28]\n",
+ "Training progress: 65% 13000/20000 [10:33<03:55, 29.74it/s, Loss=0.0020123, psnr=40.04, point=24642]\n",
+ "[ITER 13000] Evaluating test: L1 0.002775607825777329 PSNR 34.81444684196921 SSIM 0.9843992590904236 LPIPSA 0.02096586162224412 LPIPSV 0.027182228726280087 [11/02 18:31:48]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.002171478494453956 PSNR 36.895574906293085 SSIM 0.9879686832427979 LPIPSA 0.016614120480987954 LPIPSV 0.024142753168502274 [11/02 18:31:52]\n",
+ "Training progress: 68% 13500/20000 [10:57<03:35, 30.17it/s, Loss=0.0022815, psnr=35.89, point=24659]\n",
+ "[ITER 13500] Evaluating test: L1 0.0026942428495899283 PSNR 35.08682295855354 SSIM 0.984908938407898 LPIPSA 0.020181908491341508 LPIPSV 0.026765895350014463 [11/02 18:32:11]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.0020241073524469837 PSNR 37.52518575331744 SSIM 0.988922655582428 LPIPSA 0.015830832752673066 LPIPSV 0.02348142067956574 [11/02 18:32:15]\n",
+ "Training progress: 70% 14000/20000 [11:20<03:20, 29.88it/s, Loss=0.0014118, psnr=38.75, point=24674]\n",
+ "[ITER 14000] Evaluating test: L1 0.0026895037667332765 PSNR 35.10319260989918 SSIM 0.9849939942359924 LPIPSA 0.01970697838046095 LPIPSV 0.026522815172724864 [11/02 18:32:35]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.001964839072624112 PSNR 37.7271250556497 SSIM 0.9893941283226013 LPIPSA 0.015003981370040598 LPIPSV 0.02294308618258904 [11/02 18:32:39]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 18:32:39]\n",
+ "Training progress: 72% 14500/20000 [11:44<03:04, 29.85it/s, Loss=0.0015215, psnr=41.99, point=24694]\n",
+ "[ITER 14500] Evaluating test: L1 0.0026560540427453816 PSNR 35.20591993892894 SSIM 0.9851296544075012 LPIPSA 0.019109266152714983 LPIPSV 0.026201496174668566 [11/02 18:32:59]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0019161827718455564 PSNR 37.93336397058823 SSIM 0.9896736145019531 LPIPSA 0.01444413888213389 LPIPSV 0.022578878492555198 [11/02 18:33:03]\n",
+ "Training progress: 75% 15000/20000 [12:07<02:47, 29.92it/s, Loss=0.0020034, psnr=36.41, point=24712]\n",
+ "[ITER 15000] Evaluating test: L1 0.0026230491999098484 PSNR 35.27818141264074 SSIM 0.9853312373161316 LPIPSA 0.018818173201425988 LPIPSV 0.026031684760442552 [11/02 18:33:22]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0018515129032654358 PSNR 38.19725866878734 SSIM 0.9900351166725159 LPIPSA 0.014111427097197841 LPIPSV 0.022263575180926743 [11/02 18:33:26]\n",
+ "Training progress: 78% 15500/20000 [12:31<02:29, 30.11it/s, Loss=0.0016591, psnr=43.73, point=24712]\n",
+ "[ITER 15500] Evaluating test: L1 0.00262293652015026 PSNR 35.24217515833237 SSIM 0.9853635430335999 LPIPSA 0.01831961716251338 LPIPSV 0.025915168664034677 [11/02 18:33:45]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0018314817228683214 PSNR 38.403444177964154 SSIM 0.9901542663574219 LPIPSA 0.013684470288674621 LPIPSV 0.02211842280538643 [11/02 18:33:49]\n",
+ "Training progress: 80% 16000/20000 [12:54<02:13, 29.96it/s, Loss=0.0021353, psnr=36.86, point=24712]\n",
+ "[ITER 16000] Evaluating test: L1 0.0026190714013543635 PSNR 35.25893996743595 SSIM 0.9853807091712952 LPIPSA 0.018105642020921495 LPIPSV 0.025666100630427107 [11/02 18:34:09]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0017759406747406019 PSNR 38.53894357120289 SSIM 0.9905276894569397 LPIPSA 0.013195468825014198 LPIPSV 0.021783255068037438 [11/02 18:34:13]\n",
+ "Training progress: 82% 16500/20000 [13:17<02:00, 28.96it/s, Loss=0.0015531, psnr=41.07, point=24712]\n",
+ "[ITER 16500] Evaluating test: L1 0.002605615132351351 PSNR 35.32060623168945 SSIM 0.9854267239570618 LPIPSA 0.01788383158033385 LPIPSV 0.02563065498629037 [11/02 18:34:32]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0017395223942859208 PSNR 38.75641071095186 SSIM 0.9906600713729858 LPIPSA 0.012969702345264308 LPIPSV 0.02160416987231549 [11/02 18:34:36]\n",
+ "Training progress: 85% 17000/20000 [13:41<01:54, 26.31it/s, Loss=0.0012990, psnr=41.05, point=24712]\n",
+ "[ITER 17000] Evaluating test: L1 0.00260438652270857 PSNR 35.329575931324676 SSIM 0.9854564666748047 LPIPSA 0.01759074636570671 LPIPSV 0.02543566239011638 [11/02 18:34:55]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0017227851956918398 PSNR 38.849170460420495 SSIM 0.9907532930374146 LPIPSA 0.012585466755006243 LPIPSV 0.02134008623440476 [11/02 18:34:59]\n",
+ "Training progress: 88% 17500/20000 [14:04<01:35, 26.18it/s, Loss=0.0021275, psnr=36.82, point=24712]\n",
+ "[ITER 17500] Evaluating test: L1 0.0025978474466897107 PSNR 35.36489015467026 SSIM 0.9855204224586487 LPIPSA 0.017363789162653333 LPIPSV 0.02527563613565529 [11/02 18:35:19]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.001696037864723407 PSNR 38.95300651999081 SSIM 0.9909890294075012 LPIPSA 0.012402956017895657 LPIPSV 0.02115323252099402 [11/02 18:35:23]\n",
+ "Training progress: 90% 18000/20000 [14:27<01:11, 27.99it/s, Loss=0.0019563, psnr=37.17, point=24712]\n",
+ "[ITER 18000] Evaluating test: L1 0.002572958539103103 PSNR 35.435571894926184 SSIM 0.9856330752372742 LPIPSA 0.017130285063210654 LPIPSV 0.02518259876352899 [11/02 18:35:42]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.0016473254928474916 PSNR 39.18763441198012 SSIM 0.991258978843689 LPIPSA 0.01202140942982891 LPIPSV 0.020772454602753416 [11/02 18:35:46]\n",
+ "Training progress: 92% 18500/20000 [14:51<00:48, 30.80it/s, Loss=0.0017810, psnr=38.26, point=24712]\n",
+ "[ITER 18500] Evaluating test: L1 0.0025679614970131833 PSNR 35.45779946271111 SSIM 0.9856708645820618 LPIPSA 0.016918635023209977 LPIPSV 0.025096921907628283 [11/02 18:36:06]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0016453368828960638 PSNR 39.21269405589384 SSIM 0.9912947416305542 LPIPSA 0.011988169897128554 LPIPSV 0.020801724646897876 [11/02 18:36:09]\n",
+ "Training progress: 95% 19000/20000 [15:14<00:33, 29.64it/s, Loss=0.0017786, psnr=38.98, point=24712]\n",
+ "[ITER 19000] Evaluating test: L1 0.002561851928476244 PSNR 35.47652345545151 SSIM 0.9856762886047363 LPIPSA 0.01681080728988437 LPIPSV 0.02503517864491133 [11/02 18:36:29]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.0016217785750460975 PSNR 39.303154216093176 SSIM 0.9914258122444153 LPIPSA 0.011734659619191113 LPIPSV 0.02054583322366371 [11/02 18:36:33]\n",
+ "Training progress: 98% 19500/20000 [15:37<00:16, 30.87it/s, Loss=0.0015868, psnr=37.35, point=24712]\n",
+ "[ITER 19500] Evaluating test: L1 0.0025829268235932376 PSNR 35.41023433909697 SSIM 0.985568642616272 LPIPSA 0.016723044463159406 LPIPSV 0.024997382753473872 [11/02 18:36:52]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.0016140254892354064 PSNR 39.39664638743681 SSIM 0.9914842247962952 LPIPSA 0.011554611627669895 LPIPSV 0.020384207313113353 [11/02 18:36:56]\n",
+ "Training progress: 100% 20000/20000 [16:00<00:00, 20.82it/s, Loss=0.0018911, psnr=35.78, point=24712]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.002554079144508304 PSNR 35.50526450662052 SSIM 0.9857316017150879 LPIPSA 0.016540843089494633 LPIPSV 0.02488091535975828 [11/02 18:37:15]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0015861261225141147 PSNR 39.489428127513214 SSIM 0.9916273951530457 LPIPSA 0.011360294788199313 LPIPSV 0.020296348642338726 [11/02 18:37:19]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 18:37:19]\n",
+ "\n",
+ "Training complete. [11/02 18:37:19]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/jumpingjacks --port 6017 --expname \"dnerf/jumpingjacks\" --configs arguments/dnerf/jumpingjacks.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "outputId": "c7a2778c-8172-4633-87b9-0b4820105f69",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "snolzdTPSGYV"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/jumpingjacks/cfg_args\n",
+ "Config file found: output/dnerf/jumpingjacks/cfg_args\n",
+ "Rendering output/dnerf/jumpingjacks/\n",
+ "feature_dim: 64 [11/02 18:37:28]\n",
+ "Loading trained model at iteration 20000 [11/02 18:37:28]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 18:37:28]\n",
+ "Reading Training Transforms [11/02 18:37:28]\n",
+ "Reading Test Transforms [11/02 18:37:44]\n",
+ "Generating Video Transforms [11/02 18:37:46]\n",
+ "hello!!!! [11/02 18:37:46]\n",
+ "Generating random point cloud (2000)... [11/02 18:37:46]\n",
+ "Loading Training Cameras [11/02 18:37:46]\n",
+ "Loading Test Cameras [11/02 18:37:46]\n",
+ "Loading Video Cameras [11/02 18:37:46]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 18:37:46]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 18:37:46]\n",
+ "loading model from existsoutput/dnerf/jumpingjacks/point_cloud/iteration_20000 [11/02 18:37:46]\n",
+ "point nums: 24712 [11/02 18:37:46]\n",
+ "Rendering progress: 100% 20/20 [00:00<00:00, 24.94it/s]\n",
+ "FPS: 23.901562189764995 [11/02 18:37:47]\n",
+ "point nums: 24712 [11/02 18:37:49]\n",
+ "Rendering progress: 100% 160/160 [00:05<00:00, 28.52it/s]\n",
+ "FPS: 28.392839494115258 [11/02 18:37:55]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/jumpingjacks/\" --skip_train --configs arguments/dnerf/jumpingjacks.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ " \n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "G08_Ej5_SGYV"
+ },
+ "execution_count": 19,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/jumpingjacks/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "b3f2e8d1-a4b6-47d8-a841-c626967bf39c",
+ "id": "aXCyJLp9SGYV"
+ },
+ "execution_count": 20,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 20
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Lego**"
+ ],
+ "metadata": {
+ "id": "8Uf5cAlQSjoD"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "outputId": "a8490bc0-8689-42b3-f736-e643b65d9040",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "1BIAx3VkSjoE"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 18:38:13.965227: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 18:38:13.965278: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 18:38:13.966623: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 18:38:15.252957: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/lego [11/02 18:38:16]\n",
+ "feature_dim: 64 [11/02 18:38:16]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 18:38:16]\n",
+ "Reading Training Transforms [11/02 18:38:16]\n",
+ "Reading Test Transforms [11/02 18:38:21]\n",
+ "Generating Video Transforms [11/02 18:38:22]\n",
+ "hello!!!! [11/02 18:38:22]\n",
+ "Generating random point cloud (2000)... [11/02 18:38:22]\n",
+ "Loading Training Cameras [11/02 18:38:22]\n",
+ "Loading Test Cameras [11/02 18:38:22]\n",
+ "Loading Video Cameras [11/02 18:38:22]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 18:38:22]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 18:38:22]\n",
+ "Number of points at initialisation : 2000 [11/02 18:38:23]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 18:38:23]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 18:38:23]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 18:38:23]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 18:38:25]\n",
+ "data loading done [11/02 18:38:26]\n",
+ "Training progress: 17% 500/3000 [00:21<01:42, 24.41it/s, Loss=0.0388426, psnr=19.73, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.04062288758509299 PSNR 19.83289483014275 SSIM 0.8206266760826111 LPIPSA 0.29759760113323436 LPIPSV 0.24200439891394446 [11/02 18:38:49]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.03892901386408245 PSNR 19.867262223187616 SSIM 0.8339160680770874 LPIPSA 0.2806808527778177 LPIPSV 0.2289090559763067 [11/02 18:38:53]\n",
+ "Training progress: 33% 1000/3000 [00:47<01:23, 24.01it/s, Loss=0.0232802, psnr=21.79, point=2969]\n",
+ "[ITER 1000] Evaluating test: L1 0.027372531483278555 PSNR 21.528053395888385 SSIM 0.8456314206123352 LPIPSA 0.21944702635793126 LPIPSV 0.1966876580434687 [11/02 18:39:15]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.026534994406735197 PSNR 21.43836705824908 SSIM 0.8571985363960266 LPIPSA 0.21143408470294056 LPIPSV 0.1870805288062376 [11/02 18:39:19]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 18:39:19]\n",
+ "Training progress: 50% 1500/3000 [01:03<00:22, 67.55it/s, Loss=0.0201504, psnr=23.32, point=8497]\n",
+ "[ITER 1500] Evaluating test: L1 0.023500230492037887 PSNR 22.07238006591797 SSIM 0.8652415871620178 LPIPSA 0.16861382565077612 LPIPSV 0.16531595061807072 [11/02 18:39:30]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.022191308164859518 PSNR 22.14285401736989 SSIM 0.8781333565711975 LPIPSA 0.16283923168392742 LPIPSV 0.1567090144928764 [11/02 18:39:35]\n",
+ "Training progress: 67% 2000/3000 [01:18<00:15, 63.71it/s, Loss=0.0225681, psnr=20.75, point=14743]\n",
+ "[ITER 2000] Evaluating test: L1 0.022360418101443964 PSNR 22.234026852776022 SSIM 0.8750289082527161 LPIPSA 0.1498602295623106 LPIPSV 0.15223546150852652 [11/02 18:39:46]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.021034146166023084 PSNR 22.31739066628849 SSIM 0.8876004815101624 LPIPSA 0.14430427200653972 LPIPSV 0.14380184254225561 [11/02 18:39:50]\n",
+ "Training progress: 83% 2500/3000 [01:35<00:10, 45.79it/s, Loss=0.0194303, psnr=24.08, point=21098]\n",
+ "[ITER 2500] Evaluating test: L1 0.021591165267369327 PSNR 22.377391702988568 SSIM 0.8832220435142517 LPIPSA 0.13468600941054962 LPIPSV 0.14053193479776382 [11/02 18:40:03]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.019849649575703284 PSNR 22.62983075310202 SSIM 0.8964292407035828 LPIPSA 0.12925677220611012 LPIPSV 0.13213615689207525 [11/02 18:40:07]\n",
+ "Training progress: 100% 3000/3000 [01:52<00:00, 53.55it/s, Loss=0.0163592, psnr=24.49, point=27592]\n",
+ "[ITER 3000] Evaluating test: L1 0.020791264470009244 PSNR 22.53683583876666 SSIM 0.8916993737220764 LPIPSA 0.1223499314749942 LPIPSV 0.13055518401019714 [11/02 18:40:20]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.01891504354117548 PSNR 22.872319726382983 SSIM 0.9047808051109314 LPIPSA 0.11696384233586929 LPIPSV 0.1223766457508592 [11/02 18:40:24]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 18:40:24]\n",
+ "reset opacity [11/02 18:40:24]\n",
+ "Training progress: 100% 3000/3000 [02:01<00:00, 24.69it/s, Loss=0.0163592, psnr=24.49, point=27592]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 18:40:24]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 18:40:25]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 18:40:25]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 18:40:26]\n",
+ "data loading done [11/02 18:40:27]\n",
+ "Training progress: 2% 500/20000 [00:35<22:40, 14.34it/s, Loss=0.0165640, psnr=24.49, point=28933]\n",
+ "[ITER 500] Evaluating test: L1 0.01876937088501804 PSNR 23.836365755866556 SSIM 0.8737938404083252 LPIPSA 0.14225562004482045 LPIPSV 0.14664883035070755 [11/02 18:41:04]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.01491707525051692 PSNR 25.616691925946405 SSIM 0.888218879699707 LPIPSA 0.12861450498594956 LPIPSV 0.13294697246130774 [11/02 18:41:08]\n",
+ "Training progress: 5% 1000/20000 [01:14<19:00, 16.66it/s, Loss=0.0134141, psnr=25.68, point=32322]\n",
+ "[ITER 1000] Evaluating test: L1 0.017269666087539756 PSNR 24.12006602567785 SSIM 0.888066828250885 LPIPSA 0.11611582338809967 LPIPSV 0.12816354895339294 [11/02 18:41:43]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.013112053220324656 PSNR 26.409999959609088 SSIM 0.9002687335014343 LPIPSA 0.10291955488569596 LPIPSV 0.1140891870155054 [11/02 18:41:48]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 18:41:48]\n",
+ "Training progress: 8% 1500/20000 [01:45<13:53, 22.19it/s, Loss=0.0138151, psnr=26.01, point=37759]\n",
+ "[ITER 1500] Evaluating test: L1 0.01816495546304128 PSNR 23.670688068165497 SSIM 0.8836386203765869 LPIPSA 0.1033774386433994 LPIPSV 0.11949478747213588 [11/02 18:42:14]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.012726923779529683 PSNR 26.488412744858685 SSIM 0.9044042229652405 LPIPSA 0.08975973944453632 LPIPSV 0.1035986503257471 [11/02 18:42:19]\n",
+ "Training progress: 10% 2000/20000 [02:16<14:20, 20.93it/s, Loss=0.0111743, psnr=27.66, point=43229]\n",
+ "[ITER 2000] Evaluating test: L1 0.015841940746587867 PSNR 24.512654360602884 SSIM 0.9016738533973694 LPIPSA 0.09318106358542162 LPIPSV 0.10930445614983053 [11/02 18:42:46]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.01070936383022105 PSNR 27.79740041844985 SSIM 0.9196328520774841 LPIPSA 0.07980170407715965 LPIPSV 0.09447624271406847 [11/02 18:42:50]\n",
+ "Training progress: 12% 2500/20000 [02:49<16:34, 17.60it/s, Loss=0.0092990, psnr=28.69, point=49318]\n",
+ "[ITER 2500] Evaluating test: L1 0.015301531892927253 PSNR 24.57456038979923 SSIM 0.9102594256401062 LPIPSA 0.08426294256659116 LPIPSV 0.10071711286025889 [11/02 18:43:18]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.009753112242940594 PSNR 28.644055086023666 SSIM 0.9289010763168335 LPIPSA 0.07037823274731636 LPIPSV 0.085081968237372 [11/02 18:43:22]\n",
+ "Training progress: 15% 3000/20000 [03:22<14:50, 19.09it/s, Loss=0.0097856, psnr=29.52, point=54934]\n",
+ "[ITER 3000] Evaluating test: L1 0.014466783744009101 PSNR 24.823357301599838 SSIM 0.9171720743179321 LPIPSA 0.07679443411967334 LPIPSV 0.09361793057006948 [11/02 18:43:52]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.008961695492925012 PSNR 29.17878543629366 SSIM 0.9352798461914062 LPIPSA 0.06370943631319438 LPIPSV 0.0785353455473395 [11/02 18:43:56]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 18:43:56]\n",
+ "reset opacity [11/02 18:43:57]\n",
+ "Training progress: 18% 3500/20000 [03:56<14:04, 19.54it/s, Loss=0.0094042, psnr=28.59, point=58730]\n",
+ "[ITER 3500] Evaluating test: L1 0.014676793204510912 PSNR 24.839402815874884 SSIM 0.916870653629303 LPIPSA 0.07111666483037613 LPIPSV 0.08927793231080561 [11/02 18:44:25]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.008880352847935521 PSNR 29.224988488590014 SSIM 0.938066840171814 LPIPSA 0.058094822308596444 LPIPSV 0.07326601445674896 [11/02 18:44:30]\n",
+ "Training progress: 20% 4000/20000 [04:30<14:26, 18.46it/s, Loss=0.0104083, psnr=26.59, point=63389]\n",
+ "[ITER 4000] Evaluating test: L1 0.01511083378949586 PSNR 24.666043674244598 SSIM 0.9154147505760193 LPIPSA 0.06760901497567401 LPIPSV 0.0863500697647824 [11/02 18:44:59]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.009337252772906247 PSNR 28.83375122967888 SSIM 0.9357436895370483 LPIPSA 0.05527726429350236 LPIPSV 0.07064938063130659 [11/02 18:45:04]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 18:45:04]\n",
+ "Training progress: 22% 4500/20000 [05:05<14:33, 17.75it/s, Loss=0.0082978, psnr=30.37, point=68116]\n",
+ "[ITER 4500] Evaluating test: L1 0.014225908335955703 PSNR 24.850863961612475 SSIM 0.9235144853591919 LPIPSA 0.06358454376459122 LPIPSV 0.08169819677577299 [11/02 18:45:35]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.007819451254737727 PSNR 30.29010144401999 SSIM 0.9464970827102661 LPIPSA 0.049316569286234235 LPIPSV 0.06434863477068789 [11/02 18:45:39]\n",
+ "Training progress: 25% 5000/20000 [05:41<14:30, 17.23it/s, Loss=0.0066357, psnr=33.04, point=71591]\n",
+ "[ITER 5000] Evaluating test: L1 0.013130180482916972 PSNR 25.17518469866584 SSIM 0.9307284951210022 LPIPSA 0.05866382468272658 LPIPSV 0.07648720197817858 [11/02 18:46:10]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.006544612567214405 PSNR 31.383620430441464 SSIM 0.9559382200241089 LPIPSA 0.04420297023128061 LPIPSV 0.05867507812731406 [11/02 18:46:15]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 18:46:15]\n",
+ "Training progress: 28% 5500/20000 [06:19<14:19, 16.88it/s, Loss=0.0092754, psnr=26.85, point=74612]\n",
+ "[ITER 5500] Evaluating test: L1 0.013209964586969684 PSNR 25.108317992266485 SSIM 0.931334912776947 LPIPSA 0.05687982545179479 LPIPSV 0.0748520668815164 [11/02 18:46:48]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.006663647809011094 PSNR 31.241100423476276 SSIM 0.9562790989875793 LPIPSA 0.041769173872821475 LPIPSV 0.05664467833497945 [11/02 18:46:52]\n",
+ "Training progress: 30% 6000/20000 [06:56<14:01, 16.64it/s, Loss=0.0061233, psnr=32.59, point=77613]\n",
+ "[ITER 6000] Evaluating test: L1 0.012944911223124056 PSNR 25.151950050802792 SSIM 0.9332789778709412 LPIPSA 0.054490369032411015 LPIPSV 0.07243644774836652 [11/02 18:47:25]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.005849200465223368 PSNR 32.49539902630974 SSIM 0.961584210395813 LPIPSA 0.03829429189071936 LPIPSV 0.05254419761545518 [11/02 18:47:29]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 18:47:29]\n",
+ "reset opacity [11/02 18:47:30]\n",
+ "Training progress: 32% 6500/20000 [07:34<13:30, 16.66it/s, Loss=0.0065286, psnr=32.85, point=79219]\n",
+ "[ITER 6500] Evaluating test: L1 0.01287553837413297 PSNR 25.17431921117446 SSIM 0.933883547782898 LPIPSA 0.05329652174430735 LPIPSV 0.07143402450224932 [11/02 18:48:03]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.005709167556179797 PSNR 32.69686261345358 SSIM 0.9628368020057678 LPIPSA 0.03691480856607942 LPIPSV 0.051098117276149636 [11/02 18:48:08]\n",
+ "Training progress: 35% 7000/20000 [08:12<13:15, 16.35it/s, Loss=0.0053889, psnr=33.73, point=81377]\n",
+ "[ITER 7000] Evaluating test: L1 0.01289466324755374 PSNR 25.12246648003073 SSIM 0.934576153755188 LPIPSA 0.05159663562388981 LPIPSV 0.06973758164574118 [11/02 18:48:41]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.005512405994950848 PSNR 32.94975224663229 SSIM 0.9645390510559082 LPIPSA 0.034597137942910194 LPIPSV 0.048884337000987106 [11/02 18:48:46]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 18:48:46]\n",
+ "Training progress: 38% 7500/20000 [08:51<12:45, 16.33it/s, Loss=0.0058429, psnr=33.69, point=83269]\n",
+ "[ITER 7500] Evaluating test: L1 0.012697099653237006 PSNR 25.21511953017291 SSIM 0.9358090162277222 LPIPSA 0.04968353360891342 LPIPSV 0.06798828656182569 [11/02 18:49:20]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.005379518576185493 PSNR 33.06238353953642 SSIM 0.9657828211784363 LPIPSA 0.032425358014948225 LPIPSV 0.047085645882522356 [11/02 18:49:25]\n",
+ "Training progress: 40% 8000/20000 [09:30<13:24, 14.91it/s, Loss=0.0061058, psnr=32.43, point=85194]\n",
+ "[ITER 8000] Evaluating test: L1 0.01273220936384271 PSNR 25.23020430172191 SSIM 0.9354315400123596 LPIPSA 0.048778043073766375 LPIPSV 0.06740547913838835 [11/02 18:49:59]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.005513365147635341 PSNR 32.89201254003188 SSIM 0.965429425239563 LPIPSA 0.031159007045276025 LPIPSV 0.046093843000776624 [11/02 18:50:04]\n",
+ "Training progress: 42% 8500/20000 [10:09<12:26, 15.40it/s, Loss=0.0053600, psnr=32.67, point=87202]\n",
+ "[ITER 8500] Evaluating test: L1 0.012641949962605448 PSNR 25.193904764512006 SSIM 0.9366821646690369 LPIPSA 0.04760196191423079 LPIPSV 0.06621634499991641 [11/02 18:50:38]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.004989180946722627 PSNR 33.781222848331225 SSIM 0.9691360592842102 LPIPSA 0.029274058363893452 LPIPSV 0.04382548590793329 [11/02 18:50:43]\n",
+ "Training progress: 45% 9000/20000 [10:48<11:36, 15.80it/s, Loss=0.0051337, psnr=33.96, point=88468]\n",
+ "[ITER 9000] Evaluating test: L1 0.012525085743297549 PSNR 25.252142962287454 SSIM 0.9371633529663086 LPIPSA 0.04655462963616147 LPIPSV 0.06535487841157352 [11/02 18:51:18]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.0049457428429056614 PSNR 33.81356138341567 SSIM 0.9695577025413513 LPIPSA 0.02795815226786277 LPIPSV 0.042691149255808664 [11/02 18:51:22]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 18:51:22]\n",
+ "Training progress: 45% 9000/20000 [10:58<11:36, 15.80it/s, Loss=0.0051337, psnr=33.96, point=88468]reset opacity [11/02 18:51:23]\n",
+ "Training progress: 48% 9500/20000 [11:28<11:08, 15.71it/s, Loss=0.0051695, psnr=33.38, point=89185]\n",
+ "[ITER 9500] Evaluating test: L1 0.012642598601386827 PSNR 25.189016342163086 SSIM 0.9369903802871704 LPIPSA 0.046790036208489365 LPIPSV 0.06545261994880788 [11/02 18:51:58]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.004916344180374461 PSNR 34.059699787813074 SSIM 0.9704442024230957 LPIPSA 0.027415872387149754 LPIPSV 0.04222829420776928 [11/02 18:52:02]\n",
+ "Training progress: 50% 10000/20000 [12:08<11:26, 14.56it/s, Loss=0.0058143, psnr=32.87, point=90083]\n",
+ "[ITER 10000] Evaluating test: L1 0.012534883366349866 PSNR 25.203240226296817 SSIM 0.9375761151313782 LPIPSA 0.04576772582881591 LPIPSV 0.06464615486123983 [11/02 18:52:38]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.004593136487528682 PSNR 34.538988674388214 SSIM 0.9723771214485168 LPIPSA 0.026082245633006096 LPIPSV 0.04072745078626801 [11/02 18:52:42]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 18:52:42]\n",
+ "Training progress: 52% 10500/20000 [12:49<10:13, 15.48it/s, Loss=0.0046758, psnr=34.36, point=90853]\n",
+ "[ITER 10500] Evaluating test: L1 0.012588511001976097 PSNR 25.17407551933737 SSIM 0.9376353025436401 LPIPSA 0.04529083607827916 LPIPSV 0.06414890705662615 [11/02 18:53:19]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.004448331372045419 PSNR 34.78612047083237 SSIM 0.9733812808990479 LPIPSA 0.025037176578360444 LPIPSV 0.03966058834510691 [11/02 18:53:23]\n",
+ "Training progress: 55% 11000/20000 [13:29<09:37, 15.58it/s, Loss=0.0044977, psnr=34.02, point=91462]\n",
+ "[ITER 11000] Evaluating test: L1 0.012587267028934816 PSNR 25.180115531472598 SSIM 0.9377861618995667 LPIPSA 0.04469834399573943 LPIPSV 0.06384364143013954 [11/02 18:53:58]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.004382452215341961 PSNR 35.00742384966682 SSIM 0.9741635322570801 LPIPSA 0.024130170507466093 LPIPSV 0.03880989398149883 [11/02 18:54:03]\n",
+ "Training progress: 57% 11500/20000 [14:09<09:55, 14.28it/s, Loss=0.0047310, psnr=34.42, point=92012]\n",
+ "[ITER 11500] Evaluating test: L1 0.012639248579302254 PSNR 25.16133420607623 SSIM 0.9377089738845825 LPIPSA 0.044397757772137135 LPIPSV 0.06366960112662877 [11/02 18:54:39]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.004507661714930744 PSNR 34.763765671673944 SSIM 0.9738181829452515 LPIPSA 0.023862211691106066 LPIPSV 0.03844897953026435 [11/02 18:54:43]\n",
+ "Training progress: 60% 12000/20000 [14:49<08:44, 15.24it/s, Loss=0.0048856, psnr=34.98, point=92419]\n",
+ "[ITER 12000] Evaluating test: L1 0.012645532705766313 PSNR 25.181877472821405 SSIM 0.9374989867210388 LPIPSA 0.04387263537329786 LPIPSV 0.06344645755255923 [11/02 18:55:19]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.004549782106871991 PSNR 34.76705259435317 SSIM 0.9741729497909546 LPIPSA 0.023063772631918684 LPIPSV 0.037876346751171 [11/02 18:55:23]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 18:55:23]\n",
+ "reset opacity [11/02 18:55:24]\n",
+ "Training progress: 62% 12500/20000 [15:30<08:06, 15.43it/s, Loss=0.0046693, psnr=34.90, point=92623]\n",
+ "[ITER 12500] Evaluating test: L1 0.012573097284664126 PSNR 25.173128240248737 SSIM 0.937984049320221 LPIPSA 0.04379555385778932 LPIPSV 0.06338265647783 [11/02 18:56:00]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.004173143506597947 PSNR 35.40794282801011 SSIM 0.9756956696510315 LPIPSA 0.022607291117310524 LPIPSV 0.0373858501586844 [11/02 18:56:04]\n",
+ "Training progress: 65% 13000/20000 [16:11<08:01, 14.53it/s, Loss=0.0043622, psnr=34.99, point=92830]\n",
+ "[ITER 13000] Evaluating test: L1 0.01253103935981498 PSNR 25.183775172514075 SSIM 0.9381987452507019 LPIPSA 0.04338237577501465 LPIPSV 0.06304939989657964 [11/02 18:56:40]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.004111959900268737 PSNR 35.52290478874655 SSIM 0.9762353301048279 LPIPSA 0.022007913602625623 LPIPSV 0.03668526823029799 [11/02 18:56:45]\n",
+ "Training progress: 68% 13500/20000 [16:51<07:09, 15.14it/s, Loss=0.0043949, psnr=36.27, point=93044]\n",
+ "[ITER 13500] Evaluating test: L1 0.012513308840639451 PSNR 25.18394660949707 SSIM 0.9382743239402771 LPIPSA 0.043031026992727726 LPIPSV 0.06273489598842229 [11/02 18:57:20]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.004026995602009051 PSNR 35.62908845789292 SSIM 0.976643979549408 LPIPSA 0.02147485370583394 LPIPSV 0.036051627467660344 [11/02 18:57:25]\n",
+ "Training progress: 70% 14000/20000 [17:31<06:31, 15.34it/s, Loss=0.0041951, psnr=35.27, point=93179]\n",
+ "[ITER 14000] Evaluating test: L1 0.012532626990886295 PSNR 25.17288174348719 SSIM 0.9383184313774109 LPIPSA 0.04288848555263351 LPIPSV 0.06262611071853076 [11/02 18:58:01]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.003980556160540265 PSNR 35.75565405452953 SSIM 0.977157711982727 LPIPSA 0.02102922724888605 LPIPSV 0.035544175654649734 [11/02 18:58:05]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 18:58:05]\n",
+ "Training progress: 72% 14500/20000 [18:13<06:17, 14.55it/s, Loss=0.0044070, psnr=33.20, point=93347]\n",
+ "[ITER 14500] Evaluating test: L1 0.012512709671521887 PSNR 25.18301986245548 SSIM 0.9383237361907959 LPIPSA 0.04263521993861479 LPIPSV 0.0624159182257512 [11/02 18:58:42]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.003910572062629987 PSNR 35.90840530395508 SSIM 0.9775872230529785 LPIPSA 0.02060183510184288 LPIPSV 0.03501782741616754 [11/02 18:58:47]\n",
+ "Training progress: 75% 15000/20000 [18:53<05:29, 15.19it/s, Loss=0.0042992, psnr=35.76, point=93505]\n",
+ "[ITER 15000] Evaluating test: L1 0.012522179578595301 PSNR 25.180522021125345 SSIM 0.9383617043495178 LPIPSA 0.042374814586604345 LPIPSV 0.06226143832592403 [11/02 18:59:23]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.003860601990976754 PSNR 35.96966193704044 SSIM 0.9778562188148499 LPIPSA 0.020333743215922045 LPIPSV 0.034695533268591934 [11/02 18:59:27]\n",
+ "Training progress: 78% 15500/20000 [19:34<04:52, 15.40it/s, Loss=0.0039826, psnr=35.26, point=93505]\n",
+ "[ITER 15500] Evaluating test: L1 0.012521140606087796 PSNR 25.18207134920008 SSIM 0.9383856058120728 LPIPSA 0.04218868341516046 LPIPSV 0.062163340256494636 [11/02 19:00:03]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.00385967138058999 PSNR 36.01000191183651 SSIM 0.9780527949333191 LPIPSA 0.020000432015341872 LPIPSV 0.034365361885112876 [11/02 19:00:07]\n",
+ "Training progress: 80% 16000/20000 [20:14<04:46, 13.98it/s, Loss=0.0039962, psnr=34.77, point=93505]\n",
+ "[ITER 16000] Evaluating test: L1 0.012552010388497044 PSNR 25.16610538258272 SSIM 0.9383337497711182 LPIPSA 0.042180265573894274 LPIPSV 0.062138757065815085 [11/02 19:00:44]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0038390933426425736 PSNR 36.07707304113051 SSIM 0.9782960414886475 LPIPSA 0.019709238946876106 LPIPSV 0.03400664656039547 [11/02 19:00:48]\n",
+ "Training progress: 82% 16500/20000 [20:55<03:49, 15.23it/s, Loss=0.0039393, psnr=35.61, point=93505]\n",
+ "[ITER 16500] Evaluating test: L1 0.012564428260221201 PSNR 25.161338918349323 SSIM 0.9383216500282288 LPIPSA 0.042004017159342766 LPIPSV 0.06204798357451663 [11/02 19:01:24]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0037890989881227996 PSNR 36.13994239358341 SSIM 0.9785423278808594 LPIPSA 0.019450739299988046 LPIPSV 0.03364858732504003 [11/02 19:01:29]\n",
+ "Training progress: 85% 17000/20000 [21:35<03:16, 15.25it/s, Loss=0.0038554, psnr=35.68, point=93505]\n",
+ "[ITER 17000] Evaluating test: L1 0.012583923087838818 PSNR 25.15487704557531 SSIM 0.9383483529090881 LPIPSA 0.04185256613966297 LPIPSV 0.06201622678953059 [11/02 19:02:04]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0037658976747051757 PSNR 36.19999649945427 SSIM 0.9787678718566895 LPIPSA 0.019261034589041683 LPIPSV 0.03347723832463517 [11/02 19:02:09]\n",
+ "Training progress: 88% 17500/20000 [22:16<02:58, 14.00it/s, Loss=0.0042133, psnr=35.50, point=93505]\n",
+ "[ITER 17500] Evaluating test: L1 0.012542663635138203 PSNR 25.163773256189682 SSIM 0.9383628368377686 LPIPSA 0.04169816699098138 LPIPSV 0.06184853503809256 [11/02 19:02:45]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.0037400343674508963 PSNR 36.255007575539985 SSIM 0.9789491295814514 LPIPSA 0.01899541706285056 LPIPSV 0.03314946438459789 [11/02 19:02:50]\n",
+ "Training progress: 90% 18000/20000 [22:56<02:12, 15.11it/s, Loss=0.0037420, psnr=35.73, point=93505]\n",
+ "[ITER 18000] Evaluating test: L1 0.012570810120771913 PSNR 25.156949435963348 SSIM 0.9383463859558105 LPIPSA 0.04163109752185205 LPIPSV 0.061822318636319214 [11/02 19:03:25]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.00369560213156921 PSNR 36.343563079833984 SSIM 0.9792121648788452 LPIPSA 0.018836551510235843 LPIPSV 0.03292942825047409 [11/02 19:03:30]\n",
+ "Training progress: 92% 18500/20000 [23:36<01:37, 15.39it/s, Loss=0.0033173, psnr=37.77, point=93505]\n",
+ "[ITER 18500] Evaluating test: L1 0.012555860366453142 PSNR 25.163014580221738 SSIM 0.9383165240287781 LPIPSA 0.041512676679036194 LPIPSV 0.06176760551684043 [11/02 19:04:05]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0036707526100251604 PSNR 36.38657558665556 SSIM 0.9793564081192017 LPIPSA 0.018654303132172895 LPIPSV 0.032777295831371754 [11/02 19:04:10]\n",
+ "Training progress: 95% 19000/20000 [24:16<01:05, 15.32it/s, Loss=0.0032929, psnr=37.75, point=93505]\n",
+ "[ITER 19000] Evaluating test: L1 0.012577674954253085 PSNR 25.148856667911303 SSIM 0.9382855892181396 LPIPSA 0.0414486928459476 LPIPSV 0.061716682332403516 [11/02 19:04:46]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.003651129889904576 PSNR 36.42247749777401 SSIM 0.9794893860816956 LPIPSA 0.01849353461361983 LPIPSV 0.03262188975863597 [11/02 19:04:50]\n",
+ "Training progress: 98% 19500/20000 [24:56<00:33, 14.75it/s, Loss=0.0037903, psnr=36.06, point=93505]\n",
+ "[ITER 19500] Evaluating test: L1 0.012595642577199374 PSNR 25.149701174567728 SSIM 0.938270628452301 LPIPSA 0.04139612428843975 LPIPSV 0.06171861619633787 [11/02 19:05:26]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.003663864616742905 PSNR 36.4352019814884 SSIM 0.9795573949813843 LPIPSA 0.018387949861147824 LPIPSV 0.03246661900159191 [11/02 19:05:30]\n",
+ "Training progress: 100% 20000/20000 [25:37<00:00, 13.01it/s, Loss=0.0035798, psnr=37.02, point=93505]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.012594590620959505 PSNR 25.149967305800494 SSIM 0.9382729530334473 LPIPSA 0.041329403353088046 LPIPSV 0.061642274698790384 [11/02 19:06:06]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0036209676661254726 PSNR 36.497415879193476 SSIM 0.9797510504722595 LPIPSA 0.01820179321529234 LPIPSV 0.03229399628060706 [11/02 19:06:10]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 19:06:11]\n",
+ "\n",
+ "Training complete. [11/02 19:06:11]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/lego --port 6017 --expname \"dnerf/lego\" --configs arguments/dnerf/lego.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "outputId": "0e36ee25-f1f0-489f-dbbd-387ab5d5f389",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "bmZyEq3ISjoE"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/lego/cfg_args\n",
+ "Config file found: output/dnerf/lego/cfg_args\n",
+ "Rendering output/dnerf/lego/\n",
+ "feature_dim: 64 [11/02 19:06:20]\n",
+ "Loading trained model at iteration 20000 [11/02 19:06:20]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:06:20]\n",
+ "Reading Training Transforms [11/02 19:06:20]\n",
+ "Reading Test Transforms [11/02 19:06:24]\n",
+ "Generating Video Transforms [11/02 19:06:26]\n",
+ "hello!!!! [11/02 19:06:26]\n",
+ "Generating random point cloud (2000)... [11/02 19:06:26]\n",
+ "Loading Training Cameras [11/02 19:06:26]\n",
+ "Loading Test Cameras [11/02 19:06:26]\n",
+ "Loading Video Cameras [11/02 19:06:26]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:06:26]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:06:26]\n",
+ "loading model from existsoutput/dnerf/lego/point_cloud/iteration_20000 [11/02 19:06:26]\n",
+ "point nums: 93505 [11/02 19:06:26]\n",
+ "Rendering progress: 100% 20/20 [00:01<00:00, 16.91it/s]\n",
+ "FPS: 16.154698928044372 [11/02 19:06:27]\n",
+ "point nums: 93505 [11/02 19:06:31]\n",
+ "Rendering progress: 100% 160/160 [00:08<00:00, 19.75it/s]\n",
+ "FPS: 19.650546608949533 [11/02 19:06:39]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/lego/\" --skip_train --configs arguments/dnerf/lego.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ "  <video width=400 controls>\n",
+ "        <source src=\"%s\" type=\"video/mp4\">\n",
+ "  </video>\n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "K29Ynrm1SjoE"
+ },
+ "execution_count": 23,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/lego/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "ac4ad83e-212d-452a-9d03-7b3327acff5f",
+ "id": "ThKLgjBvSjoE"
+ },
+ "execution_count": 24,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 24
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Mutant**"
+ ],
+ "metadata": {
+ "id": "NcJK9UZUS4Dm"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "outputId": "63275a81-cbc3-4e69-d41f-a74e67a523b1",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "T207ED7BS4Dm"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 19:07:02.932489: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 19:07:02.932537: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 19:07:02.934048: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 19:07:04.324046: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/mutant [11/02 19:07:05]\n",
+ "feature_dim: 64 [11/02 19:07:05]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:07:05]\n",
+ "Reading Training Transforms [11/02 19:07:05]\n",
+ "Reading Test Transforms [11/02 19:07:18]\n",
+ "Generating Video Transforms [11/02 19:07:19]\n",
+ "hello!!!! [11/02 19:07:19]\n",
+ "Generating random point cloud (2000)... [11/02 19:07:19]\n",
+ "Loading Training Cameras [11/02 19:07:19]\n",
+ "Loading Test Cameras [11/02 19:07:19]\n",
+ "Loading Video Cameras [11/02 19:07:19]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:07:19]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:07:19]\n",
+ "Number of points at initialisation : 2000 [11/02 19:07:19]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:07:19]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:07:20]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:07:20]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:07:22]\n",
+ "data loading done [11/02 19:07:26]\n",
+ "Training progress: 17% 500/3000 [00:24<01:17, 32.31it/s, Loss=0.0273273, psnr=21.45, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.030988898785675272 PSNR 19.632268344654758 SSIM 0.9015381336212158 LPIPSA 0.18736659603960373 LPIPSV 0.13747560364358566 [11/02 19:07:49]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.03430976313264931 PSNR 18.83283592672909 SSIM 0.8996147513389587 LPIPSA 0.18840686100370743 LPIPSV 0.13954951701795354 [11/02 19:07:53]\n",
+ "Training progress: 33% 1000/3000 [00:49<01:02, 32.04it/s, Loss=0.0195617, psnr=21.68, point=2546]\n",
+ "[ITER 1000] Evaluating test: L1 0.021486601189655417 PSNR 20.618338528801413 SSIM 0.9147356748580933 LPIPSA 0.14888696197201223 LPIPSV 0.10630703848951004 [11/02 19:08:13]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.026373491348589167 PSNR 19.217280556173886 SSIM 0.9106695652008057 LPIPSA 0.15312551093452118 LPIPSV 0.10990284646258634 [11/02 19:08:17]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:08:17]\n",
+ "Training progress: 50% 1500/3000 [01:04<00:23, 64.69it/s, Loss=0.0185752, psnr=21.99, point=5926]\n",
+ "[ITER 1500] Evaluating test: L1 0.01957133910892641 PSNR 20.965915231143725 SSIM 0.9221382737159729 LPIPSA 0.12581243979580262 LPIPSV 0.09486482643029269 [11/02 19:08:28]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.02504824019749375 PSNR 19.205591313979205 SSIM 0.916925847530365 LPIPSA 0.13182687978534138 LPIPSV 0.09979220786515404 [11/02 19:08:32]\n",
+ "Training progress: 67% 2000/3000 [01:19<00:17, 56.57it/s, Loss=0.0155000, psnr=25.10, point=9893]\n",
+ "[ITER 2000] Evaluating test: L1 0.019087665358229595 PSNR 21.06428673688103 SSIM 0.9250449538230896 LPIPSA 0.11596513232764076 LPIPSV 0.08999553410445943 [11/02 19:08:43]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.024710782146190897 PSNR 19.206152635462143 SSIM 0.9193097352981567 LPIPSA 0.12251384994562935 LPIPSV 0.09516800194978714 [11/02 19:08:47]\n",
+ "Training progress: 83% 2500/3000 [01:34<00:07, 66.94it/s, Loss=0.0167191, psnr=23.42, point=13560]\n",
+ "[ITER 2500] Evaluating test: L1 0.01867298105293337 PSNR 21.19289555269129 SSIM 0.9270487427711487 LPIPSA 0.10942623457487892 LPIPSV 0.08668781247209101 [11/02 19:08:59]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.024309082066311556 PSNR 19.267825407140396 SSIM 0.9206841588020325 LPIPSA 0.11677988399477567 LPIPSV 0.09218460672041949 [11/02 19:09:03]\n",
+ "Training progress: 100% 3000/3000 [01:50<00:00, 65.52it/s, Loss=0.0183200, psnr=21.59, point=16874]\n",
+ "[ITER 3000] Evaluating test: L1 0.0185782913776005 PSNR 21.135390786563647 SSIM 0.9278451800346375 LPIPSA 0.10544922027517767 LPIPSV 0.08426163915325613 [11/02 19:09:14]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.024127362295985222 PSNR 19.27749880622415 SSIM 0.92172771692276 LPIPSA 0.11299978591063443 LPIPSV 0.08999738535460304 [11/02 19:09:18]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:09:18]\n",
+ "reset opacity [11/02 19:09:18]\n",
+ "Training progress: 100% 3000/3000 [01:59<00:00, 25.21it/s, Loss=0.0183200, psnr=21.59, point=16874]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:09:18]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:09:19]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:09:19]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:09:20]\n",
+ "data loading done [11/02 19:09:22]\n",
+ "Training progress: 2% 500/20000 [00:32<17:15, 18.82it/s, Loss=0.0094890, psnr=26.02, point=17517]\n",
+ "[ITER 500] Evaluating test: L1 0.01111486945849131 PSNR 25.583659340353574 SSIM 0.9325138926506042 LPIPSA 0.10870152229771894 LPIPSV 0.0811763296232504 [11/02 19:09:56]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.011491244521868579 PSNR 25.342292336856616 SSIM 0.9322097897529602 LPIPSA 0.10714047007700976 LPIPSV 0.0811069485019235 [11/02 19:10:00]\n",
+ "Training progress: 5% 1000/20000 [01:08<16:14, 19.49it/s, Loss=0.0098716, psnr=25.15, point=18419]\n",
+ "[ITER 1000] Evaluating test: L1 0.00819932485875838 PSNR 27.75004555197323 SSIM 0.9459769129753113 LPIPSA 0.08607491346843102 LPIPSV 0.06882868575699189 [11/02 19:10:31]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.008797309470965582 PSNR 27.13468349681181 SSIM 0.9444807767868042 LPIPSA 0.08596871245433302 LPIPSV 0.0691672971143442 [11/02 19:10:35]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:10:35]\n",
+ "Training progress: 8% 1500/20000 [01:33<10:56, 28.17it/s, Loss=0.0054472, psnr=30.39, point=20129]\n",
+ "[ITER 1500] Evaluating test: L1 0.007589964689139058 PSNR 28.261542039759018 SSIM 0.9490876793861389 LPIPSA 0.07265206952305402 LPIPSV 0.06277953811428126 [11/02 19:10:57]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.008509632063043468 PSNR 27.310287138995 SSIM 0.9453652501106262 LPIPSA 0.0730991777690018 LPIPSV 0.06405634564511917 [11/02 19:11:01]\n",
+ "Training progress: 10% 2000/20000 [01:59<10:51, 27.62it/s, Loss=0.0048570, psnr=31.49, point=22070]\n",
+ "[ITER 2000] Evaluating test: L1 0.006899458725991494 PSNR 29.04899305455825 SSIM 0.9534937739372253 LPIPSA 0.06339186758679502 LPIPSV 0.058067893718971926 [11/02 19:11:22]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.007095789098564316 PSNR 28.6912758771111 SSIM 0.9532328248023987 LPIPSA 0.06288119580815821 LPIPSV 0.057951319524470496 [11/02 19:11:26]\n",
+ "Training progress: 12% 2500/20000 [02:25<12:10, 23.95it/s, Loss=0.0067567, psnr=27.58, point=23955]\n",
+ "[ITER 2500] Evaluating test: L1 0.006985123419915052 PSNR 28.78750845965217 SSIM 0.9535378813743591 LPIPSA 0.057009403758189255 LPIPSV 0.054767306674929225 [11/02 19:11:48]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.007055655587464571 PSNR 28.797885670381433 SSIM 0.9534421563148499 LPIPSA 0.05675894109641805 LPIPSV 0.05476364668677835 [11/02 19:11:52]\n",
+ "Training progress: 15% 3000/20000 [02:51<11:56, 23.72it/s, Loss=0.0057638, psnr=29.73, point=25782]\n",
+ "[ITER 3000] Evaluating test: L1 0.005904012907515554 PSNR 30.108148238238165 SSIM 0.9607644081115723 LPIPSA 0.04993809343260877 LPIPSV 0.050054648343254536 [11/02 19:12:14]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.0061498541960164025 PSNR 29.680583168478574 SSIM 0.9597110748291016 LPIPSA 0.05018840532969026 LPIPSV 0.0504680900451015 [11/02 19:12:18]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:12:18]\n",
+ "reset opacity [11/02 19:12:18]\n",
+ "Training progress: 18% 3500/20000 [03:16<10:15, 26.79it/s, Loss=0.0048561, psnr=31.27, point=27009]\n",
+ "[ITER 3500] Evaluating test: L1 0.006335064371609513 PSNR 29.419299967148724 SSIM 0.9584744572639465 LPIPSA 0.04683238749994951 LPIPSV 0.048596557010622585 [11/02 19:12:40]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.0065845767419566124 PSNR 28.825226503259994 SSIM 0.9579177498817444 LPIPSA 0.046766302383997864 LPIPSV 0.04876280071980813 [11/02 19:12:44]\n",
+ "Training progress: 20% 4000/20000 [03:42<09:49, 27.12it/s, Loss=0.0049532, psnr=32.52, point=28497]\n",
+ "[ITER 4000] Evaluating test: L1 0.005047484259942875 PSNR 31.423928316901712 SSIM 0.9671912789344788 LPIPSA 0.04103788218515761 LPIPSV 0.04437048185397597 [11/02 19:13:05]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.005029162309844704 PSNR 31.45694418514476 SSIM 0.9671456813812256 LPIPSA 0.04095965722466216 LPIPSV 0.044251411495839846 [11/02 19:13:09]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 19:13:09]\n",
+ "Training progress: 22% 4500/20000 [04:07<09:16, 27.84it/s, Loss=0.0039706, psnr=34.09, point=29813]\n",
+ "[ITER 4500] Evaluating test: L1 0.0049891602795790226 PSNR 31.4014315885656 SSIM 0.9682600498199463 LPIPSA 0.03715465174001806 LPIPSV 0.041954505969496333 [11/02 19:13:31]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.00492496439255774 PSNR 31.477957220638498 SSIM 0.9682242274284363 LPIPSA 0.03693196429487537 LPIPSV 0.04178130823899718 [11/02 19:13:35]\n",
+ "Training progress: 25% 5000/20000 [04:32<09:18, 26.87it/s, Loss=0.0046502, psnr=31.66, point=31228]\n",
+ "[ITER 5000] Evaluating test: L1 0.005044164820848142 PSNR 31.162933574003333 SSIM 0.9682460427284241 LPIPSA 0.03508217139717411 LPIPSV 0.040508023737108004 [11/02 19:13:56]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.004955034976934686 PSNR 31.118786082548255 SSIM 0.9686752557754517 LPIPSA 0.035012777785167974 LPIPSV 0.04013800379984519 [11/02 19:14:00]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 19:14:00]\n",
+ "Training progress: 28% 5500/20000 [04:58<09:05, 26.59it/s, Loss=0.0044634, psnr=31.77, point=32491]\n",
+ "[ITER 5500] Evaluating test: L1 0.0044787255225374415 PSNR 32.322823468376605 SSIM 0.9724539518356323 LPIPSA 0.0313915932441459 LPIPSV 0.037845431662657684 [11/02 19:14:22]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.004528058227151632 PSNR 31.987751007080078 SSIM 0.971892774105072 LPIPSA 0.03140276966287809 LPIPSV 0.037514915908960736 [11/02 19:14:26]\n",
+ "Training progress: 30% 6000/20000 [05:24<08:53, 26.26it/s, Loss=0.0042937, psnr=33.51, point=33453]\n",
+ "[ITER 6000] Evaluating test: L1 0.0045458753867184416 PSNR 32.17702506570255 SSIM 0.9723084568977356 LPIPSA 0.029075626503018773 LPIPSV 0.036221356733756906 [11/02 19:14:47]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.00484223900746335 PSNR 31.484486748190488 SSIM 0.9701336026191711 LPIPSA 0.029448311666355413 LPIPSV 0.03627955749192659 [11/02 19:14:51]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 19:14:51]\n",
+ "reset opacity [11/02 19:14:52]\n",
+ "Training progress: 32% 6500/20000 [05:50<08:32, 26.33it/s, Loss=0.0033834, psnr=36.19, point=33958]\n",
+ "[ITER 6500] Evaluating test: L1 0.004090844790506012 PSNR 33.07897096521714 SSIM 0.9757533669471741 LPIPSA 0.026870833490701282 LPIPSV 0.03458418697118759 [11/02 19:15:14]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.004248478107483071 PSNR 32.63094486909754 SSIM 0.9743587374687195 LPIPSA 0.027086525826769715 LPIPSV 0.03434322719626567 [11/02 19:15:18]\n",
+ "Training progress: 35% 7000/20000 [06:16<09:20, 23.21it/s, Loss=0.0033056, psnr=32.89, point=34719]\n",
+ "[ITER 7000] Evaluating test: L1 0.003945588065749582 PSNR 33.41969108581543 SSIM 0.9768930077552795 LPIPSA 0.025007403400890967 LPIPSV 0.03287894278764725 [11/02 19:15:40]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.003938293207765502 PSNR 33.41304599537569 SSIM 0.9765980839729309 LPIPSA 0.025135644437635645 LPIPSV 0.03284823795890107 [11/02 19:15:44]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 19:15:44]\n",
+ "Training progress: 38% 7500/20000 [06:43<08:19, 25.02it/s, Loss=0.0028276, psnr=37.00, point=35563]\n",
+ "[ITER 7500] Evaluating test: L1 0.003669505363658947 PSNR 34.06252692727482 SSIM 0.978874683380127 LPIPSA 0.0230882989571375 LPIPSV 0.03146918380961699 [11/02 19:16:07]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.0037315320771406676 PSNR 33.91167124579935 SSIM 0.9781094789505005 LPIPSA 0.02326554585905636 LPIPSV 0.031325144145418614 [11/02 19:16:11]\n",
+ "Training progress: 40% 8000/20000 [07:10<08:07, 24.60it/s, Loss=0.0030161, psnr=35.97, point=36042]\n",
+ "[ITER 8000] Evaluating test: L1 0.0034628150135497834 PSNR 34.693495133343866 SSIM 0.9803813695907593 LPIPSA 0.02112495543106514 LPIPSV 0.029972408952958444 [11/02 19:16:33]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.0033730326138217658 PSNR 34.77629313749426 SSIM 0.980743408203125 LPIPSA 0.020815784002051634 LPIPSV 0.029418904663008803 [11/02 19:16:37]\n",
+ "Training progress: 42% 8500/20000 [07:37<07:23, 25.92it/s, Loss=0.0031273, psnr=35.03, point=36509]\n",
+ "[ITER 8500] Evaluating test: L1 0.0034480804700733106 PSNR 34.613047431497016 SSIM 0.9807423949241638 LPIPSA 0.019357826615519384 LPIPSV 0.028897227171589348 [11/02 19:17:00]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.00331478999407195 PSNR 35.210073471069336 SSIM 0.981204628944397 LPIPSA 0.019145667333813274 LPIPSV 0.028459480908863685 [11/02 19:17:04]\n",
+ "Training progress: 45% 9000/20000 [08:03<07:10, 25.52it/s, Loss=0.0024304, psnr=37.64, point=36914]\n",
+ "[ITER 9000] Evaluating test: L1 0.0034110437161015235 PSNR 34.75067811853745 SSIM 0.9811845421791077 LPIPSA 0.01827626921893919 LPIPSV 0.028046962750308654 [11/02 19:17:26]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.0032033528447808586 PSNR 35.36122153787052 SSIM 0.9821615815162659 LPIPSA 0.01793628081898479 LPIPSV 0.027312259135000846 [11/02 19:17:30]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 19:17:30]\n",
+ "reset opacity [11/02 19:17:31]\n",
+ "Training progress: 48% 9500/20000 [08:30<07:46, 22.53it/s, Loss=0.0029684, psnr=37.75, point=37095]\n",
+ "[ITER 9500] Evaluating test: L1 0.003254024763865506 PSNR 35.060238669900336 SSIM 0.9823163151741028 LPIPSA 0.017527389559237397 LPIPSV 0.027526870479478556 [11/02 19:17:54]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.0030866927563158028 PSNR 35.53834230759565 SSIM 0.9829998016357422 LPIPSA 0.017309976358185795 LPIPSV 0.027026461985181358 [11/02 19:17:58]\n",
+ "Training progress: 50% 10000/20000 [08:57<06:51, 24.29it/s, Loss=0.0029300, psnr=35.44, point=37319]\n",
+ "[ITER 10000] Evaluating test: L1 0.003340763758922763 PSNR 34.662400638355926 SSIM 0.981692373752594 LPIPSA 0.01672503604170154 LPIPSV 0.027003027827424163 [11/02 19:18:20]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.003205317147897885 PSNR 34.95492026385139 SSIM 0.9820066094398499 LPIPSA 0.01667404952732956 LPIPSV 0.026527898386120796 [11/02 19:18:24]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 19:18:24]\n",
+ "Training progress: 52% 10500/20000 [09:24<06:17, 25.16it/s, Loss=0.0024995, psnr=37.61, point=37559]\n",
+ "[ITER 10500] Evaluating test: L1 0.0032008867916267585 PSNR 35.3037711872774 SSIM 0.9828834533691406 LPIPSA 0.01581904845421805 LPIPSV 0.026098495897124794 [11/02 19:18:48]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.002927875769434168 PSNR 36.45147839714499 SSIM 0.9843409061431885 LPIPSA 0.015364961260381867 LPIPSV 0.025397950892939287 [11/02 19:18:52]\n",
+ "Training progress: 55% 11000/20000 [09:51<05:52, 25.53it/s, Loss=0.0022476, psnr=38.23, point=37807]\n",
+ "[ITER 11000] Evaluating test: L1 0.0030129700145848535 PSNR 35.837824540979724 SSIM 0.9842076301574707 LPIPSA 0.014785802430089782 LPIPSV 0.025335071389289462 [11/02 19:19:14]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.0027154915596303694 PSNR 36.976509991814105 SSIM 0.9856329560279846 LPIPSA 0.014408533516175607 LPIPSV 0.02460534859667806 [11/02 19:19:18]\n",
+ "Training progress: 57% 11500/20000 [10:17<05:33, 25.52it/s, Loss=0.0025666, psnr=36.49, point=37968]\n",
+ "[ITER 11500] Evaluating test: L1 0.002915184328551678 PSNR 36.17398609834559 SSIM 0.9848762154579163 LPIPSA 0.014057884376276942 LPIPSV 0.024749030961709863 [11/02 19:19:41]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.002565905561341959 PSNR 37.73028631771312 SSIM 0.9866116642951965 LPIPSA 0.013506861074882396 LPIPSV 0.02381473681067719 [11/02 19:19:45]\n",
+ "Training progress: 60% 12000/20000 [10:44<06:10, 21.60it/s, Loss=0.0018637, psnr=40.05, point=38106]\n",
+ "[ITER 12000] Evaluating test: L1 0.002930107238866827 PSNR 36.14046612907858 SSIM 0.9849343299865723 LPIPSA 0.013839915504350382 LPIPSV 0.024457210127045128 [11/02 19:20:08]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.0025679944562451806 PSNR 37.8368595347685 SSIM 0.9868142604827881 LPIPSA 0.01330531421391403 LPIPSV 0.023546571898109773 [11/02 19:20:12]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 19:20:12]\n",
+ "reset opacity [11/02 19:20:12]\n",
+ "Training progress: 62% 12500/20000 [11:12<05:03, 24.71it/s, Loss=0.0026737, psnr=36.65, point=38172]\n",
+ "[ITER 12500] Evaluating test: L1 0.002854858394986128 PSNR 36.3792412701775 SSIM 0.98539137840271 LPIPSA 0.013600096928284448 LPIPSV 0.0243504530585864 [11/02 19:20:35]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.002508758457706255 PSNR 37.97113800048828 SSIM 0.9871201515197754 LPIPSA 0.013192660010912838 LPIPSV 0.023526531589381835 [11/02 19:20:39]\n",
+ "Training progress: 65% 13000/20000 [11:39<04:40, 24.96it/s, Loss=0.0021787, psnr=39.72, point=38242]\n",
+ "[ITER 13000] Evaluating test: L1 0.002794696298866149 PSNR 36.53680891149184 SSIM 0.9857669472694397 LPIPSA 0.013086954624775578 LPIPSV 0.02383247291778817 [11/02 19:21:02]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.002414548975032042 PSNR 38.333258011761835 SSIM 0.9876539707183838 LPIPSA 0.012558340259334621 LPIPSV 0.022927907898145562 [11/02 19:21:06]\n",
+ "Training progress: 68% 13500/20000 [12:05<04:17, 25.20it/s, Loss=0.0020656, psnr=40.98, point=38285]\n",
+ "[ITER 13500] Evaluating test: L1 0.0028184506603900123 PSNR 36.510282179888556 SSIM 0.985741913318634 LPIPSA 0.012906440533697605 LPIPSV 0.023633148849887008 [11/02 19:21:29]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.0024571928250439025 PSNR 38.199531106387866 SSIM 0.9875783324241638 LPIPSA 0.012410065542687388 LPIPSV 0.022774154649061316 [11/02 19:21:33]\n",
+ "Training progress: 70% 14000/20000 [12:32<03:57, 25.29it/s, Loss=0.0024022, psnr=39.38, point=38350]\n",
+ "[ITER 14000] Evaluating test: L1 0.0027586292913731407 PSNR 36.691038468304804 SSIM 0.9860872626304626 LPIPSA 0.012427446925464799 LPIPSV 0.02322193596731214 [11/02 19:21:56]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.0023456191753639896 PSNR 38.86192635928883 SSIM 0.9881938695907593 LPIPSA 0.011794272207600228 LPIPSV 0.02228109790560077 [11/02 19:22:00]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 19:22:00]\n",
+ "Training progress: 72% 14500/20000 [13:00<04:32, 20.20it/s, Loss=0.0024632, psnr=37.46, point=38383]\n",
+ "[ITER 14500] Evaluating test: L1 0.0027174187673474934 PSNR 36.79174131505629 SSIM 0.9863510131835938 LPIPSA 0.01214445146786816 LPIPSV 0.023029029807623697 [11/02 19:22:23]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0023080611272769816 PSNR 38.920307383817786 SSIM 0.9883705377578735 LPIPSA 0.011536383946590564 LPIPSV 0.02204000445849755 [11/02 19:22:27]\n",
+ "Training progress: 75% 15000/20000 [13:27<03:22, 24.72it/s, Loss=0.0018753, psnr=41.52, point=38423]\n",
+ "[ITER 15000] Evaluating test: L1 0.0027127237648100536 PSNR 36.812233195585364 SSIM 0.9864359498023987 LPIPSA 0.012005755239549805 LPIPSV 0.022804752978331903 [11/02 19:22:50]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0022729080485399153 PSNR 39.04854426664465 SSIM 0.9885985255241394 LPIPSA 0.011373790693195426 LPIPSV 0.0218417757355115 [11/02 19:22:54]\n",
+ "Training progress: 78% 15500/20000 [13:54<02:56, 25.54it/s, Loss=0.0017468, psnr=41.84, point=38423]\n",
+ "[ITER 15500] Evaluating test: L1 0.002681943286648568 PSNR 36.9323351242963 SSIM 0.9866222143173218 LPIPSA 0.011728836004348361 LPIPSV 0.02260852583190974 [11/02 19:23:17]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0022277947967214618 PSNR 39.251268050249884 SSIM 0.9888396859169006 LPIPSA 0.011078801553915529 LPIPSV 0.021581262350082397 [11/02 19:23:21]\n",
+ "Training progress: 80% 16000/20000 [14:20<02:38, 25.31it/s, Loss=0.0021102, psnr=37.93, point=38423]\n",
+ "[ITER 16000] Evaluating test: L1 0.002661184298203272 PSNR 36.97825802073759 SSIM 0.9867798089981079 LPIPSA 0.011540479601963478 LPIPSV 0.022395393098978436 [11/02 19:23:43]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0021705756525454275 PSNR 39.54075712316176 SSIM 0.9891544580459595 LPIPSA 0.010820629184736925 LPIPSV 0.021320161464459756 [11/02 19:23:47]\n",
+ "Training progress: 82% 16500/20000 [14:46<02:16, 25.60it/s, Loss=0.0020274, psnr=41.16, point=38423]\n",
+ "[ITER 16500] Evaluating test: L1 0.002658620969775845 PSNR 36.99405176499311 SSIM 0.9868270754814148 LPIPSA 0.011423214092193282 LPIPSV 0.022228678359704858 [11/02 19:24:10]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0021608137895407923 PSNR 39.551789452047906 SSIM 0.9892385005950928 LPIPSA 0.010627333704820451 LPIPSV 0.021102356067036882 [11/02 19:24:14]\n",
+ "Training progress: 85% 17000/20000 [15:13<02:19, 21.52it/s, Loss=0.0017841, psnr=40.20, point=38423]\n",
+ "[ITER 17000] Evaluating test: L1 0.0026379124781883813 PSNR 37.07336291144876 SSIM 0.9869372248649597 LPIPSA 0.011198214501799905 LPIPSV 0.02206174815621446 [11/02 19:24:36]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.002137040870045038 PSNR 39.690284729003906 SSIM 0.9893389344215393 LPIPSA 0.010386260828989394 LPIPSV 0.020932879079790676 [11/02 19:24:40]\n",
+ "Training progress: 88% 17500/20000 [15:40<01:40, 24.91it/s, Loss=0.0022521, psnr=37.54, point=38423]\n",
+ "[ITER 17500] Evaluating test: L1 0.0026322054361705392 PSNR 37.052227693445545 SSIM 0.9870169162750244 LPIPSA 0.011183774564415216 LPIPSV 0.022020921878078404 [11/02 19:25:03]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.002125683535054764 PSNR 39.65483631807215 SSIM 0.989483654499054 LPIPSA 0.010388160507906885 LPIPSV 0.020818918505135703 [11/02 19:25:07]\n",
+ "Training progress: 90% 18000/20000 [16:06<01:18, 25.63it/s, Loss=0.0017316, psnr=40.90, point=38423]\n",
+ "[ITER 18000] Evaluating test: L1 0.0026197804699597113 PSNR 37.1237860286937 SSIM 0.9870747923851013 LPIPSA 0.011037179104545537 LPIPSV 0.021888700697351906 [11/02 19:25:30]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.002127148416441153 PSNR 39.74956871481503 SSIM 0.9894909858703613 LPIPSA 0.010293694569126648 LPIPSV 0.020734424428904757 [11/02 19:25:34]\n",
+ "Training progress: 92% 18500/20000 [16:33<00:58, 25.82it/s, Loss=0.0018575, psnr=41.84, point=38423]\n",
+ "[ITER 18500] Evaluating test: L1 0.002598200267290368 PSNR 37.190296846277576 SSIM 0.987231969833374 LPIPSA 0.010890092700719833 LPIPSV 0.021737862487926203 [11/02 19:25:56]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0020968770224820163 PSNR 39.8924726598403 SSIM 0.9896359443664551 LPIPSA 0.010136106380206697 LPIPSV 0.020599267883774114 [11/02 19:26:00]\n",
+ "Training progress: 95% 19000/20000 [16:59<00:38, 25.65it/s, Loss=0.0021199, psnr=40.04, point=38423]\n",
+ "[ITER 19000] Evaluating test: L1 0.002602464108563521 PSNR 37.179586073931524 SSIM 0.9872004389762878 LPIPSA 0.010852924548089504 LPIPSV 0.02171512841082671 [11/02 19:26:22]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.002092276597121621 PSNR 39.889078476849726 SSIM 0.989681601524353 LPIPSA 0.010039156713687322 LPIPSV 0.020490946357741076 [11/02 19:26:26]\n",
+ "Training progress: 98% 19500/20000 [17:26<00:20, 24.89it/s, Loss=0.0019727, psnr=40.53, point=38423]\n",
+ "[ITER 19500] Evaluating test: L1 0.002599571509670247 PSNR 37.1752815246582 SSIM 0.9872435331344604 LPIPSA 0.010805497849907945 LPIPSV 0.021622130866436398 [11/02 19:26:49]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.002061131095294567 PSNR 40.011390910429114 SSIM 0.9898018836975098 LPIPSA 0.00995204008786994 LPIPSV 0.020369241781094494 [11/02 19:26:53]\n",
+ "Training progress: 100% 20000/20000 [17:52<00:00, 18.64it/s, Loss=0.0014147, psnr=43.57, point=38423]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.002599639366106952 PSNR 37.19709598316866 SSIM 0.9872522950172424 LPIPSA 0.010740002151578665 LPIPSV 0.021575804580660427 [11/02 19:27:16]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.002043470973148942 PSNR 40.128671533921185 SSIM 0.9899181127548218 LPIPSA 0.009841759908286965 LPIPSV 0.02023502943270347 [11/02 19:27:20]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 19:27:20]\n",
+ "\n",
+ "Training complete. [11/02 19:27:20]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/mutant --port 6017 --expname \"dnerf/mutant\" --configs arguments/dnerf/mutant.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "outputId": "66b3aa6b-7d6a-4dd4-a23a-c390f204364f",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "DnJ4hXTMS4Dn"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/mutant/cfg_args\n",
+ "Config file found: output/dnerf/mutant/cfg_args\n",
+ "Rendering output/dnerf/mutant/\n",
+ "feature_dim: 64 [11/02 19:27:29]\n",
+ "Loading trained model at iteration 20000 [11/02 19:27:29]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:27:29]\n",
+ "Reading Training Transforms [11/02 19:27:29]\n",
+ "Reading Test Transforms [11/02 19:27:41]\n",
+ "Generating Video Transforms [11/02 19:27:42]\n",
+ "hello!!!! [11/02 19:27:42]\n",
+ "Generating random point cloud (2000)... [11/02 19:27:42]\n",
+ "Loading Training Cameras [11/02 19:27:42]\n",
+ "Loading Test Cameras [11/02 19:27:42]\n",
+ "Loading Video Cameras [11/02 19:27:42]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:27:42]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:27:42]\n",
+ "loading model from existsoutput/dnerf/mutant/point_cloud/iteration_20000 [11/02 19:27:42]\n",
+ "point nums: 38423 [11/02 19:27:42]\n",
+ "Rendering progress: 100% 20/20 [00:00<00:00, 23.47it/s]\n",
+ "FPS: 22.468501661487384 [11/02 19:27:43]\n",
+ "point nums: 38423 [11/02 19:27:46]\n",
+ "Rendering progress: 100% 160/160 [00:06<00:00, 26.18it/s]\n",
+ "FPS: 26.058357149053194 [11/02 19:27:52]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/mutant/\" --skip_train --configs arguments/dnerf/mutant.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ "  <video width=400 controls>\n",
+ "        <source src=\"%s\" type=\"video/mp4\">\n",
+ "  </video>\n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "o89H8OEpS4Dn"
+ },
+ "execution_count": 27,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/mutant/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "a1a1c60e-21a0-4b7c-a140-0afafe24c580",
+ "id": "Otl8KWxWS4Dn"
+ },
+ "execution_count": 28,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 28
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Standup**"
+ ],
+ "metadata": {
+ "id": "DKqkeJqDTT73"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {
+ "outputId": "2fac6d91-fb99-43ba-aa96-a2397391ac55",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "p6gzIFT_TT73"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 19:28:11.038743: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 19:28:11.038795: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 19:28:11.040191: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 19:28:12.405372: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/standup [11/02 19:28:13]\n",
+ "feature_dim: 64 [11/02 19:28:14]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:28:14]\n",
+ "Reading Training Transforms [11/02 19:28:14]\n",
+ "Reading Test Transforms [11/02 19:28:25]\n",
+ "Generating Video Transforms [11/02 19:28:28]\n",
+ "hello!!!! [11/02 19:28:28]\n",
+ "Generating random point cloud (2000)... [11/02 19:28:28]\n",
+ "Loading Training Cameras [11/02 19:28:28]\n",
+ "Loading Test Cameras [11/02 19:28:28]\n",
+ "Loading Video Cameras [11/02 19:28:28]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:28:28]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:28:28]\n",
+ "Number of points at initialisation : 2000 [11/02 19:28:28]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:28:28]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:28:29]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:28:29]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:28:30]\n",
+ "data loading done [11/02 19:28:33]\n",
+ "Training progress: 17% 500/3000 [00:23<01:45, 23.77it/s, Loss=0.0283186, psnr=22.24, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.033517565797357 PSNR 18.642715510200052 SSIM 0.9302602410316467 LPIPSA 0.15393631116432302 LPIPSV 0.11344560411046534 [11/02 19:28:56]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.03681816122330287 PSNR 17.834696825812845 SSIM 0.9302884936332703 LPIPSA 0.15220471164759466 LPIPSV 0.11300353705883026 [11/02 19:29:00]\n",
+ "Training progress: 33% 1000/3000 [00:48<01:17, 25.91it/s, Loss=0.0253975, psnr=18.10, point=2395]\n",
+ "[ITER 1000] Evaluating test: L1 0.03048765768899637 PSNR 18.591461854822494 SSIM 0.9301106929779053 LPIPSA 0.1379653859664412 LPIPSV 0.10107496556113749 [11/02 19:29:21]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.03483341448009014 PSNR 17.611179127412683 SSIM 0.9280822277069092 LPIPSA 0.14259847779484355 LPIPSV 0.10276752315899905 [11/02 19:29:25]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:29:25]\n",
+ "Training progress: 50% 1500/3000 [01:03<00:21, 70.62it/s, Loss=0.0312671, psnr=19.47, point=5457]\n",
+ "[ITER 1500] Evaluating test: L1 0.030387047185179063 PSNR 18.48914920582491 SSIM 0.9305925369262695 LPIPSA 0.12805105614311554 LPIPSV 0.09766853885615573 [11/02 19:29:36]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.03391286226756433 PSNR 17.646597189061783 SSIM 0.9291674494743347 LPIPSA 0.13415235719260046 LPIPSV 0.10055634138338707 [11/02 19:29:40]\n",
+ "Training progress: 67% 2000/3000 [01:18<00:14, 69.58it/s, Loss=0.0255262, psnr=19.37, point=9139]\n",
+ "[ITER 2000] Evaluating test: L1 0.0301675026798073 PSNR 18.551864792318906 SSIM 0.9311052560806274 LPIPSA 0.12378070722608005 LPIPSV 0.09641685205347397 [11/02 19:29:51]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.033795981394017446 PSNR 17.6471949745627 SSIM 0.9291774034500122 LPIPSA 0.13075551324907472 LPIPSV 0.10002495808636441 [11/02 19:29:55]\n",
+ "Training progress: 83% 2500/3000 [01:33<00:07, 67.05it/s, Loss=0.0228253, psnr=22.74, point=12375]\n",
+ "[ITER 2500] Evaluating test: L1 0.03004318574333892 PSNR 18.602524420794317 SSIM 0.9311752915382385 LPIPSA 0.1216767075307229 LPIPSV 0.09573209614438169 [11/02 19:30:05]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.03350587024846498 PSNR 17.687162399291992 SSIM 0.9293272495269775 LPIPSA 0.12895930164000569 LPIPSV 0.09942149261341375 [11/02 19:30:09]\n",
+ "Training progress: 100% 3000/3000 [01:48<00:00, 67.32it/s, Loss=0.0276772, psnr=17.58, point=15184]\n",
+ "[ITER 3000] Evaluating test: L1 0.030176608111051953 PSNR 18.613938051111557 SSIM 0.9306563138961792 LPIPSA 0.12130293381564758 LPIPSV 0.09631482161143247 [11/02 19:30:21]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.03330768047667602 PSNR 17.722367567174576 SSIM 0.9291552901268005 LPIPSA 0.12794846011435285 LPIPSV 0.099817313692149 [11/02 19:30:24]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:30:24]\n",
+ "reset opacity [11/02 19:30:25]\n",
+ "Training progress: 100% 3000/3000 [01:56<00:00, 25.69it/s, Loss=0.0276772, psnr=17.58, point=15184]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:30:25]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:30:25]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:30:25]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:30:26]\n",
+ "data loading done [11/02 19:30:28]\n",
+ "Training progress: 2% 500/20000 [00:31<17:51, 18.19it/s, Loss=0.0141905, psnr=23.94, point=15678]\n",
+ "[ITER 500] Evaluating test: L1 0.012777601012631375 PSNR 24.070267957799576 SSIM 0.9481892585754395 LPIPSA 0.09722504238871967 LPIPSV 0.07598230746739051 [11/02 19:31:01]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.011646499062943108 PSNR 24.58033202676212 SSIM 0.9509192109107971 LPIPSA 0.0916537971181028 LPIPSV 0.07178099102833692 [11/02 19:31:05]\n",
+ "Training progress: 5% 1000/20000 [01:05<15:41, 20.17it/s, Loss=0.0093521, psnr=24.92, point=16661]\n",
+ "[ITER 1000] Evaluating test: L1 0.008952730362687041 PSNR 26.39662181629854 SSIM 0.9565096497535706 LPIPSA 0.07884919752969462 LPIPSV 0.06456201896071434 [11/02 19:31:35]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.008720660993062398 PSNR 26.417526245117188 SSIM 0.9573805332183838 LPIPSA 0.07405991479754448 LPIPSV 0.0616946424193242 [11/02 19:31:39]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:31:39]\n",
+ "Training progress: 8% 1500/20000 [01:30<12:06, 25.47it/s, Loss=0.0073408, psnr=28.44, point=18233]\n",
+ "[ITER 1500] Evaluating test: L1 0.008002826971385409 PSNR 26.729864793665268 SSIM 0.9584992527961731 LPIPSA 0.06811534985899925 LPIPSV 0.05920184250263607 [11/02 19:32:00]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.008032685994882794 PSNR 26.590954724480124 SSIM 0.958954393863678 LPIPSA 0.06511336543104228 LPIPSV 0.0568467453122139 [11/02 19:32:04]\n",
+ "Training progress: 10% 2000/20000 [01:55<10:58, 27.33it/s, Loss=0.0072957, psnr=26.99, point=19546]\n",
+ "[ITER 2000] Evaluating test: L1 0.007263748317628222 PSNR 27.292645061717312 SSIM 0.9617735743522644 LPIPSA 0.060365740209817886 LPIPSV 0.054890481864704806 [11/02 19:32:24]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.007253194515428999 PSNR 27.380057839786303 SSIM 0.9624135494232178 LPIPSA 0.05763932350365555 LPIPSV 0.053181990761967265 [11/02 19:32:28]\n",
+ "Training progress: 12% 2500/20000 [02:19<11:59, 24.31it/s, Loss=0.0057624, psnr=30.29, point=20496]\n",
+ "[ITER 2500] Evaluating test: L1 0.0062026551048107005 PSNR 28.467799579395965 SSIM 0.9651832580566406 LPIPSA 0.053289953619241714 LPIPSV 0.05009262329515289 [11/02 19:32:49]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.006047462551471065 PSNR 28.459726109224206 SSIM 0.9660788178443909 LPIPSA 0.05107153787770692 LPIPSV 0.048244159559116644 [11/02 19:32:53]\n",
+ "Training progress: 15% 3000/20000 [02:44<11:38, 24.33it/s, Loss=0.0047625, psnr=30.30, point=21271]\n",
+ "[ITER 3000] Evaluating test: L1 0.005498218884253327 PSNR 29.355234594906076 SSIM 0.9684793949127197 LPIPSA 0.049153175204992294 LPIPSV 0.04700055630768046 [11/02 19:33:14]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.0057210726526511066 PSNR 28.752485724056466 SSIM 0.9675012230873108 LPIPSA 0.0469876370009254 LPIPSV 0.045675825546769536 [11/02 19:33:18]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:33:18]\n",
+ "reset opacity [11/02 19:33:18]\n",
+ "Training progress: 18% 3500/20000 [03:08<09:58, 27.58it/s, Loss=0.0031245, psnr=35.07, point=21818]\n",
+ "[ITER 3500] Evaluating test: L1 0.005312145400025389 PSNR 29.48710901596967 SSIM 0.9696060419082642 LPIPSA 0.045676443725824356 LPIPSV 0.04478574511321152 [11/02 19:33:38]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.00570430197095608 PSNR 28.822988622328815 SSIM 0.9681973457336426 LPIPSA 0.04440473129644113 LPIPSV 0.044290695668143386 [11/02 19:33:42]\n",
+ "Training progress: 20% 4000/20000 [03:32<09:04, 29.38it/s, Loss=0.0047036, psnr=28.31, point=22273]\n",
+ "[ITER 4000] Evaluating test: L1 0.005025019430938889 PSNR 29.659847820506375 SSIM 0.9708570837974548 LPIPSA 0.04093580802573877 LPIPSV 0.041722862926476145 [11/02 19:34:01]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.004480045494239996 PSNR 30.600607367122873 SSIM 0.9731447696685791 LPIPSA 0.03783351686947486 LPIPSV 0.039120748310404664 [11/02 19:34:05]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 19:34:05]\n",
+ "Training progress: 22% 4500/20000 [03:55<08:36, 30.02it/s, Loss=0.0033620, psnr=32.23, point=22676]\n",
+ "[ITER 4500] Evaluating test: L1 0.004498632583657608 PSNR 30.52115597444422 SSIM 0.9737926125526428 LPIPSA 0.03662191309473094 LPIPSV 0.038839604596004766 [11/02 19:34:25]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.0046547087977695115 PSNR 30.191712211160098 SSIM 0.9730828404426575 LPIPSA 0.03551987372338772 LPIPSV 0.03810737271080999 [11/02 19:34:29]\n",
+ "Training progress: 25% 5000/20000 [04:19<08:19, 30.05it/s, Loss=0.0035578, psnr=32.54, point=22971]\n",
+ "[ITER 5000] Evaluating test: L1 0.0044236395727185645 PSNR 30.80147855422076 SSIM 0.9739700555801392 LPIPSA 0.03437995647682863 LPIPSV 0.03740564196863595 [11/02 19:34:48]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.004211230682866538 PSNR 30.826314365162567 SSIM 0.9745741486549377 LPIPSA 0.03246773176771753 LPIPSV 0.03547796382404426 [11/02 19:34:52]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 19:34:52]\n",
+ "Training progress: 28% 5500/20000 [04:42<08:03, 29.98it/s, Loss=0.0036727, psnr=31.43, point=23340]\n",
+ "[ITER 5500] Evaluating test: L1 0.003991520073374405 PSNR 31.70307226742015 SSIM 0.9760613441467285 LPIPSA 0.03166236662689377 LPIPSV 0.03519039918832919 [11/02 19:35:12]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.003723124644773848 PSNR 32.019970052382526 SSIM 0.977371871471405 LPIPSA 0.029298940016066328 LPIPSV 0.03294667470104554 [11/02 19:35:16]\n",
+ "Training progress: 30% 6000/20000 [05:06<07:53, 29.57it/s, Loss=0.0031016, psnr=33.12, point=23664]\n",
+ "[ITER 6000] Evaluating test: L1 0.003626870785785072 PSNR 32.264641032499426 SSIM 0.9785195589065552 LPIPSA 0.02875254358000615 LPIPSV 0.03300334666581715 [11/02 19:35:35]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.003470742061515065 PSNR 32.48500229330624 SSIM 0.9790921807289124 LPIPSA 0.027060272336444435 LPIPSV 0.03131019532242242 [11/02 19:35:39]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 19:35:39]\n",
+ "reset opacity [11/02 19:35:39]\n",
+ "Training progress: 32% 6500/20000 [05:30<07:40, 29.33it/s, Loss=0.0021710, psnr=35.57, point=23860]\n",
+ "[ITER 6500] Evaluating test: L1 0.003937000299201292 PSNR 31.469909443574792 SSIM 0.9768012762069702 LPIPSA 0.028228310429874587 LPIPSV 0.03284720254733282 [11/02 19:35:59]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.0038941325345898375 PSNR 31.471329857321347 SSIM 0.9770874977111816 LPIPSA 0.026127802920253837 LPIPSV 0.030966101049938622 [11/02 19:36:03]\n",
+ "Training progress: 35% 7000/20000 [05:53<07:13, 29.99it/s, Loss=0.0020965, psnr=35.84, point=24128]\n",
+ "[ITER 7000] Evaluating test: L1 0.00322633288691149 PSNR 33.21299642675063 SSIM 0.9809293150901794 LPIPSA 0.02474558643777581 LPIPSV 0.030020463992567623 [11/02 19:36:23]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.0031620937481741693 PSNR 33.2718719033634 SSIM 0.9810404181480408 LPIPSA 0.022758835159680423 LPIPSV 0.027890958056292114 [11/02 19:36:26]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 19:36:26]\n",
+ "Training progress: 38% 7500/20000 [06:17<07:00, 29.70it/s, Loss=0.0022441, psnr=39.25, point=24377]\n",
+ "[ITER 7500] Evaluating test: L1 0.0030787767267183345 PSNR 33.56330389135024 SSIM 0.9819073677062988 LPIPSA 0.022754613255314967 LPIPSV 0.028409223674851304 [11/02 19:36:46]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.002893353387823 PSNR 33.73301472383387 SSIM 0.9823980927467346 LPIPSA 0.02019603125860586 LPIPSV 0.02604539834839456 [11/02 19:36:50]\n",
+ "Training progress: 40% 8000/20000 [06:40<06:38, 30.10it/s, Loss=0.0028349, psnr=33.62, point=24582]\n",
+ "[ITER 8000] Evaluating test: L1 0.0031418657437076464 PSNR 33.28362924912397 SSIM 0.9815787076950073 LPIPSA 0.02164464378181626 LPIPSV 0.027787077624131653 [11/02 19:37:10]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.002831060692722745 PSNR 33.94874471776625 SSIM 0.983362078666687 LPIPSA 0.01896772298085339 LPIPSV 0.024709843318252003 [11/02 19:37:14]\n",
+ "Training progress: 42% 8500/20000 [07:04<06:34, 29.12it/s, Loss=0.0023249, psnr=35.43, point=24791]\n",
+ "[ITER 8500] Evaluating test: L1 0.002722093931344502 PSNR 34.722577151130224 SSIM 0.9842833280563354 LPIPSA 0.019171635675079682 LPIPSV 0.0258544588680653 [11/02 19:37:33]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.0022115933517103687 PSNR 36.539487053366265 SSIM 0.987093985080719 LPIPSA 0.0160608127274934 LPIPSV 0.02244157124968136 [11/02 19:37:37]\n",
+ "Training progress: 45% 9000/20000 [07:27<06:09, 29.76it/s, Loss=0.0024319, psnr=36.90, point=24991]\n",
+ "[ITER 9000] Evaluating test: L1 0.002782985944684376 PSNR 34.39767613130457 SSIM 0.9838734865188599 LPIPSA 0.018390752922962692 LPIPSV 0.025475855807171148 [11/02 19:37:57]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.002325658843962147 PSNR 35.662805669447955 SSIM 0.9864339232444763 LPIPSA 0.015304956338642275 LPIPSV 0.021827210672199726 [11/02 19:38:01]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 19:38:01]\n",
+ "reset opacity [11/02 19:38:01]\n",
+ "Training progress: 48% 9500/20000 [07:51<05:55, 29.54it/s, Loss=0.0022370, psnr=36.35, point=25142]\n",
+ "[ITER 9500] Evaluating test: L1 0.0029182358617510866 PSNR 33.941201714908374 SSIM 0.9831724762916565 LPIPSA 0.018854162327068692 LPIPSV 0.02544196996399585 [11/02 19:38:21]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.0024151302161900434 PSNR 35.21466535680434 SSIM 0.9859926700592041 LPIPSA 0.015866662046926862 LPIPSV 0.021733265150995815 [11/02 19:38:25]\n",
+ "Training progress: 50% 10000/20000 [08:15<05:40, 29.35it/s, Loss=0.0018371, psnr=39.57, point=25333]\n",
+ "[ITER 10000] Evaluating test: L1 0.0025155138060012285 PSNR 35.28580856323242 SSIM 0.9857916235923767 LPIPSA 0.0164103426367921 LPIPSV 0.02364072987042806 [11/02 19:38:44]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.002036749405841179 PSNR 36.98998372695025 SSIM 0.9885491728782654 LPIPSA 0.013549817616448682 LPIPSV 0.02003512076814385 [11/02 19:38:48]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 19:38:48]\n",
+ "Training progress: 52% 10500/20000 [08:39<05:27, 29.01it/s, Loss=0.0020259, psnr=38.06, point=25463]\n",
+ "[ITER 10500] Evaluating test: L1 0.002474383553763961 PSNR 35.48461229660932 SSIM 0.9861041903495789 LPIPSA 0.015623357749598868 LPIPSV 0.02309504753964789 [11/02 19:39:08]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0019195839344962116 PSNR 37.73864319745232 SSIM 0.9893012642860413 LPIPSA 0.0125727744992165 LPIPSV 0.01934837719754261 [11/02 19:39:12]\n",
+ "Training progress: 55% 11000/20000 [09:03<05:11, 28.85it/s, Loss=0.0020350, psnr=38.28, point=25603]\n",
+ "[ITER 11000] Evaluating test: L1 0.002474288093200063 PSNR 35.391779506907746 SSIM 0.9861826300621033 LPIPSA 0.014980822801589966 LPIPSV 0.0225992964251953 [11/02 19:39:32]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.001963925684857018 PSNR 37.28930103077608 SSIM 0.9892082214355469 LPIPSA 0.012041262570111191 LPIPSV 0.018896694048581755 [11/02 19:39:36]\n",
+ "Training progress: 57% 11500/20000 [09:26<05:01, 28.21it/s, Loss=0.0020863, psnr=37.58, point=25745]\n",
+ "[ITER 11500] Evaluating test: L1 0.0023690679261241764 PSNR 35.83518937054802 SSIM 0.9867870211601257 LPIPSA 0.014182944727294585 LPIPSV 0.022009404704851264 [11/02 19:39:56]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.0017094836700433755 PSNR 38.7799534517176 SSIM 0.9907426238059998 LPIPSA 0.010985186296131681 LPIPSV 0.01764915116569575 [11/02 19:40:00]\n",
+ "Training progress: 60% 12000/20000 [09:50<04:37, 28.79it/s, Loss=0.0016835, psnr=38.92, point=25861]\n",
+ "[ITER 12000] Evaluating test: L1 0.0023068275905269034 PSNR 36.11221077862908 SSIM 0.987166702747345 LPIPSA 0.013501460061353795 LPIPSV 0.021488242673085016 [11/02 19:40:19]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.001640654107391396 PSNR 39.27409026202034 SSIM 0.9911698698997498 LPIPSA 0.01031685494543875 LPIPSV 0.017301115097806734 [11/02 19:40:23]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 19:40:23]\n",
+ "reset opacity [11/02 19:40:24]\n",
+ "Training progress: 62% 12500/20000 [10:14<04:21, 28.71it/s, Loss=0.0019704, psnr=38.45, point=25920]\n",
+ "[ITER 12500] Evaluating test: L1 0.002254809718579054 PSNR 36.39647136015051 SSIM 0.987551212310791 LPIPSA 0.013404926367323189 LPIPSV 0.02115322687827489 [11/02 19:40:44]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.0017735589439432848 PSNR 38.48572472965016 SSIM 0.9902158379554749 LPIPSA 0.010537253144909354 LPIPSV 0.017490607905475533 [11/02 19:40:48]\n",
+ "Training progress: 65% 13000/20000 [10:38<04:03, 28.80it/s, Loss=0.0018404, psnr=36.97, point=26020]\n",
+ "[ITER 13000] Evaluating test: L1 0.002259130254113937 PSNR 36.21306273516487 SSIM 0.9875560402870178 LPIPSA 0.012956181869787328 LPIPSV 0.020939334679175827 [11/02 19:41:08]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.0018593564643250668 PSNR 37.47833229513729 SSIM 0.9898385405540466 LPIPSA 0.010177546235568383 LPIPSV 0.01713647852268289 [11/02 19:41:11]\n",
+ "Training progress: 68% 13500/20000 [11:02<03:42, 29.15it/s, Loss=0.0013432, psnr=41.01, point=26107]\n",
+ "[ITER 13500] Evaluating test: L1 0.0021840180818210628 PSNR 36.64903438792509 SSIM 0.9879549741744995 LPIPSA 0.012416717651135781 LPIPSV 0.020510641310144875 [11/02 19:41:31]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.0014966326039832305 PSNR 40.29557979808134 SSIM 0.9921141266822815 LPIPSA 0.00901140141136506 LPIPSV 0.016132625614238137 [11/02 19:41:35]\n",
+ "Training progress: 70% 14000/20000 [11:26<03:27, 28.97it/s, Loss=0.0014442, psnr=38.88, point=26185]\n",
+ "[ITER 14000] Evaluating test: L1 0.0021634726607076384 PSNR 36.868135788861444 SSIM 0.9881017208099365 LPIPSA 0.012043800533694379 LPIPSV 0.020393183783573264 [11/02 19:41:55]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.001421041981837548 PSNR 40.90569081025965 SSIM 0.9925393462181091 LPIPSA 0.008589388241114862 LPIPSV 0.015651223003206885 [11/02 19:41:59]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 19:41:59]\n",
+ "Training progress: 72% 14500/20000 [11:50<03:09, 29.07it/s, Loss=0.0016932, psnr=38.88, point=26248]\n",
+ "[ITER 14500] Evaluating test: L1 0.0021575742592925534 PSNR 36.81680499806124 SSIM 0.9881910681724548 LPIPSA 0.011699592393329915 LPIPSV 0.020007818882517955 [11/02 19:42:19]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0014391985845149439 PSNR 40.817247951731964 SSIM 0.992483913898468 LPIPSA 0.008308254579520401 LPIPSV 0.015421819352709195 [11/02 19:42:23]\n",
+ "Training progress: 75% 15000/20000 [12:14<02:52, 28.95it/s, Loss=0.0017991, psnr=38.04, point=26290]\n",
+ "[ITER 15000] Evaluating test: L1 0.002138127285220167 PSNR 36.816998425651995 SSIM 0.9882873296737671 LPIPSA 0.011435872656019294 LPIPSV 0.019753200275933042 [11/02 19:42:43]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0014240432064980268 PSNR 40.823616925407855 SSIM 0.9926294684410095 LPIPSA 0.008059776251149528 LPIPSV 0.015094283146455008 [11/02 19:42:47]\n",
+ "Training progress: 78% 15500/20000 [12:37<02:35, 28.91it/s, Loss=0.0016086, psnr=38.51, point=26290]\n",
+ "[ITER 15500] Evaluating test: L1 0.0020944507696720608 PSNR 37.00463721331428 SSIM 0.9885693788528442 LPIPSA 0.011072512469528353 LPIPSV 0.019485292846665662 [11/02 19:43:07]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0013416494030560203 PSNR 41.554411719827094 SSIM 0.9930062294006348 LPIPSA 0.007582568590912749 LPIPSV 0.014767095939639737 [11/02 19:43:10]\n",
+ "Training progress: 80% 16000/20000 [13:01<02:18, 28.95it/s, Loss=0.0012458, psnr=44.54, point=26290]\n",
+ "[ITER 16000] Evaluating test: L1 0.0020916331833338037 PSNR 37.05635676664465 SSIM 0.988582193851471 LPIPSA 0.01092055776868673 LPIPSV 0.019359046319390043 [11/02 19:43:30]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0013274909544955282 PSNR 41.6330155765309 SSIM 0.9931477904319763 LPIPSA 0.007408008828540058 LPIPSV 0.01452579877941924 [11/02 19:43:34]\n",
+ "Training progress: 82% 16500/20000 [13:24<01:58, 29.57it/s, Loss=0.0013561, psnr=41.18, point=26290]\n",
+ "[ITER 16500] Evaluating test: L1 0.0020606508136124294 PSNR 37.18249085370232 SSIM 0.9887865781784058 LPIPSA 0.010729558519361651 LPIPSV 0.019139877575285295 [11/02 19:43:54]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.0012675872866940848 PSNR 42.14861095652861 SSIM 0.9934194087982178 LPIPSA 0.0071537342360791034 LPIPSV 0.014282486894551446 [11/02 19:43:57]\n",
+ "Training progress: 85% 17000/20000 [13:48<01:41, 29.57it/s, Loss=0.0014765, psnr=40.25, point=26290]\n",
+ "[ITER 17000] Evaluating test: L1 0.0020460870925008375 PSNR 37.281663333668426 SSIM 0.9888578057289124 LPIPSA 0.010544639464248629 LPIPSV 0.018995609572705102 [11/02 19:44:17]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0012355104828362955 PSNR 42.435851601993335 SSIM 0.9935672879219055 LPIPSA 0.006927046637215158 LPIPSV 0.014086675939752775 [11/02 19:44:21]\n",
+ "Training progress: 88% 17500/20000 [14:11<01:24, 29.68it/s, Loss=0.0013269, psnr=43.89, point=26290]\n",
+ "[ITER 17500] Evaluating test: L1 0.0020378988017054167 PSNR 37.27000157973345 SSIM 0.9889217615127563 LPIPSA 0.010389339124016902 LPIPSV 0.018863909439567256 [11/02 19:44:40]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.001248505904667956 PSNR 42.26368668500115 SSIM 0.9935436248779297 LPIPSA 0.006807167028241298 LPIPSV 0.014019168529878645 [11/02 19:44:44]\n",
+ "Training progress: 90% 18000/20000 [14:35<01:07, 29.70it/s, Loss=0.0011387, psnr=45.58, point=26290]\n",
+ "[ITER 18000] Evaluating test: L1 0.002029527156778118 PSNR 37.3591589086196 SSIM 0.9889591336250305 LPIPSA 0.010240388869800988 LPIPSV 0.018774282899411285 [11/02 19:45:04]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.0012165572364157176 PSNR 42.63148520974552 SSIM 0.9937038421630859 LPIPSA 0.0066218414508244575 LPIPSV 0.013847511754754712 [11/02 19:45:08]\n",
+ "Training progress: 92% 18500/20000 [14:58<00:51, 28.91it/s, Loss=0.0012597, psnr=41.85, point=26290]\n",
+ "[ITER 18500] Evaluating test: L1 0.0020250013330951333 PSNR 37.35789759018842 SSIM 0.9890071749687195 LPIPSA 0.010166761054493049 LPIPSV 0.018697300816283506 [11/02 19:45:28]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.0011915953638141645 PSNR 42.86218620749081 SSIM 0.9938138127326965 LPIPSA 0.006492300232981934 LPIPSV 0.013772370814181426 [11/02 19:45:32]\n",
+ "Training progress: 95% 19000/20000 [15:22<00:33, 29.89it/s, Loss=0.0011309, psnr=43.36, point=26290]\n",
+ "[ITER 19000] Evaluating test: L1 0.002021696767769754 PSNR 37.393651850083295 SSIM 0.9890210628509521 LPIPSA 0.010066144113593242 LPIPSV 0.01858490502790493 [11/02 19:45:51]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.001198059697772431 PSNR 42.79396079568302 SSIM 0.9938265681266785 LPIPSA 0.006420347866985728 LPIPSV 0.013702843940871604 [11/02 19:45:55]\n",
+ "Training progress: 98% 19500/20000 [15:45<00:16, 29.77it/s, Loss=0.0010230, psnr=44.79, point=26290]\n",
+ "[ITER 19500] Evaluating test: L1 0.002008887500885655 PSNR 37.430504854987646 SSIM 0.9891350865364075 LPIPSA 0.009930628817528486 LPIPSV 0.01849834544255453 [11/02 19:46:15]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.0011919196386931134 PSNR 42.9009267021628 SSIM 0.993862509727478 LPIPSA 0.0063138376537929565 LPIPSV 0.01360575742471744 [11/02 19:46:19]\n",
+ "Training progress: 100% 20000/20000 [16:09<00:00, 20.63it/s, Loss=0.0013473, psnr=41.59, point=26290]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.0020156945649753596 PSNR 37.41020191417021 SSIM 0.989123523235321 LPIPSA 0.009821310283287484 LPIPSV 0.018423838212209588 [11/02 19:46:38]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0011875605046310846 PSNR 42.74012801226448 SSIM 0.9939517974853516 LPIPSA 0.006239970666630303 LPIPSV 0.013402188196778297 [11/02 19:46:42]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 19:46:42]\n",
+ "\n",
+ "Training complete. [11/02 19:46:43]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/standup --port 6017 --expname \"dnerf/standup\" --configs arguments/dnerf/standup.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {
+ "outputId": "573d031d-9d79-44a3-f580-efe5584c4f58",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "L00Aw9kCTT74"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "Looking for config file in output/dnerf/standup/cfg_args\n",
+ "Config file found: output/dnerf/standup/cfg_args\n",
+ "Rendering output/dnerf/standup/\n",
+ "feature_dim: 64 [11/02 19:46:51]\n",
+ "Loading trained model at iteration 20000 [11/02 19:46:51]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:46:51]\n",
+ "Reading Training Transforms [11/02 19:46:51]\n",
+ "Reading Test Transforms [11/02 19:47:03]\n",
+ "Generating Video Transforms [11/02 19:47:04]\n",
+ "hello!!!! [11/02 19:47:04]\n",
+ "Generating random point cloud (2000)... [11/02 19:47:04]\n",
+ "Loading Training Cameras [11/02 19:47:04]\n",
+ "Loading Test Cameras [11/02 19:47:04]\n",
+ "Loading Video Cameras [11/02 19:47:04]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:47:04]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:47:04]\n",
+ "loading model from existsoutput/dnerf/standup/point_cloud/iteration_20000 [11/02 19:47:05]\n",
+ "point nums: 26290 [11/02 19:47:05]\n",
+ "Rendering progress: 100% 20/20 [00:01<00:00, 17.02it/s]\n",
+ "FPS: 16.371805293877525 [11/02 19:47:06]\n",
+ "point nums: 26290 [11/02 19:47:08]\n",
+ "Rendering progress: 100% 160/160 [00:05<00:00, 30.23it/s]\n",
+ "FPS: 30.095600305372077 [11/02 19:47:13]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/standup/\" --skip_train --configs arguments/dnerf/standup.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ " \n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "Q2Ex7QzPTT74"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/standup/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "e8ea7b72-4b36-4bc6-cb52-54fe3bd92244",
+ "id": "KEy7xpU1TT74"
+ },
+ "execution_count": 32,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 32
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# **Trex**"
+ ],
+ "metadata": {
+ "id": "i7-5hOlRT4-q"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "outputId": "2b0c6774-344d-4c42-f687-e28ebb6c8d56",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "PFFjjnUcT4-r"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/4DGaussians\n",
+ "2024-02-11 19:47:32.582506: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "2024-02-11 19:47:32.582563: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "2024-02-11 19:47:32.584003: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-02-11 19:47:33.877822: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+ "Optimizing \n",
+ "Output folder: ./output/dnerf/trex [11/02 19:47:35]\n",
+ "feature_dim: 64 [11/02 19:47:35]\n",
+ "Found transforms_train.json file, assuming Blender data set! [11/02 19:47:35]\n",
+ "Reading Training Transforms [11/02 19:47:35]\n",
+ "Reading Test Transforms [11/02 19:47:50]\n",
+ "Generating Video Transforms [11/02 19:47:52]\n",
+ "hello!!!! [11/02 19:47:52]\n",
+ "Generating random point cloud (2000)... [11/02 19:47:52]\n",
+ "Loading Training Cameras [11/02 19:47:52]\n",
+ "Loading Test Cameras [11/02 19:47:52]\n",
+ "Loading Video Cameras [11/02 19:47:52]\n",
+ "Deformation Net Set aabb [1.29982098 1.29990645 1.29988719] [-1.29980838 -1.29981163 -1.29872349] [11/02 19:47:52]\n",
+ "Voxel Plane: set aabb= Parameter containing:\n",
+ "tensor([[ 1.2998, 1.2999, 1.2999],\n",
+ " [-1.2998, -1.2998, -1.2987]]) [11/02 19:47:52]\n",
+ "Number of points at initialisation : 2000 [11/02 19:47:52]\n",
+ "Training progress: 0% 0/3000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:47:52]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:47:53]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:47:53]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:47:55]\n",
+ "data loading done [11/02 19:47:59]\n",
+ "Training progress: 17% 500/3000 [00:25<01:32, 27.10it/s, Loss=0.0198344, psnr=22.80, point=2000]\n",
+ "[ITER 500] Evaluating test: L1 0.01880175499793361 PSNR 23.148789574118222 SSIM 0.9239090085029602 LPIPSA 0.16599441232050166 LPIPSV 0.12117235450183644 [11/02 19:48:22]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.01960206086582997 PSNR 22.89168211993049 SSIM 0.9188885688781738 LPIPSA 0.17978349678656635 LPIPSV 0.12976750293198755 [11/02 19:48:26]\n",
+ "Training progress: 33% 1000/3000 [00:50<01:14, 26.84it/s, Loss=0.0140043, psnr=24.50, point=2673]\n",
+ "[ITER 1000] Evaluating test: L1 0.013691086924689658 PSNR 24.23300283095416 SSIM 0.9354406595230103 LPIPSA 0.12596542694989374 LPIPSV 0.09529253336436608 [11/02 19:48:47]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.014349581125904532 PSNR 23.921652849982767 SSIM 0.9311635494232178 LPIPSA 0.137371045701644 LPIPSV 0.1038892282282605 [11/02 19:48:50]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:48:50]\n",
+ "Training progress: 50% 1500/3000 [01:05<00:21, 68.64it/s, Loss=0.0124970, psnr=24.36, point=5359]\n",
+ "[ITER 1500] Evaluating test: L1 0.012499812086496283 PSNR 24.68300168654498 SSIM 0.9411484599113464 LPIPSA 0.1007689287995591 LPIPSV 0.08369221625959172 [11/02 19:49:01]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.013053810388288078 PSNR 24.39313293905819 SSIM 0.937388002872467 LPIPSA 0.11119456019471674 LPIPSV 0.09188445876626407 [11/02 19:49:05]\n",
+ "Training progress: 67% 2000/3000 [01:20<00:14, 70.84it/s, Loss=0.0115764, psnr=24.22, point=8431]\n",
+ "[ITER 2000] Evaluating test: L1 0.012059309355476323 PSNR 24.8897762298584 SSIM 0.9448144435882568 LPIPSA 0.09133063607356127 LPIPSV 0.07819218122783829 [11/02 19:49:16]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.012328217956511412 PSNR 24.716165654799518 SSIM 0.941516637802124 LPIPSA 0.09997074612799813 LPIPSV 0.0857686904423377 [11/02 19:49:20]\n",
+ "Training progress: 83% 2500/3000 [01:34<00:07, 68.78it/s, Loss=0.0102680, psnr=24.23, point=11520]\n",
+ "[ITER 2500] Evaluating test: L1 0.011751653900479568 PSNR 24.945278953103458 SSIM 0.9472237825393677 LPIPSA 0.08486020740340738 LPIPSV 0.07383104194613065 [11/02 19:49:31]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.01179611009052571 PSNR 24.91128943948185 SSIM 0.9441668391227722 LPIPSA 0.09253634424770579 LPIPSV 0.0807957097011454 [11/02 19:49:35]\n",
+ "Training progress: 100% 3000/3000 [01:49<00:00, 47.74it/s, Loss=0.0116150, psnr=24.70, point=14611]\n",
+ "[ITER 3000] Evaluating test: L1 0.011731497066862443 PSNR 24.96229138093836 SSIM 0.9484957456588745 LPIPSA 0.08036957286736544 LPIPSV 0.07122244642061346 [11/02 19:49:46]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.011608932550777407 PSNR 25.10470536175896 SSIM 0.9457627534866333 LPIPSA 0.08669275396010455 LPIPSV 0.07743125715676476 [11/02 19:49:50]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:49:50]\n",
+ "reset opacity [11/02 19:49:51]\n",
+ "Training progress: 100% 3000/3000 [01:58<00:00, 25.27it/s, Loss=0.0116150, psnr=24.70, point=14611]\n",
+ "Training progress: 0% 0/20000 [00:00, ?it/s]Setting up [LPIPS] perceptual loss: trunk [alex], v[0.1], spatial [off] [11/02 19:49:51]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/alex.pth [11/02 19:49:51]\n",
+ "Setting up [LPIPS] perceptual loss: trunk [vgg], v[0.1], spatial [off] [11/02 19:49:51]\n",
+ "Loading model from: /usr/local/lib/python3.10/dist-packages/lpips/weights/v0.1/vgg.pth [11/02 19:49:52]\n",
+ "data loading done [11/02 19:49:56]\n",
+ "Training progress: 2% 500/20000 [00:32<16:36, 19.56it/s, Loss=0.0107347, psnr=26.24, point=15243]\n",
+ "[ITER 500] Evaluating test: L1 0.010059972047148381 PSNR 26.51243636187385 SSIM 0.9450697302818298 LPIPSA 0.09285357046653242 LPIPSV 0.07533321529626846 [11/02 19:50:27]\n",
+ "\n",
+ "[ITER 500] Evaluating train: L1 0.010494797874022932 PSNR 26.357239779304056 SSIM 0.9405146837234497 LPIPSA 0.10238884345573537 LPIPSV 0.08429559364038355 [11/02 19:50:31]\n",
+ "Training progress: 5% 1000/20000 [01:06<17:18, 18.30it/s, Loss=0.0082935, psnr=28.18, point=16871]\n",
+ "[ITER 1000] Evaluating test: L1 0.008503053227768224 PSNR 27.463179981007293 SSIM 0.9518308043479919 LPIPSA 0.07584421205170014 LPIPSV 0.06559897783924551 [11/02 19:51:01]\n",
+ "\n",
+ "[ITER 1000] Evaluating train: L1 0.008643959813258228 PSNR 27.392246695125802 SSIM 0.9482237696647644 LPIPSA 0.08358014133923194 LPIPSV 0.07293087156379924 [11/02 19:51:05]\n",
+ "\n",
+ "[ITER 1000] Saving Gaussians [11/02 19:51:06]\n",
+ "Training progress: 8% 1500/20000 [01:32<11:14, 27.41it/s, Loss=0.0080521, psnr=25.92, point=19831]\n",
+ "[ITER 1500] Evaluating test: L1 0.00773907419951523 PSNR 28.0397891998291 SSIM 0.9555554986000061 LPIPSA 0.06971785952063168 LPIPSV 0.06176053327234352 [11/02 19:51:27]\n",
+ "\n",
+ "[ITER 1500] Evaluating train: L1 0.008081091102212667 PSNR 27.77856658486759 SSIM 0.9513309597969055 LPIPSA 0.07709098848349907 LPIPSV 0.06906467785730082 [11/02 19:51:31]\n",
+ "Training progress: 10% 2000/20000 [01:58<11:07, 26.98it/s, Loss=0.0074458, psnr=27.31, point=22953]\n",
+ "[ITER 2000] Evaluating test: L1 0.007461616742041181 PSNR 28.239310208488913 SSIM 0.9572708010673523 LPIPSA 0.06413528651875608 LPIPSV 0.05850114366587471 [11/02 19:51:53]\n",
+ "\n",
+ "[ITER 2000] Evaluating train: L1 0.007441659238846863 PSNR 28.28712407280417 SSIM 0.9549981355667114 LPIPSA 0.0702871759148205 LPIPSV 0.06454163219998865 [11/02 19:51:57]\n",
+ "Training progress: 12% 2500/20000 [02:24<10:59, 26.54it/s, Loss=0.0071665, psnr=28.74, point=26074]\n",
+ "[ITER 2500] Evaluating test: L1 0.006809241877978339 PSNR 28.79032393062816 SSIM 0.9613041877746582 LPIPSA 0.05911114921464639 LPIPSV 0.05526455108295469 [11/02 19:52:19]\n",
+ "\n",
+ "[ITER 2500] Evaluating train: L1 0.006643827182843404 PSNR 29.084723192102768 SSIM 0.9602696895599365 LPIPSA 0.06451545348938774 LPIPSV 0.06058453045347158 [11/02 19:52:23]\n",
+ "Training progress: 15% 3000/20000 [02:51<11:35, 24.45it/s, Loss=0.0079107, psnr=27.59, point=29221]\n",
+ "[ITER 3000] Evaluating test: L1 0.006818714359884753 PSNR 28.71978198780733 SSIM 0.9614056944847107 LPIPSA 0.05603775743614225 LPIPSV 0.053357873331097996 [11/02 19:52:47]\n",
+ "\n",
+ "[ITER 3000] Evaluating train: L1 0.00647195108125315 PSNR 29.185923071468576 SSIM 0.9614923000335693 LPIPSA 0.06065971250919735 LPIPSV 0.05789673941976884 [11/02 19:52:51]\n",
+ "\n",
+ "[ITER 3000] Saving Gaussians [11/02 19:52:51]\n",
+ "reset opacity [11/02 19:52:51]\n",
+ "Training progress: 18% 3500/20000 [03:18<11:08, 24.69it/s, Loss=0.0060754, psnr=27.97, point=31931]\n",
+ "[ITER 3500] Evaluating test: L1 0.006276976070640718 PSNR 29.322380178114948 SSIM 0.9654931426048279 LPIPSA 0.051912844838464964 LPIPSV 0.05041589127743945 [11/02 19:53:14]\n",
+ "\n",
+ "[ITER 3500] Evaluating train: L1 0.005951071158051491 PSNR 29.735413495232077 SSIM 0.9653357267379761 LPIPSA 0.05647267905228278 LPIPSV 0.05484080577597899 [11/02 19:53:18]\n",
+ "Training progress: 20% 4000/20000 [03:45<10:35, 25.16it/s, Loss=0.0060314, psnr=27.71, point=35324]\n",
+ "[ITER 4000] Evaluating test: L1 0.006263944744954214 PSNR 29.32524871826172 SSIM 0.9649496674537659 LPIPSA 0.049332519993186 LPIPSV 0.04866959527134895 [11/02 19:53:41]\n",
+ "\n",
+ "[ITER 4000] Evaluating train: L1 0.006197311668930685 PSNR 29.396788204417508 SSIM 0.9631126523017883 LPIPSA 0.054053949082598966 LPIPSV 0.05326729640364647 [11/02 19:53:45]\n",
+ "\n",
+ "[ITER 4000] Saving Gaussians [11/02 19:53:45]\n",
+ "Training progress: 22% 4500/20000 [04:13<10:44, 24.04it/s, Loss=0.0052566, psnr=29.16, point=38602]\n",
+ "[ITER 4500] Evaluating test: L1 0.005904420785715475 PSNR 29.71140547359691 SSIM 0.9683398008346558 LPIPSA 0.04633492242325755 LPIPSV 0.0464415245634668 [11/02 19:54:08]\n",
+ "\n",
+ "[ITER 4500] Evaluating train: L1 0.005451850894400302 PSNR 30.281902201035443 SSIM 0.9692823886871338 LPIPSA 0.05005399576004814 LPIPSV 0.05007187430472935 [11/02 19:54:12]\n",
+ "Training progress: 25% 5000/20000 [04:41<11:29, 21.75it/s, Loss=0.0052184, psnr=29.39, point=41701]\n",
+ "[ITER 5000] Evaluating test: L1 0.005316310863503639 PSNR 30.392251631792853 SSIM 0.9720409512519836 LPIPSA 0.042555758729577065 LPIPSV 0.04358559770180898 [11/02 19:54:37]\n",
+ "\n",
+ "[ITER 5000] Evaluating train: L1 0.0049092683612423785 PSNR 30.94488177579992 SSIM 0.9727191925048828 LPIPSA 0.04653498179772321 LPIPSV 0.04746141832541017 [11/02 19:54:41]\n",
+ "\n",
+ "[ITER 5000] Saving Gaussians [11/02 19:54:41]\n",
+ "Training progress: 28% 5500/20000 [05:11<10:40, 22.63it/s, Loss=0.0049548, psnr=32.33, point=44872]\n",
+ "[ITER 5500] Evaluating test: L1 0.005440058063386994 PSNR 30.4190097135656 SSIM 0.9715037941932678 LPIPSA 0.041047771735226404 LPIPSV 0.04263925946810666 [11/02 19:55:06]\n",
+ "\n",
+ "[ITER 5500] Evaluating train: L1 0.005006182884030482 PSNR 31.007236368515912 SSIM 0.9723650813102722 LPIPSA 0.044388860244961345 LPIPSV 0.045861020026838076 [11/02 19:55:10]\n",
+ "Training progress: 30% 6000/20000 [05:40<11:46, 19.82it/s, Loss=0.0051759, psnr=29.80, point=47985]\n",
+ "[ITER 6000] Evaluating test: L1 0.005160198861952214 PSNR 30.737794651704675 SSIM 0.973230242729187 LPIPSA 0.038378781803390556 LPIPSV 0.040968897368978054 [11/02 19:55:36]\n",
+ "\n",
+ "[ITER 6000] Evaluating train: L1 0.004628789789207718 PSNR 31.487671796013327 SSIM 0.9748685359954834 LPIPSA 0.04155167301788049 LPIPSV 0.04387927405974444 [11/02 19:55:40]\n",
+ "\n",
+ "[ITER 6000] Saving Gaussians [11/02 19:55:40]\n",
+ "reset opacity [11/02 19:55:40]\n",
+ "Training progress: 32% 6500/20000 [06:11<10:34, 21.29it/s, Loss=0.0041229, psnr=30.22, point=50039]\n",
+ "[ITER 6500] Evaluating test: L1 0.004991759066743886 PSNR 30.90988473331227 SSIM 0.9744866490364075 LPIPSA 0.03649620914503055 LPIPSV 0.039942114997436017 [11/02 19:56:06]\n",
+ "\n",
+ "[ITER 6500] Evaluating train: L1 0.004465482416836654 PSNR 31.59623561185949 SSIM 0.9759798645973206 LPIPSA 0.039251767537173105 LPIPSV 0.04283428893369787 [11/02 19:56:11]\n",
+ "Training progress: 35% 7000/20000 [06:41<10:37, 20.38it/s, Loss=0.0041221, psnr=33.17, point=52519]\n",
+ "[ITER 7000] Evaluating test: L1 0.004922177601496086 PSNR 31.140842774335077 SSIM 0.9754375219345093 LPIPSA 0.03500666217330624 LPIPSV 0.03875595265451599 [11/02 19:56:37]\n",
+ "\n",
+ "[ITER 7000] Evaluating train: L1 0.004232478697839028 PSNR 32.215220619650445 SSIM 0.9778322577476501 LPIPSA 0.03745685463004252 LPIPSV 0.041254893164424336 [11/02 19:56:41]\n",
+ "\n",
+ "[ITER 7000] Saving Gaussians [11/02 19:56:41]\n",
+ "Training progress: 38% 7500/20000 [07:13<10:06, 20.59it/s, Loss=0.0039245, psnr=30.69, point=54906]\n",
+ "[ITER 7500] Evaluating test: L1 0.004622431964996983 PSNR 31.551762412576114 SSIM 0.9769333004951477 LPIPSA 0.03303717958795674 LPIPSV 0.03738241281141253 [11/02 19:57:09]\n",
+ "\n",
+ "[ITER 7500] Evaluating train: L1 0.00396633887773051 PSNR 32.6414135203642 SSIM 0.9792860746383667 LPIPSA 0.03472770542344626 LPIPSV 0.039397241109434294 [11/02 19:57:13]\n",
+ "Training progress: 40% 8000/20000 [07:45<09:52, 20.25it/s, Loss=0.0036215, psnr=34.86, point=57158]\n",
+ "[ITER 8000] Evaluating test: L1 0.004448248881517964 PSNR 31.839734021355124 SSIM 0.9781795740127563 LPIPSA 0.03090365307734293 LPIPSV 0.03582913406631526 [11/02 19:57:41]\n",
+ "\n",
+ "[ITER 8000] Evaluating train: L1 0.003648587584714679 PSNR 33.3141809351304 SSIM 0.9814309477806091 LPIPSA 0.0322286753970034 LPIPSV 0.037641638013369894 [11/02 19:57:45]\n",
+ "Training progress: 42% 8500/20000 [08:17<09:52, 19.42it/s, Loss=0.0040814, psnr=31.09, point=59215]\n",
+ "[ITER 8500] Evaluating test: L1 0.00439088639584096 PSNR 31.978923236622528 SSIM 0.9788561463356018 LPIPSA 0.029759964193491376 LPIPSV 0.03528891514767619 [11/02 19:58:13]\n",
+ "\n",
+ "[ITER 8500] Evaluating train: L1 0.0034776923575383775 PSNR 33.743813907398895 SSIM 0.9825975894927979 LPIPSA 0.030817703508278903 LPIPSV 0.03648220681968857 [11/02 19:58:17]\n",
+ "Training progress: 45% 9000/20000 [08:49<09:28, 19.34it/s, Loss=0.0038393, psnr=34.06, point=61025]\n",
+ "[ITER 9000] Evaluating test: L1 0.004309330217759399 PSNR 32.12810583675609 SSIM 0.9792281985282898 LPIPSA 0.028704935191747022 LPIPSV 0.03436037481707685 [11/02 19:58:45]\n",
+ "\n",
+ "[ITER 9000] Evaluating train: L1 0.003430729841484743 PSNR 33.93895945829504 SSIM 0.9830036163330078 LPIPSA 0.029525452457806644 LPIPSV 0.03556227629237315 [11/02 19:58:49]\n",
+ "\n",
+ "[ITER 9000] Saving Gaussians [11/02 19:58:49]\n",
+ "reset opacity [11/02 19:58:50]\n",
+ "Training progress: 48% 9500/20000 [09:23<09:04, 19.27it/s, Loss=0.0033051, psnr=32.35, point=61813]\n",
+ "[ITER 9500] Evaluating test: L1 0.004158472272512668 PSNR 32.4715378705193 SSIM 0.9803122282028198 LPIPSA 0.027432689278879586 LPIPSV 0.033726864330032295 [11/02 19:59:18]\n",
+ "\n",
+ "[ITER 9500] Evaluating train: L1 0.003218998940771117 PSNR 34.422919105081 SSIM 0.9841103553771973 LPIPSA 0.02798707180601709 LPIPSV 0.034809706513496005 [11/02 19:59:22]\n",
+ "Training progress: 50% 10000/20000 [09:56<08:44, 19.06it/s, Loss=0.0034315, psnr=32.52, point=62905]\n",
+ "[ITER 10000] Evaluating test: L1 0.004174274460905615 PSNR 32.40211834627039 SSIM 0.9802983403205872 LPIPSA 0.026683700270950794 LPIPSV 0.03301398379399496 [11/02 19:59:51]\n",
+ "\n",
+ "[ITER 10000] Evaluating train: L1 0.0032101518830613177 PSNR 34.45219825295841 SSIM 0.9844654202461243 LPIPSA 0.02717371065826977 LPIPSV 0.03389762692591723 [11/02 19:59:55]\n",
+ "\n",
+ "[ITER 10000] Saving Gaussians [11/02 19:59:55]\n",
+ "Training progress: 52% 10500/20000 [10:30<08:18, 19.07it/s, Loss=0.0030283, psnr=37.38, point=63972]\n",
+ "[ITER 10500] Evaluating test: L1 0.004118613350917311 PSNR 32.54274895611931 SSIM 0.980718731880188 LPIPSA 0.025657088221872553 LPIPSV 0.0323334942188333 [11/02 20:00:25]\n",
+ "\n",
+ "[ITER 10500] Evaluating train: L1 0.0031671744141289417 PSNR 34.6388749515309 SSIM 0.9847819209098816 LPIPSA 0.02563790451077854 LPIPSV 0.0329230517587241 [11/02 20:00:29]\n",
+ "Training progress: 55% 11000/20000 [11:03<08:24, 17.85it/s, Loss=0.0036144, psnr=32.03, point=64823]\n",
+ "[ITER 11000] Evaluating test: L1 0.003960702787427341 PSNR 32.84606136995203 SSIM 0.9815709590911865 LPIPSA 0.024773080833256245 LPIPSV 0.031685027567779314 [11/02 20:00:58]\n",
+ "\n",
+ "[ITER 11000] Evaluating train: L1 0.002906792564317584 PSNR 35.35626445097082 SSIM 0.9862543940544128 LPIPSA 0.024514070383327848 LPIPSV 0.03202154553111862 [11/02 20:01:02]\n",
+ "Training progress: 57% 11500/20000 [11:36<07:27, 18.99it/s, Loss=0.0038329, psnr=31.93, point=65620]\n",
+ "[ITER 11500] Evaluating test: L1 0.004047579310067436 PSNR 32.68005494510426 SSIM 0.9813019037246704 LPIPSA 0.024308488923398888 LPIPSV 0.03132567236966947 [11/02 20:01:32]\n",
+ "\n",
+ "[ITER 11500] Evaluating train: L1 0.0029915418777176563 PSNR 35.13838016285616 SSIM 0.9859566688537598 LPIPSA 0.023818130385788047 LPIPSV 0.031422309467897695 [11/02 20:01:36]\n",
+ "Training progress: 60% 12000/20000 [12:10<07:04, 18.86it/s, Loss=0.0031661, psnr=33.08, point=66330]\n",
+ "[ITER 12000] Evaluating test: L1 0.0038703684421146616 PSNR 33.08936657625086 SSIM 0.9823927283287048 LPIPSA 0.023293630658265424 LPIPSV 0.030797829303671333 [11/02 20:02:05]\n",
+ "\n",
+ "[ITER 12000] Evaluating train: L1 0.002694884328829015 PSNR 35.96186559340533 SSIM 0.9874290227890015 LPIPSA 0.02251506662544082 LPIPSV 0.030707758358296228 [11/02 20:02:09]\n",
+ "\n",
+ "[ITER 12000] Saving Gaussians [11/02 20:02:09]\n",
+ "reset opacity [11/02 20:02:10]\n",
+ "Training progress: 62% 12500/20000 [12:44<06:41, 18.66it/s, Loss=0.0027083, psnr=37.33, point=66633]\n",
+ "[ITER 12500] Evaluating test: L1 0.004045265644569607 PSNR 32.76010984532973 SSIM 0.9816367030143738 LPIPSA 0.023098896093228283 LPIPSV 0.030778738505700055 [11/02 20:02:40]\n",
+ "\n",
+ "[ITER 12500] Evaluating train: L1 0.002980308168951203 PSNR 35.250071806066174 SSIM 0.9860715270042419 LPIPSA 0.022382862975492197 LPIPSV 0.030832299414803 [11/02 20:02:44]\n",
+ "Training progress: 65% 13000/20000 [13:18<06:17, 18.53it/s, Loss=0.0028291, psnr=38.08, point=67070]\n",
+ "[ITER 13000] Evaluating test: L1 0.0037885586292866398 PSNR 33.250728494980756 SSIM 0.9828112125396729 LPIPSA 0.022341431666384724 LPIPSV 0.030182414633386275 [11/02 20:03:13]\n",
+ "\n",
+ "[ITER 13000] Evaluating train: L1 0.00256488991298658 PSNR 36.33257809807272 SSIM 0.9880160093307495 LPIPSA 0.021199504297007534 LPIPSV 0.029799283098648575 [11/02 20:03:18]\n",
+ "Training progress: 68% 13500/20000 [13:52<05:54, 18.36it/s, Loss=0.0030381, psnr=35.66, point=67482]\n",
+ "[ITER 13500] Evaluating test: L1 0.0037254342151915327 PSNR 33.304793189553656 SSIM 0.9830456972122192 LPIPSA 0.02187432808911099 LPIPSV 0.029764069025130832 [11/02 20:03:47]\n",
+ "\n",
+ "[ITER 13500] Evaluating train: L1 0.0025499563883332643 PSNR 36.359037062701056 SSIM 0.9881306886672974 LPIPSA 0.02079529222100973 LPIPSV 0.029457193744533202 [11/02 20:03:51]\n",
+ "Training progress: 70% 14000/20000 [14:26<05:38, 17.72it/s, Loss=0.0026062, psnr=34.84, point=67840]\n",
+ "[ITER 14000] Evaluating test: L1 0.0036819026367191005 PSNR 33.4215690388399 SSIM 0.9833311438560486 LPIPSA 0.02134865467600963 LPIPSV 0.029360899263445067 [11/02 20:04:21]\n",
+ "\n",
+ "[ITER 14000] Evaluating train: L1 0.002469104673603878 PSNR 36.65652555577895 SSIM 0.988574743270874 LPIPSA 0.020026287885711473 LPIPSV 0.02888151488321669 [11/02 20:04:25]\n",
+ "\n",
+ "[ITER 14000] Saving Gaussians [11/02 20:04:25]\n",
+ "Training progress: 72% 14500/20000 [15:01<05:21, 17.13it/s, Loss=0.0027255, psnr=38.67, point=68215]\n",
+ "[ITER 14500] Evaluating test: L1 0.0036775862442000825 PSNR 33.43101714639103 SSIM 0.9834379553794861 LPIPSA 0.021026410819853052 LPIPSV 0.02918763436815318 [11/02 20:04:56]\n",
+ "\n",
+ "[ITER 14500] Evaluating train: L1 0.0024540416745688107 PSNR 36.72125446095186 SSIM 0.9887538552284241 LPIPSA 0.019567860728677583 LPIPSV 0.028482276836738867 [11/02 20:05:00]\n",
+ "Training progress: 75% 15000/20000 [15:35<04:42, 17.70it/s, Loss=0.0032185, psnr=33.34, point=68582]\n",
+ "[ITER 15000] Evaluating test: L1 0.0036595718359903376 PSNR 33.475546331966626 SSIM 0.9835842251777649 LPIPSA 0.020678027587778428 LPIPSV 0.02896598112933776 [11/02 20:05:31]\n",
+ "\n",
+ "[ITER 15000] Evaluating train: L1 0.0023971107336418595 PSNR 36.974844091078815 SSIM 0.9890598058700562 LPIPSA 0.019020767937249997 LPIPSV 0.028176188907202554 [11/02 20:05:35]\n",
+ "Training progress: 78% 15500/20000 [16:09<03:59, 18.77it/s, Loss=0.0025343, psnr=37.89, point=68582]\n",
+ "[ITER 15500] Evaluating test: L1 0.0036273793016067322 PSNR 33.56937374788172 SSIM 0.9837498664855957 LPIPSA 0.02038805663366528 LPIPSV 0.02875011951169547 [11/02 20:06:04]\n",
+ "\n",
+ "[ITER 15500] Evaluating train: L1 0.0023307538153055835 PSNR 37.18874426449047 SSIM 0.9893030524253845 LPIPSA 0.018540347159347114 LPIPSV 0.0278258897802409 [11/02 20:06:08]\n",
+ "Training progress: 80% 16000/20000 [16:42<03:38, 18.32it/s, Loss=0.0030620, psnr=33.90, point=68582]\n",
+ "[ITER 16000] Evaluating test: L1 0.0036349283147822406 PSNR 33.59557780097513 SSIM 0.9838820099830627 LPIPSA 0.020207094905131003 LPIPSV 0.028608373201945248 [11/02 20:06:38]\n",
+ "\n",
+ "[ITER 16000] Evaluating train: L1 0.0023036232245538164 PSNR 37.36785125732422 SSIM 0.9895520806312561 LPIPSA 0.01824615934096715 LPIPSV 0.027594883761861744 [11/02 20:06:42]\n",
+ "Training progress: 82% 16500/20000 [17:16<03:09, 18.50it/s, Loss=0.0022302, psnr=39.45, point=68582]\n",
+ "[ITER 16500] Evaluating test: L1 0.003608447006520103 PSNR 33.59735870361328 SSIM 0.9839379787445068 LPIPSA 0.01993996458237662 LPIPSV 0.0284372202175505 [11/02 20:07:12]\n",
+ "\n",
+ "[ITER 16500] Evaluating train: L1 0.002277945229948005 PSNR 37.42123929192038 SSIM 0.9896661043167114 LPIPSA 0.01786537462955012 LPIPSV 0.027274546816068536 [11/02 20:07:16]\n",
+ "Training progress: 85% 17000/20000 [17:50<02:41, 18.53it/s, Loss=0.0025815, psnr=38.61, point=68582]\n",
+ "[ITER 17000] Evaluating test: L1 0.0035745390855214175 PSNR 33.67773931166705 SSIM 0.9840154051780701 LPIPSA 0.01970217519384973 LPIPSV 0.02827961786704905 [11/02 20:07:46]\n",
+ "\n",
+ "[ITER 17000] Evaluating train: L1 0.0022404862239080317 PSNR 37.54612036312328 SSIM 0.9898358583450317 LPIPSA 0.017622757976984278 LPIPSV 0.027052016052253106 [11/02 20:07:50]\n",
+ "Training progress: 88% 17500/20000 [18:24<02:22, 17.52it/s, Loss=0.0031549, psnr=34.81, point=68582]\n",
+ "[ITER 17500] Evaluating test: L1 0.003578889471314409 PSNR 33.663823408239026 SSIM 0.9841052889823914 LPIPSA 0.01949795160223456 LPIPSV 0.02814220921958194 [11/02 20:08:20]\n",
+ "\n",
+ "[ITER 17500] Evaluating train: L1 0.0022307517801356666 PSNR 37.60303205602309 SSIM 0.9899232387542725 LPIPSA 0.017451702781459865 LPIPSV 0.02687989383497659 [11/02 20:08:24]\n",
+ "Training progress: 90% 18000/20000 [18:58<01:57, 17.05it/s, Loss=0.0026362, psnr=35.90, point=68582]\n",
+ "[ITER 18000] Evaluating test: L1 0.003545800080139409 PSNR 33.737288755529065 SSIM 0.9842310547828674 LPIPSA 0.019215019474572995 LPIPSV 0.028018660843372345 [11/02 20:08:54]\n",
+ "\n",
+ "[ITER 18000] Evaluating train: L1 0.002188974539475406 PSNR 37.76615456973805 SSIM 0.9901342988014221 LPIPSA 0.01711268043693374 LPIPSV 0.026681782787337023 [11/02 20:08:58]\n",
+ "Training progress: 92% 18500/20000 [19:32<01:20, 18.61it/s, Loss=0.0027251, psnr=33.62, point=68582]\n",
+ "[ITER 18500] Evaluating test: L1 0.0035712857977213226 PSNR 33.73222956937902 SSIM 0.9842899441719055 LPIPSA 0.019187442629652864 LPIPSV 0.027893033097772038 [11/02 20:09:28]\n",
+ "\n",
+ "[ITER 18500] Evaluating train: L1 0.002206967766944538 PSNR 37.76776571834789 SSIM 0.9901586174964905 LPIPSA 0.016937159330529326 LPIPSV 0.026600766488734412 [11/02 20:09:32]\n",
+ "Training progress: 95% 19000/20000 [20:06<00:53, 18.58it/s, Loss=0.0024518, psnr=39.17, point=68582]\n",
+ "[ITER 19000] Evaluating test: L1 0.0035337324130951483 PSNR 33.783190334544464 SSIM 0.9843568205833435 LPIPSA 0.019025096450658405 LPIPSV 0.02772905315984698 [11/02 20:10:01]\n",
+ "\n",
+ "[ITER 19000] Evaluating train: L1 0.0021754320546546403 PSNR 37.86920637242934 SSIM 0.9903094172477722 LPIPSA 0.01677452718072078 LPIPSV 0.02633450213162338 [11/02 20:10:05]\n",
+ "Training progress: 98% 19500/20000 [20:40<00:26, 18.73it/s, Loss=0.0023580, psnr=36.15, point=68582]\n",
+ "[ITER 19500] Evaluating test: L1 0.003524167387855842 PSNR 33.78546826979693 SSIM 0.9843757748603821 LPIPSA 0.01894274731988416 LPIPSV 0.027688861660221043 [11/02 20:10:35]\n",
+ "\n",
+ "[ITER 19500] Evaluating train: L1 0.002132532001911279 PSNR 37.98792625876034 SSIM 0.9904065132141113 LPIPSA 0.016570675570298645 LPIPSV 0.026215560843839365 [11/02 20:10:39]\n",
+ "Training progress: 100% 20000/20000 [21:13<00:00, 15.70it/s, Loss=0.0029503, psnr=34.21, point=68582]\n",
+ "\n",
+ "[ITER 20000] Evaluating test: L1 0.0035425250706098534 PSNR 33.76519977345186 SSIM 0.98441082239151 LPIPSA 0.01883706357330084 LPIPSV 0.02761949259130394 [11/02 20:11:09]\n",
+ "\n",
+ "[ITER 20000] Evaluating train: L1 0.0021336265456150558 PSNR 38.019817127900964 SSIM 0.9904910922050476 LPIPSA 0.016449138631715494 LPIPSV 0.02606668816331555 [11/02 20:11:13]\n",
+ "\n",
+ "[ITER 20000] Saving Gaussians [11/02 20:11:13]\n",
+ "\n",
+ "Training complete. [11/02 20:11:14]\n"
+ ]
+ }
+ ],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python train.py -s /content/test/data/trex --port 6017 --expname \"dnerf/trex\" --configs arguments/dnerf/trex.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "iIcuFQhQT4-r"
+ },
+ "outputs": [],
+ "source": [
+ "%cd /content/4DGaussians\n",
+ "!python render.py --model_path \"output/dnerf/trex/\" --skip_train --configs arguments/dnerf/trex.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import HTML\n",
+ "from base64 import b64encode\n",
+ "def display_video(video_path):\n",
+ " mp4 = open(video_path,'rb').read()\n",
+ " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
+ " return HTML(\"\"\"\n",
+ " \n",
+ " \"\"\" % data_url)"
+ ],
+ "metadata": {
+ "id": "GOYqyAd1T4-s"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "save_dir = '/content/4DGaussians/output/dnerf/trex/video/ours_20000/video_rgb.mp4'\n",
+ "\n",
+ "import os\n",
+ "import glob\n",
+ "# video_path = glob.glob(os.path.join(save_dir, \"*-test.mp4\"))[0]\n",
+ "display_video(save_dir)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "outputId": "8403214d-cb33-4c7c-82fa-18f67d0adb18",
+ "id": "v6VTcnnNT4-s"
+ },
+ "execution_count": 36,
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 36,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ "\n",
+ " \n",
+ " "
+ ]
+ },
+ "metadata": {},
+ "execution_count": 36
+ }
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": [],
+ "collapsed_sections": [
+ "IBB-voY0oqpy",
+ "5W8d3nrVPAiL",
+ "9XtQzIqSRx_b",
+ "yLZDDHL1SGYU",
+ "DKqkeJqDTT73",
+ "i7-5hOlRT4-q"
+ ]
},
"kernelspec": {
"display_name": "Python 3",
From 8d3fed3953e0ee62d8af49595096db051b7937b3 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal
<85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Mon, 12 Feb 2024 02:48:22 +0530
Subject: [PATCH 15/18] Update README.md
Added new colab
---
README.md | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index f9372469..5f97f145 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,9 @@
![block](assets/teaserfig.jpg)
Our method converges very quickly and achieves real-time rendering speed.
-Colab demo:[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/hustvl/4DGaussians/blob/master/4DGaussians.ipynb) (Thanks [camenduru](https://github.com/camenduru/4DGaussians-colab).)
+New Colab demo:[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz0D5Y9egAlcxXy8YO9UmpQ9oH51R7OW?usp=sharing)
+
+Old Colab demo:[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/hustvl/4DGaussians/blob/master/4DGaussians.ipynb) (Thanks [camenduru](https://github.com/camenduru/4DGaussians-colab).)
Light Gaussian implementation: [This link](https://github.com/pablodawson/4DGaussians) (Thanks [pablodawson](https://github.com/pablodawson))
From fa219d26c302bece85371dfca1bef5bc881b8a40 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal
<85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Mon, 12 Feb 2024 02:57:49 +0530
Subject: [PATCH 16/18] Create jk
---
Output Videos/jk | 1 +
1 file changed, 1 insertion(+)
create mode 100644 Output Videos/jk
diff --git a/Output Videos/jk b/Output Videos/jk
new file mode 100644
index 00000000..1f9d725a
--- /dev/null
+++ b/Output Videos/jk
@@ -0,0 +1 @@
+l
From 2813a2ce6458add4a119228d761e257f664a3b64 Mon Sep 17 00:00:00 2001
From: Tasmay Pankaj Tibrewal
<85983760+Tasmay-Tibrewal@users.noreply.github.com>
Date: Mon, 12 Feb 2024 02:58:30 +0530
Subject: [PATCH 17/18] Added output videos
---
Output Videos/T_rex.mp4 | Bin 0 -> 490484 bytes
Output Videos/bouncing_balls.mp4 | Bin 0 -> 270761 bytes
Output Videos/hell_warrior.mp4 | Bin 0 -> 381829 bytes
Output Videos/hook.mp4 | Bin 0 -> 439895 bytes
Output Videos/jumping_jacks.mp4 | Bin 0 -> 267049 bytes
Output Videos/lego.mp4 | Bin 0 -> 894013 bytes
Output Videos/mutant.mp4 | Bin 0 -> 414657 bytes
Output Videos/standup.mp4 | Bin 0 -> 271499 bytes
8 files changed, 0 insertions(+), 0 deletions(-)
create mode 100644 Output Videos/T_rex.mp4
create mode 100644 Output Videos/bouncing_balls.mp4
create mode 100644 Output Videos/hell_warrior.mp4
create mode 100644 Output Videos/hook.mp4
create mode 100644 Output Videos/jumping_jacks.mp4
create mode 100644 Output Videos/lego.mp4
create mode 100644 Output Videos/mutant.mp4
create mode 100644 Output Videos/standup.mp4
diff --git a/Output Videos/T_rex.mp4 b/Output Videos/T_rex.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..af17a38b93ca591b3eb00ebb49e1e4a9bd8ff958
GIT binary patch
literal 490484
zq7@aS4lDN*1dhQUYnVcCBii%r5JZjH*Beg+mn>11Lz3km4=(w49qH(GomvE)G-YPA
zkRfz*lwB4Y3dOn|GS)jXzaoiyy8H46$$ZAeOQHXM+9ApZ0GJ8@0MHVkTmxsJ|Je+Hvag@tz5qtP_A_w_<#nZD;i7O7+KVI4
z)sSGi1ef5yIBfeNTZfY^f4<&faV?N3?`%x6VWQisQf#*eEl-JDMcAJ<{Og6}DBv-L
z;VKnwe&bJV4)e)>?Hqzz#fotqTgr*xsKu<(bnlmJ_1sd(tCKp9Y&7m(Zz%QP#k&0(
zt%cS?jilhJj8?1p@lB5^j+2HQ@CDx=#Ntahr)|pR>;$;_;;Q6|HcRWFGalm`jok0c
zy_a*z!1X-l?7Z09m}UarCB%5Bs-A)s^w^shT~eW{dwYN`Z)sIgm4$iv)H5tv(S~rr
zl~$1qK|>1N5zv|veGgKM#SUvbrXRH=VfQJ|_(>1a)%?_-9OM};t&wc4L;wUNU^VLVYlAn3d9k(
zKTSQ}mXdoq&N#42pi@l+yk)5L8^SI+?4pt3IfV*Y@6&7JF}$3;5d*v!CNrtEcg;sT
zpD}8E0=x0UqA>wmlT)1me0!Rf!L`=(>Z!VyyrM;R&f+y*mB)vXo2`{l~?c}u^*=!Jy3F7;$n#LP^L?AO*V
zRx(>TROa|JB@$6^N>#>-IowC-n%92@EMaoaL)u67vyJ#VGaSzje9t@|OKQPE`LxM2
z{wOy_YJyUhthP$WepC@%HgY{Jn>+p#gyL_7DZFMQkc~#9O&EY*r>&xd#I1qJ<%F`7
zx>m$!AlvoZn+Vhgg%S4D5FMsE%&f7p>(dcj-}Uf?d9I}(hFeN46FCsydv$UD#7}1#
zle&FuA59=_2~gOgyk&^f+K%uVzXy}6@0v`w+*+OH%5)4611vzhY(FL66^n*Sbb$VA
z0xjQK99t~+D<4UGi*3qHnF3-D4J;v3oxw@vF~HsFA!gb}Hti4U_zG6V6LrS%zi}
zbUN8dSD!zQ8i9EDjE$S|mNvqcEhjJ+lC$VyMc=?<9KJ*H+ulBMjue@qO&ar|8Bk{Q
z+LdqlYZ^Qjg+7IeP6ZA#7L$6ql31s%6@(B_$Qz?D$7d278vYjBp#tN&B9N~a>6w&O
zzyvBw`#92y^}7&KU}s5M!7=QlL8?k@S;FTXG7D(#JaX8sfwiodaRgzfU{GX27v1fV
zyb*D)wYH_S+{3iEW@AR`n<}-q>^fiti9BV{na3v%G9~ED>J^A)H=A=u5BN{j=*beK
zU0K6CT=2HeZ?#H`+JxW8f8O_|hdw)K=-=R5t=uH7C>3QC1Wy`!?;J^w`GBy#e~ERR
z`>7mN^Zj>9iSz$isM3IPjV%9nq2kafg7OwSYG054VuDZuKx1WD018^e0F)0L2Qt=&Zngya^aXnzVqrR~cHJ-;MR-i1PRJ8a;fgc-|yJVabN~iN?}o
zZ*L*#GRNsY`<|6TB`pblEswG2DcW?s;uQ`S%jHSG_1Xb7*WU{;{mahR6NUsiH6%$0
zR*w3HT!MtU{~)YBmyVj6uc63LKUcM+9H$>qOGj8q#pY@7clwCKj@wKw#7};(Vj3t>
zFic_KTx%X84xhs_ki==f|B@r^0}IjH-SX?5#ox^QT%?T$2e1g-wbI*Vd-okAqU_N~
zP~2{D#kTrnEuQ0Mj5Kky%C|tf?XrF(N=`Uy-Q)9LF7)mmJ|KtBYtq|sie^U>1kTL)
z|8*ca*27xcxDqEuPQnJ29;VLRzOIT7N(%Gs72W1U#Q>dlq2K3HvRFIhYqZc=AqZ
zlr||hlgN*UgzI}ZjmJ!&POI{hH_f%xa>R23_QMTtE#e+am^4OU5Fm@(x9m!$rS%=;
z7d&wNB=e!b!BUj$1c22zQWeeEa`5H9P4_ztO0U5(x@7$eH6vOUx#H6zQ-4i$5uayw
zEL&n##X_ELyMkjy$#Y=xkbktnqvl{*2{*0YuWJiivbTC!iddv*M$)DFzoJquyLwFC
zzem^kPiD2wvjmW@koXK_ic&=^@z#XOc>IeQ600RwZ(QBJ%Z{CIFB_nVHacFnfvZSo
zBa4&_-3lkV`TJ^f6?IrZYjc>%F59*g&qzmt6;&B_a?YG~@SLV0;YL;V9EIhSg2G3+
zzO)^~{|N!lILF50hf2@mHFnI```OvnGJ%&;EV~)nvm`7HEIKQ^*J?$+K>
zYK9b)JN3-?&CtE~oltLaZ+$YR=d>?V2m(Y(q8~oLWMwb}1&(CEMgcIY*z)~)DbCTp
zT$CrmG=%ELS77T3-X=!&MK>$Q>!?{mU6K>;P624Lm-tk#UIn^UmeS`x-FmC~IaQ#|
zBprfwPFO1;!m<3OtYKifV3o_OVgXj6T5k)?gI&M=qFMe5%Ad6f1mh>5G9;L3B3UkI
zo*@Hi_Wi$CVL11<0gT-n4zIYmJm1~@y?_54^}=yNje_(hLy}WcLGrik($xmED?Cix
zLOiVeZtlI156?9E7*O87BQ)T|qXvq!iruJ2)d+!j_;4$ygef{8Uq2D}K>Wvb+jMRr
zfUW#YEP_-Fb#q+B6Dsce3Y+XR@i;PeSM^7mJb###zB|8F9^peO4g
zD@7f<_Fc%3Xa;bYL3Y)j}GREYuP7ApQPA6#aB(OXA>VaRXlH
zwSwFgkXXW3H5z9){afV|yK*POz`xBhE0ZR>Z*AeK3n;ow(q?+(JU5_Vbyi}(F?=v8
z$WUgZfR`f~x?Aacj-r@Ec$%%!lw{rXIK!@Y%g^$)G%Lp1Hq3xX$$w>a@PAt-dL!#7
zEfqixY)j~oo#hWei4-P-~cR+Hle=A&6=o;*;ZzqovUOPqd;v%Tt98?qt%L7
zOAK~br*Bc=+pkDptSL)~ubqv8lx({wcbo$YFAEi*fCcMh`YJO=4AeJi^?2TCmyu33
zf}D!8sMkO^TpJQ_-P_7M4Iv
z;e*K~BIB9OAZ1q;SxnYE&>l8x`2nSI&&sdOlFD#(JeW~AfI_~SC6gYhn2CZb99k{7
zz{{iIKAy`)xv=!;@uBdOZaq6wo9HIDVi(l}BN(DRI0P4!e0oc$<8ldq*>~a$fLo;a
z3`e;ItMBi|*q@iIM9`{3$CGGBM6ko)>9ua{4^$9==&OW8eb%CZ7}gUCR6Aae1w~Zb
zjNG`u0fltA`I1~XgWi~fRZD+o-rc1M&eTy2{h0h6)QYV2oGc@m#WJ;-DV^9p?eY50
zT%h%YY%^PP
zQ6G-hM^1(&3lS1|5>GE#tz!Um1sMWhE&BYv{NDh?Z=S!nRQH8zT6E=5Rzl>S5
z$2FO8o$UC(&-=n3*7%Q6#d11#JK6G-Fid^G{*}VTemNys0kv0?%cMIe|06O9=l)ed
znh5cK0B=B$zlK+N@osxlZ!Lo8HSuh69{({)Wax4TRMqsKJ*f8k`OpWcOLP2oM!Dw>
zD`+Q;*Vj@LIAO9gmT-#-w_y|n5TjxmvADKhkj(aOT2E|h$YYS6pDbjt8qw;Iz1FSO
zo;gz?Qy9;8Oz_dvCCP_&QfyL!_-pqTfrdTuQWioknKe+?M`NiDL4~+q%6gqTd(P%q
z#E%9eARp^VDQ7ztZkm-86B@G+jVV8|M2(r&^xk%jSa(iHZ;9oNaEtUTK+71gzggMh
zNC}zC;FK7a%#PMXRA+!gKqZ1Phh>b3_W%$v8Ay
z2VWmEVZR{)V0;kkk_5Kat$Q?5nmxWc33zR8g$RI}oMaY5+YP~oAYa;+#{S7alH(L8
zT1{^??V&4F#nis(Hhv85c-Va~vk)#_o>kob>`mADp>gV}9#T*P593ri*Rx}&GJ`B+
z8i+qW3VT5s)Y`gSM-tpr;up9#
zH{>>hRvT2Tb|f%R$I*$U<7$BiHFxCKtG{fOmEQk>20yVtkrQF<>c3=$*$5pw-+TwG
zBLGT2skCNmRwkBVhZ}Lzb@z$A*oK+AGoWF=Tw`h^x<$7nLdkQ7P*dRgXLIrgO&$oH
zXuMO;krAmn7^@B(S!YN1kh_A&Y{6rfU`WxMoWKCpPD$(`wIG=A3m`49qkT`zogh
z>3YHYR3m3=NDNT=LB*pkIU}5+O_(`B@sHTEYE{wV6K{QOIJfX4SU7f}*=$(pvZ7P-
z;dj!#M%4xFwt8>lA*Zanl*eHne_p3H12H&*GA@XFUq;qx_03=2om-ROnD%*7!ljwO
z;A*ar(xYAgt940ipXc?c&)E14<6hhqC{}iG%|j1CMU4ouhc9yncbnM)p#d-Z7t658
z4aMR?eHN%U@b&eJQ+MNyPr}-E`S$b)=|4NapwB)fYg`F*zg{MR|5m(^@L_%H6Sg)w
zOM=gbQK*Fz=2&DwnV5fsH?!&SClGcc$CWdMxGSy*vWNd|JvG0SsLm_5CHi{OalR7s
zeH4IVa4H7?00}uknubZ?3Q}b-A)o%95C8xJ06FN3SOeyKBKBPb;X(splhx7Q>ygxR
z&kT9C9{wsu$J2H4vFz+Q(7k=#xI?de;)U_mMguYk(3n$uto?38w7xC?7kh1?8qCu
ztUfR@q=qm^+qT*w-Tq;cWl7j0m6JQ9OGwh@6h<~BRpWtU{+ovuw{J(HjJp|WaVI{S
z$MD=GTns0aP!g|5(sk}hXR4#=SR_KMXR^^_?}p3c
z6wLJ#;6-by#Lu9Fg#*+?Kasg~iM|?qK9C1=kC5>}8*Spfybp7Kn6y1{w!yGw2rGTy
zVni=oEJ`@)^hO8|WQS}wMZAf6SXM7-;LNI7h(GFgOQntPn8_2q0!e0*Q75_Jl`99J
zQ^M#LD%zI$F9vOLHC~OMEx-(UY~pCWr?8Fa;=DgyWTCdK^wI=Qb(Gcn%jDjS)rkem
zdM(B>0(=}J99t(?obugDWCB1#65-zIkQYsl$Qbny-#}S>HsAf`!!x1-q=UFS$dA~?KSk=SrwAaQDo#yvr
zAQaB^ObuHl(70ijlSr40R3_la46~JSPNO#aM8b??&dI3J0{i1WHV32eGO|M_kI4&g
z{Sc}A1iePfwo_gjgBUq3RMHLwL=PC*Ant_JMHD8uh@B~@4pp3`B143$NdTzRQThzk
zjzZy!fNkD2vl~(m|D@$I-a4CvCG-Z6(OvV@5!&8hV6C=`pZN)h6o~_>Loj?0;3h6Q
z|IVCrsCV%p^PUrtdL1Ks_vcc6?|nUpt8(QB2|8RSy6aFC5Psawy%3?#<6WIWV!vIL
zSx-)^sR8PY?#TmjWc}w0G8T2i4#q=?FL%9kdNZT)OG;=TcDR!E4~Jxf`@^D_kP^aoEc==$@{^?f)R!s?@M5P)Sr_%4x6epkw=IEzM+gM{C^W*Z_+BqbvQ?i=M3J724oIuyhUK)Nlsz;SJTVgHa$700xjt4_z<)rOo8rvn8p(gxkC7yOK;U@zEfK#8Zj0TL)w
z!nl6kdDX{k5L^YlDRMbM34gJatySl0utyUYSz>wuJEy2RkW@du1y3ppk4Hu-R3HAv
zY_Bdlm`)3m?v$TWd&>|Kv~M!8OI2R}eQWUV#hMIM64)XgoU{mu(P5
z-$|2qK9DC)vVDqon(!VYZ&wA=Z
zm5VOB-%w6@xt?KZjQBy%jY=N2JG|6=Pn40K!E
zhcrFli&@j0GIe`7T=2x@@WGXrI0RzIW}X;JW;0X$GDf-x45WM4HjTGD6rh%gFd>PoUG%MLjwxn*xt*!ic0Vctm^_>tN0KrE{yV%EnW+
z&@c9*1!l++F>=-1;5cUvy`Qu%L!#?!_53{`LRyGvRc6}1mPcrAfOp7vfqb**y#Law
zJ16UGnfs>#?>IShO_Z-%Up*f*m+LyGUJ{Fj(fclN0k8oO
z^*-`Qd_Qj~B==F77`x!z^T!04>#xGg!H`>yp6WBzwbvZ!EJm;Bm18s2D?<&~hX6b-
zTnRjLdvPHH4KXV%9p-F{iv+r#L(qUEq*bw}_g6pgd-g$v4#7uVP?WT>^NZo=3adF0
ztm>BD#obBOM#+7>J;xPi+Add+ncjlOIEkm09mY?9ND5^69wkURa4M``h1wk%Biy;`
zN>_vG+mYTmgKv;LIMGo|9$lKfj>+3}`2ZerT4?Z&j04DEO+s!tYHb>y#7zIv91kwy
z;5Y9On{OK?ZfaWv4e2JsIRoEVM{jlZ7Nvmu#jS+pbsRD4w0FTl}Z0q7(o-^igb%yu@!QF#)+S5Zg)IGW-*U<*HbPqPKWZBIx
zA(x!rFFme^^HWr6Ge|SSJkSC)k0JO-S9^Xz7vCo8`P#;zKdYESzWOSNk&o8&aH%Og
zBQrlIBE(}mlCqYa-aSxJWvwFfI(_E@z(HmOu2Xj95z;I>qLA_qRJuXR_te%{TTgy2
zZzEe^uY0z&Ix!|1~P;^@CRh*Ty?6B|u0EwdzA<$Ow_p@1?&MbJ4j_^k}*JVi1@|IyD}&BS5sJIc;QT
zu_hsGW9fqUExupsYD8zwfgMo?bpa6S*h4c_AqdpO6sVAV94@na)BX@(g2O+LjH1%D
zV-5hlp_Z*D1NqAIiMwU5u&}Tnjmj~2#&5kWVW0Wgik!KEpr4V4nr05|tmg$P%sWWq
z>0PT5=$hLE%QW6Yo<5t8Ui6r`88zr3n}nG>vpTkx6XH5b|57Czj8Vp|~BuyQ3BpZ)TnhGU+AB>D_DwSzpA%p3;u?kGq`UT+WfmMy_X*nDe
zzdj*bmX}!smOCA71LYHHBXK0c+$x5NC@_;gXK?0H^n$g09NPBK{Q`
zyfdMekna$*fqO(`HSD57!4MNZFLTOs@*fW7v?nilH(px3_ZKtzrUfmsc9dgU~ScMMQ
zpRd0OrvTd+1(p6oqu7t8N1$L?i)yuc4|5Ig=@iYG%XQh%?z(XLE-nCt+ed3e
zr?s&4#<_As`?3%uYs$QvQL2)2qyNzrxyVUU*S
zB*F0}pJjy`Rp!d^@?T@H1r_yT_8~b6J~IM~DuwCO>`G6@G)~#?76RD(Sd?7oB63&L
zS!?tux}{7bGZe(&*hNK&r(2#`29oj#LIot3_5Ep5O0nJLDKN)K?_Y1;4SChpqr80i
zYv?%>L@qFx#JLc-Q?8_vyF3S&44O(9BNy2^Sm3-4ZCzho0#yh;O)kj9MvhbeaFNrB
zEF$9Pzm&QS?e;JPq^}^pFK7M0&Oq}!u#How%bH;Q5@^8z5<^6~Ll~LHNV_utpPq>d
zsd>N9toG{De~X0!``l6$$CT)v@;f~rG}28^UC-FXfNRBdB^6NVk1y&1Zmji*r6EAR
z;s2SeHJYj?yv3-4I6KLLD+%+}7{(g-Z3XVmx0f&}-uPz09C=c0kos=XRYnt#j}@P5
zJ6BCzhu&{6Nn|%4bm*jT`;rUuAwhWn!PGZ{YJ3m1M$0
zndc^FARI<`w_ph4T9Md*O!di62=IhH(^!mh@9M8tA&XfRmEVW;Z6gA73{uglT(`s@3Z=U+8iO(cvad=Vnj2IbWInO=3R62)DZ_kBZ^0)QQmV);376l7&%E&)wJC
z*B@VGkZDt8Iub9=fQ=c~lz8N1EJfQXRk2Meb4vAWF#6^iE8g)I{82=_USKrf2$m|Y
zjGP3b{Ic
z{{$c1JvOTmrr-wiwoJCE44?mZWFf8s878-xaPcK%g*K03N?uP-S-H@<^5&wHcNb2W
zz@Y35reCaQQ8s`Ho$r}
z*R{oTL3lCP-As^0hn+b}{kX(!j@(~2oq;0VWdt2RR=1I7d5RrB5K-JOzNQ&6ai1Nt+8$v+oY+xy^7z9a
z;GhNJYVEp4FL1G_3GGI*cSq|ad1C#kZ@akFOLS7SGM~Y4g;G&AJG>H4k
zF^*)7R#rTsrpXp~ow|JELOS$xdCBz-k28kFxINde1Y0Y_H8U-WJCXjdTf=1RS>F`@CQ$NkoDz?fCJ*M90WcDG=dg#NAcZBQMI+`)BO7<+`#&2rUnt)jy1QNkfi
z96;+a8qxK70Yllf?*Bmc&-7jY;A5ia*V&5~*Mao{YX>AEXVmmIPKf4vuwO&MFGeF!
zbaGt3K&GSwr-#@Hq5@5;y7*9j9Uz&ngS$5q|1cE-HnjDo+$Ip5Smkuuqzbn@^QIk-
zk`#`6!q%(*uX6r2+*HQQN*`4-8F5?XsHl~+{5&tYWk>_Z}$&b>fpAN4`
zG;aIWOXTwh_Cx`>HTT#*)Q+=6;Hx|Bd&XOM=J4PJrEUTMLtpRQ%@V3<7PqKUoobXe
zz$Y|trT&m*7yKZ89|XBuPUIB@?H>k)?ls`ad_zZ?~fp$W(7d)T9fm<%{B$V4-ju8nLs1lkI8S
zC;JfB{kj?cF>%j|L;L3p6BF)5R7k9B*+1G30Vc}gn7m$MzlzO2$>Tl}&jJ}Dep!P6
zfs9x^~eih(MsEBcG`gBrViZKF`Mb)U!
zgY}h$LHqJ(7=*;V#6sL|9%-yI@kuqWNVpq0MDvgC*gElXMjD}|*eh^Rm|!u%n;(QT
zd|4BKk+lvoFOK#x7hiz#8Gckie9*IipiDq;iIzh=xJvTJ?QN7}uLI(Jk7Xc(o6|H!
ztMi~}K^1_6kwiY8^LOFLJ5tEt=?7gcA-4hwjug#Wci_F77`**SMN=q2Xz4jeS>xUP
z8rc}ZZhp7`yCoPxZWo=(44D7`1d9Ql$7(`<0009358l`6s-4A+ug<}t9YCo70bMbV
zazQ>^&A{~fs(=Nte!uGlW;~OBe62rYxtgg9UJfAT(tM|j#8ur4ARCO?
z?=jcY!8tou3~^xxzW=6TrTo&Qz(&=8h#)D0J9#xN$#RkOs_J_c865mrcZV5ZW#g5Q
zwCEncz3T3CJAm?gbJ|9eB&dcB65W9k!S`s;rxLN$>ko#4g@jPtsZ-7#CQl@6_J`x$
z!~@coZ>osfNNFaPsab7TCO2~kujq<6;;H^@&ATjEjcl65OxJH@(|?+qMj$g6CX*qX
zu_}s%lsw2Io5Zllu`1j)IG5s|L~j_R70)Nv8}`c6E4AqyzrQ1JeWD%gvWfxR%SZ&+ijDErhNvRV
z(TjdACgcABxx3Pof3O%2g938IgFMHt<_9v(ZYEVjASGO)mfX-;)!WFWqK)gWPr35l
z(Gta3Z2aCC+9s#KF01O4q`e)qdWEO2v0`rpqw<3hg9
zZ$GR^y__lGaM02|JMw86T5CBE;Cz4KFiZ1bA<@d3iDWW3>5V
zZvCne=TZ|z7Rek`@NAP$jqxrdbdf?QwB=>oNDY;YG6^=D9|s_q#UWL{c`~Qq^)Rg8
zcQK^_9a)p}?pm@Ent`EOR%CaBZ^`K&5NSX_#*b;Zxa_D1^OnT1Z8T~Kk`XD2QWr~y^q&;14g2S$6H(Omt$%!KisI?dIy+6G@P
z`HA{Z&y*cc)-}F37f?JnrqKd4Pi=s(bOUb&Br3Tsv~7_@($3>UqgfW1hrkt?<3OXi
zt|j+y)Euu+r<)X9l^6rV2-~-`t`jXPkH=n-ywn=UBtheq6TTV32&uPUcl##y_%rV(
z5_8ewQb*v2m;ZDq7~!HQ3K1hZ06CIUs5EG03lcj1xXHl-_W
z@t5_%aM^S5XoLgc4kvW4yK!JaT=<-sMvB{UZK{N`tDQ~tn{+?|;~oxmPS
zVTeT63PIbgt(O&8X*j79^>^cAk)Ly>+63Im?qRCbn%Ry1eNFBR$4ajj$Zt&VQGLTM
zesUo71ZViP!fcLo8pKrP>ztkTn>M!AsqqrHY!TO|j!9ONhipc;`!i&ts-|HSK}=mZ
zIIo{J)N;mAYybxj2;C7;zsjwJ(;cM~TW?hQYQ5ypF$+iO*4-R&rmZ#ST~#n@
z?v@8XhnM}dNfTy9rO$jEoP?o
zF`#8yg{XrVjSl}quq-Q<&U{oUWFL|jt#9*3bs`GRa)d1a00RKyfB*mwr$L&^N#PGB
zQw2OT|Nfp300093Jfe2E1LzSTl3OZ_x(TVW&To|X29
z@1!Jx1Ga1ylR9s`*&T7k+@rgQ;Mw6%2Qo?AwMI@Oi0XMPPQ<6!Mb{b3NIidWCASs_
zD>6?FFM@VXTroSQ>zv{B6l$DFgOaIw=FN7N4nODe_`8K6x<(y
z=9)7ub$P($Hlmb+@Aa&E_;r;vfH2K_Q~;27IZR>)A2ovVJ^7Cq?H3E0OAntl6J;z@!~T9p6!MAJgNI}C31Dh?|S
zHq?axh7)YXKA6bIUPSOxa4HtWt55uST?BJ0WSG2
z2c$_>`@Q*lTXG%1fl%uSL1KGBcbIQBON_WsG9*Jy)uKl=vD60Qaa?X0
z>Dg9_6URP{%!{YZZ-Wi`11cL%z4ReN&Y?rH}lI!1?^E5#OzS
z!`Nsg0|;Gw@B!wZGIxpk*xv@_C0T!S@59mK@31V_N%~r?40R%Zg6cvGmXYwpDn!*?
zZ79BdE5$=9%S5nSW7u#Ii?HfGG-HWlA@03(x@AY0tc~n%ZHMWol%;;Xf=SZ=23G&2
zwE2%15~^{?f4z62r^Fo}oqEZ?v#5l%;wdc@DDaNk!VQVa?W;7iT?|YL?0j7rPd}R3
zLJ>>Of*{O%K2`xWhgw6mP$?5;5MM=f9L9}1$V13+&bJjW?@6RW(Ez(3sQp#+GwjIKHk0r42uh)2tmzV(V=fY{bODZ-%7@W#_6ZOHQM6Z@Ch|3c
z*C~NGH4UfB#26bM%Zzx}WN20EO+M4oH%WK0H6Gh1%cm~{2%~sR9ADC*9yotb^Qy3m
zfMe19YUlmxHPM1j1gm{FHzzM=l_MBeH|4=w?U97EPyc>;BW@n-rYpP26Db&<}LX{
zTe%zCdo1548i`FkpKaM#Vy(Q1cytjUeb$sZDQ!ddOnH@FgFR3vMCDeu%#ai7+JjF2
zCB_<{7c)M>uJgZXMg78m#+TV!4=S5o8XoB1nfSlF@OD{|7M+>*Rrc1Ipl7Wn&9}X{WzyP*m7lsEu&&=$1y2!A>3B0H4fJgu|uK|CpTYTrQ
z{*cbquC_ZMM~x4r>4=h_`?5VrCt;HZpKF=bCT#0VW;J8@>$YN>pplM1%!W(YXdSNL
zx~~-jvlv+fNk5shAAFooHaQCm7Z?Ck<^vOb5y%b=ZX11{mOcdf!}2`YrL+z5INybN
zc5X(FDSP9J@|?IU3bN)Ayp*0e)6IF6>$v~5UZ89-`?!XtDW!l8#PKQ1aMpN4EzIjZ
zN0L~|LnUZmY-RaYDH(vKWT>?>M_a!^E@n^UTC;~-1U%dB=|Ka@f!Uy-w?8DT81<<>
zo_hf}&|nNE(*yUiQelVlRRqkjqA#cU>}jxA30h2!D#n8b)XsxhQM^zWs8j#UQ*;
zy9lrr5bA?unc`z5XR5AE^v-W?YJ|!91W>6aPT)}No}&yR8amenoI2deu*AANenOP*
zrBD^^zc?j>10(vhhW9N5yl|{tqq9%iS7E*3qQYvyQ4L(@M@|}C+%|m5tR)BSbQX1T
zs{p1&U$jVHN4r+VQ;2_AD9>$N;adEps6ecuWON`7BGL5os)#tCU;g~XJ(UQ#KGARi7CF)Wj+H=b
zp=9bY`7WIL{9bBh655rSC0-SV#C0MYlDF-lLnNHPDp5G5+Qi`c5eNV{P8%Uq@+#fg
zk}tKpJa4D(=k=iyw!?FoijwVe4#&m^-{{k{%|DxkjjkCH$aF$6+1f?oo!UKWTU9
zCjOjNex8ogT^BhWWR9n5|I&GWNRBwcX?DKIOM!5Wx(*at7mA|Cap>%20OyjH@D_+r
zaW`XgF+$vNL?CvC*X@SsGD9w6T0uCpe?RA1GH)#36jno(lQc2s5k=>3R(|_AuX9)>
z*QfFiJFn>s2(Gmok&Nz13_>@MJmgNBO)ukkjghMP!J~p5VhLo0B1Wmgn_xe~uWtry
zmG!R^+im~%ce0u@X+hX+w;^OT3Y%RwCFSK|==F6$Q_<5;{Bv!%EDRZG=`bm*s`!?%ji?v|@8M=$KL%9S%Np1K4;ga$4C>?qo1^N9wJ{n%|r}BCq2;B!I
zFMuCn?Lq4bD_R%>^~~5!_u*f)cJ>fU9BTEuYXZ-bBoznZ-lwIrYHJ3{O;Ap3=f4l^p~CCFJR?}D)%62i^a!75JBlHQ%4x)=K@I__6s@KxL0oyTT95*+qicV8
zf}l<&Q%T^3ryJzq5*fH6#%)Sad(^43RJ`9buA23YQXKZyYA~%NHoS~YCwkVaJ=i~O
zVOT07lv+72)6~G}hGaw)EEpz%0cM3w0PMangCINQ@R|&KuUS^4`eLc%4fR$fGs})o
z3joLVw7Ykh_&F=|e^%m(0e2{P~g97E_UT=*S$8JGnQaX*ydD3UF;rI9n=O`zAD
z{7oMnvX{oR$T>
zFu@z|55%!q#+_#G@qq*JLQ5VxOL`$NfQxQ+wb*vT7WeW+Og#b~e@oY)pD^@HqLW_Q
z$eRhNp`SQ;V}2F^@U>Jx7jU58#xygs*IE%yYY&N~>gR(eIT)Q5$1ZJ7el?ujT7}Ob
zemxgA-X#Qi%Es2q4PsDyB|w
zFD85&VQ;F8j;Zheslmn9KEAO3?ciuSKsL?Lc?@hq^5dGG*d0@xW^v(__qSEl&7{wC
z$`bie%8|Z(wSIM}l0;-R1E14hlcbmv1i&47On{$27pWK%bm2+RwK(zNb{uiD8Fg;K
zx(X^%wx27M>erah5ns9k8G12nYbL2U(PGRAbc%}*JkVGth;|`gUoX!WdH0*5D#8q~
zT`{CR_41`M<%x{H-g*Mv5nn~lj``zW@xES7w)D~r@)EiUA$^c#t!Y>4;0`ORXm!w$
zQpvp-S5;ykA*aB+Q3KWWr7(5A;IpNAVi$E)q*7d{dc?Cm2DOaxH#qNS6F5Q`S^OQ%
z(R-hEL}{7ef?0%Ni{g$(K4Z8I{WGDz6<9l4M=e#Avj{rfv$uA(_geB3Q|{72!Y;yG
z&^7)-R;$1OCE{Oc{21puLyN%c#zD6^_??!=Na+22fvT`2>Q}zEf48W}?RlMN3U=F$
zNnO=jf}zK_z6+FH=W;jhPL14#95L)#^m_AsSiZFm>#P60gM|AcVT)FaJN13dc&Q*W
zW|Oh}wo)KHO1V>p1SQD_BUjn7V4(sGotgU!_U;hw_|}PEv;)!4f&K3Cv8$?_r&yEm@aqsEO-Y4S3;KxiyYA_SSsFNSrdK
zdDH@$T|YXnC>eMLIFO%Lqlu-qcvfcP-h3}_MWDr$o|+Q&9KE25VbeQ+ELIs!MC2>%
z@|`i<3|c@3s&LJ29~IR~l(X8C01DBD`IG(>6BTOR7}xf)k4-v0Lkz81N8JV{jFY_bAUJn=)&9I8=Q&uFnD+-E{K7pTm!x@S-`ahmBs$07a
zX0D3TNUj_GicRe=^62j%QT6;}&EZ>2=GZ_hg>fQtXb$U#s^?V#ZeeIYT++N(0^XQFDrhc$xH8A<>N`No>AFpv?PfquqX
zklNrWtYS`sLFk)8>xH$TGD-|0{8?;{+Y}l7_=iIpk*p>m9Kb(Pd;vCo#peB%SFXepXzncCE&zbvZj%z}C~;0zEPMRM%7vk{;%JvEaJ2xM%yM)uocPr(CD
zVfy#-b$#ZNk&@Fi?C)gR2&U1y#qz;xzOm8*OQ!OVV316gCK*sYdZ+0A)A}gF`X}#h
zde3~;s$jiNo8vKdnNs)FVTgIPMd8+naJgjdrlY#Y0Gj2oyeflB9jZ-6kxl=GNu5TA
z6jd039WEX3HrdqtUk-#d!{-}n#(Z9@5)4p$Mgk42N(-w9ocT0q;5uc1z=rdLRaC%6
znb?EwvY(SE0Rvh~;iNcRP1CBUP1Hq!VrF)9A-R*>HI+m`aAIDnhpOvHeW}K*0++HRMmt+zgNp>vXN+TXpHik1C4DXu&QFA_j+)Q6;R9xKC#qdL;9wCtqYjQ
z%6#@_S?D*;F~t&px^
zvhmud%^)|xRPIFnt}TvCnY%{HB<4FN41uu|)4Q&J#Vu%ZEXyJbMV94C?Fl_jt<14L0#5JoCIuvB#*lv-k53{l!$nb7NA|5G
z8xP=IN!78vr++3?S2o~YW84Shw32b73Dy{aq7P6ch%xOYn+ln^G#iYE1x~;*!a0i-
zdm$R{3x-YmcS44vbkwT>feg6$FFYwvj`d0VOTFyW`YLmcc=q553mOc67sX+5tkmGN
zlMGrey+=n!tEj8Fxd(#&oAfrD3KNJ-o&E^xpuTRTKDop-$t)#IEf4B634m=r?ydhl
zvia-1(OR#Ez21$SEG1gd-#Vh<0QAP$?%2s~szg+TO}OZ>&E@Awn)P`)?a1>q{({H1
z+9Va$63FT0Bp_Tw1ONaK@c;l2CqbI*N#PGBQw2OT|Nfp300093KVjVf3D(rv^f(^<
z>l|ohN`+j`jviS9%7;>Gwng`T9%l$ZLxtk?4mlGU(FU9+*@N5T^7=of?~nemh*D!H
zGu&LI^?<|58oH7(JbyJ0f4T3468)arb<(I7d24V+U367}hniRPKpSJjF?TfPyo7mn
z&wG7D^wOj57OLh;u2eE0s`<&6liCZV^u8$kkg$jO(GFgg*XIgeMa{#=A=45M(&;$B
z-P7+cKYW@lD`o(4j9D7tUVq4;RTMAW)?r%~1e4c?u$e!ik#k=wMtyaTi_6U5T*KZr
z0YWl85r{C~*#Piho-?6a;2wJ!;-0Ew;mVwisSi@^b58EzN!5tkF>~VRr$Xm=!on3!
z1*}1oN820&LcL}pwdLp>ibH`Dabk+yTmt!4V(gV~fxLAPS{yc-K0(Xx)z8cxK2=^<
zb=iu;tUfvthlN5-&HpG0V!og&^l%(-(08eWcUV@iOB7K-eZyHe|Hm0@8gbAo4t+%8
z^9gsxlV@ytkEQzTuA9jK>uZag6y6!%x;0VUMP|b5j_e&+$r9!43Rv_=uiU!uvB_wI
z1K=B$_jde_So5#*qib`gB)UjlR&rtSfu%m|j>6%fpE?ZfNMYB(FLIl2=u(Qxr&99g
zG^&PFB4gLEOs#J`4$dgyGE-Owq|UQzDAsvOAJ)}os~#*4E)nhJOj@J+E#FVbV~b6XZeN(Tc#
zb4XMV>n>%)qA|&Rk)_Vgj#0
zRQr7Vrlx)_Nq9o)bLX{<@2pncQG^o&1DS;Tf~_exB-HUzig5ZcxYmJ>gKAF@$v$a*
z4i6vX1|mAe|0i*(+%%Oi1m}7dg8RITiAvl1vR$AH>S6jD3!9-{;n%ur0-{c)van|`
z-jb2P&4YecT`Y+^;;A&9?!937^%Zja^QSE|n)Jz6B!u42%i~f9Mw1otApRgIH(j?N
zyJeqrlJ)PVp?8V*~-v&gPFmAxL#xCz6n|CHcVRd
zu9-$>IO+CYt(sL$c3Stx+4$BdX4OZhq!1-wXW$TVyKmv49#s`KU}QFoaLXWV{Ykd`
zxlBl@T>rJk;JmPkH1l5E4p&gKoEszF+$=bWHKm%df1zOMn*OxGw*5}~PDfIr?tDEk
z69egv#txuMII5T%IzRnyV_NdybM7v6vt4`v$
zI|&_&Fl!(uyXXBHESsJnb9SCx^OiEtF{s<0*oaTim!bqkMzMe2t<1h8Ir%UVIz$}F
zvEIMTk4QcuL)md=>JVyOEr(i`e&&AMachvF!UG@YJ+~Mp(N0T-FwY{r
z+aJnM{OUEh(Wwx1vROg6S2wNqmFhctfX=<3%Ik%NKG-x}7G5Bb;7)W!q-aP1t~$$X
zbPZlFJ=W7XyBr6)(%jtRGO$USe9(O$ZtUwBnf?vR(fUNK=jEB2PZQTFbs)9g3y
zc1D%4P3RW24VFqu;A@_3FH*%nHL*_J!Ql4dlAnxmV)b~-_#TyA56V5Vp89R+sBZBc
z%gtmy`wHze^2O5nT-6H2;cU29pGA3!gAS(j=|*ws;Dh;1EgrypE38KYsp%GEic~g^
zI6oBqeAI*A;49o3%Nam65N|}FH8i({TK{HHA$9{r6R+!Pg$Ga77@)d-;Kg(*U&Tl<
zXguj?C&MzhM2-GyF|vqjuxCU`B_S>^HpFs#b2~&4^j=HdYw?vYv=S+F0a8ie3NmdR
zOWwy+eO?LIl>z(%kn&D)lbd3UM{LR=s=WT(m0b-h*JJrImAATP}Y^vks~ubvNnTm
zNmjz074mU1H{zgi9rfwmB1zv>qfd!WZ$~|*t&}YGnd{qQ4o4d0+4rjt2SFyR<`dYv
zUIE0Mj|XC4eYEp&R*&d<9s%}Dn{oxJ=T6Db)v3fs#u;`k!xKMqsMc6K{p^yn)Pvo@
zIiM|YXu$!rW%0r;N6xaX9EA0elFl1Qp6jy7QzFr*n5cmawe1=O;F~(
z&Xc;_`ULGFKtk30DK31#y_eAGo20dAP3Rvx%`pwM`NK*IpU~>b@D@uh6E8=#gas}
zX~K?l>fnOL)*yUU5zox(Tmija$sD;j!+hdBxsPk76q{OTcQ`0?;ruS{PUThpLM*mu
zC|GWPvj2*4U0Fx;$`(7Iaeb4YL_yPKWt${=o5V($a=@0+qPTU1W3M+Yp(GQ*B1uO+!rH(#mW*DQt(B$8
zm5`-mg0Wi@H55MQ-&tU|JtLEvC9(ss29pP0q#9
zs-Gsp=*-dIqMx}&=16zDZME+DI(oMy+E=?gBT_Db&F@=!L@U`PQqkwSfE$aVrGQfbU
z|LdxQA(@ECEpVA|EsYi59uyUe8
zFV;JZ9>+*<3=+abW|56u9qd*gcwZ$X;7QMuOkstu8dc2weZX3@KeWb&GaiYmK>5&{yyP34^r3@$!Ku-u%u{qFomKf?bg&%US(*=cL
z<(~_UQ_FFJylijfdWItF*&J{U$B5)}nWMOA^Zb
zCQ^Vt3XlL4kOn`SqY;&)c5-Npm#sYWztL1}ua)~QGa)O34UL-pV8)NgMMDai=AGFC2C0F^RxBg3h
zV%9R%P5=7o8-WC99XpIGqC1>dBLIBZ)&E%$B2s`^DUu2_1C;9HMoW84H?W~j@LH^0
zlgHjw+WT$DgD7cu5novvQS!UeQQ&Q530qQ)1!SL7mmo^T|nOkCC)VfS{h3QYKx
z$s+)A;q|R+1IxrAoxexX`=PI#B`;XG|1C6!Rz%Eu=4SN0lO*QJV4&?@<(rP&^te(q^|U$CH*UXXU@rZ>+Z3Rl
z!vNiqD?_-2Qm)bAsiQD7F?+&
zN`te6?M5h+vh0bZn5bek3*;M)YvL#-`|yO58Xql=w>l)gcy-7az5oJ^4IFzp$m`&;
zoB#j?908vTYC?Yi00RIKU9d@wFdoEbHaAKu9%Z=OTZq>jP_BNJ!tNLVA#LeHCMkfB
zz(RD}RSC{CiOD*k8Bq(y5ZCD`Bs4MLWQdSRJiV%`7cgFCuX%i6c1eSFQeoDq{!`E^
zcxVbeP*A%fQA<{0C!s3K_GqWeW`_O(l2D_S*jgeEY((Oaa?);y0~POT&&@UbNqhb~
zFAd?P7qCzzAx3}q>iD7`2^ck`NqtK@_$u#V^cEP=;ox*gr7e{*bD8A^5Z7?VXCgWI
z9MXZ!-uu(L4U^!&o*Bti*}`UhdpQf+PN&?4kGLnf6|zr6&ar1j#h^VdZNjfFD%a%Q
zRkk<#;6hNN5C3VswxQ>uPcv}KxPzO01sLs{9puRC3|D05-%M(iUnI#oc@aTFZyxJI
zA9qRQQ@0*dQiCcovC|LiV>NAffAjfuN)c2;xsloJOT^OBy=1sDp}*%yAT27{s?t{n%3N26_KEIvd^Sd#5wZyCkZT+x3
z$0U%bgS)t=7>87r5$DEly6INM|HYMOrvFfp9XRDj$l0s?8Od6JVrc8vJ-Kn^mX(_HTv|{y}fXDPZ*c
zIU%I9$L7zT^8==+63Kn-5P?#OELDEMDMho59gX~1v@M;90eq3=DS4JWI3Y!$!syk*
z5lbO~Z2Vf}kgCAt$uxlur@dvBoqQ4^WQe(yl@zw$;2~S`^uQqe7?fCFpVgHH3JOnW
zVTL`!xi6;fO-g7GhW^sV^DqdL6^JzOzVDQCw5((ud6NZfUA^WhggrL
zjj2E%u@AA#=hz|S>9v%hk_?-mPC|)DWGypg#cQh~zKvLN*;qA0KWFN^aEJdplt%u&
z66d3bL!j==8q<6*QSh?(P4V$37y%FV1!NBHiKG}Z>-5?>CJ2kf|7D8{uVHb*Hou={
zTX)(p9E@rlsq}|Cp0eB5Yxf6qXvP-GQ096oKFg18p3f2&xhG~nvy$7AgvA_!53keI
zh7+hzf$1e&c5DuE~VusU;qFONB{s3@IjjnN#PGBQw2OT|Nfp300093K}e=R1XV{M3fc3vW!lY_8P^tiC2)_uo~5<(@yPqK
zAgh3A$Sv@ajO^qY^p<~OSkzC~!Sx+^Mgek{$vpsvc?_`*!k)B_2OUf7eUk?$N4D93
z00Gf5R-viqn;fGLF%bX>8n~;Fj-z6i1m%|=R8vL~hP3q!x}TRp)0OpZDIR_F^!oHx
zV8}!;G#jzfL<>RY3Lx1^E2^+%hQf|or@TnX6#TZHz}*Be-AbXcey0EX?=l4z1Eq6x
zLgdY5^j^{Y!MiF}<9tP*Pb9U+E3kArX(9?8`Cn9~FWsQJ_Lc4#jj4^_bY+YhS(u)+
zm>3ShZ7FP?*}|soE*~-_PD$f-ea9*R8T=gM?5D1Lc<+8&7-b2zaIjey{^*&BUil9L
z>6bfw2FJji4%s3FMsMsaXg9oFi`CJsU3HFq^AR)MW~7vPh|2TLRUYupf-?{buvx#y
zyQf(J5TM=5KB#wefR)RMl0uYgt~`I?|Ep&06`L=ELLWVz1K=bw_NBd{XT-2_7%dcdsDftdoJ%vEdn0UJi+
z^}i@CjT8^x{O#5#6ScV3N`%**&rZine73OLp3jHgb>_PntWkcv^8~cI#jVbdfo7iN
zl;QMwOpSi9zd4>cnd?~7CRz3#KM7#!ci8hu>jkfUiv#bI`Z;>|A&7L`)R$Co)5950
z`tq4o|L3b1!_d>VsF187`kZ}WxAXFDIvpYe3}bA
zN%klv8^69O(wnZhNs`lL5Qo`(AAUaCDR$XDHf4XchpLMyO$l5w!0`8}>;MI<{bPkY
zUiIrpfG9`~HzL;hFK+%N(glBAqMx(CqM(MIzA5=9&Mm+;5Uw2f&Q$lLr+siR)NNwn
z3gdHbbHx?c@-KeCf^f(HwvoG*S--Z40@o^0A1!84I;QOEB3{~GWCqSBCtfwxH}W!z
zm^nTQ{u0LZmqCTFQ!c`iIu`BWNc%tG(pe2RSDSU4|#moWpAF>aXp~e1oDJQLcDC4EaS9MO(|$K=$|$A>vD?&13iZc8f#c@TJpfA5(4JmQFjg6GV5X8r{5D*
z)E2EPhLmz~60*F|X9z0OF9G+%P_T7BuiK&fz{WDW+{4MHhw^_F=j9;7zkxcA2_qc#
zs%^^+*j4u$;w-gcs+iZ~$X~9MTN&r31j5#IN{|IsNkwVcIGCy8EEerV(CgtnM5iEW
z8qs3~)_w_YXi2`lM#x_zxmKU(^$QEY7krr{$d%LDeKQ6ID@p8nKKAlsoCCsHVp_b)glcOLCiUaNwNk+5k=b^m2VegPQU
z&yZGz%-|fgC*Rke-!H2pl@Xeu@^L=Y#C;uR=Q({%5yC~?P}U!RY8VITThr^5{tmQi
zy6Dab7yuEBAXmj!?;2#&Kt{Kx~
z?MP-tRb9bi0YT#|4o|`GRYw^*6><4K3A&;_QRTG_}VgW@c_IuI{U0x
zQa_@pSN-~`NN3G5-P^Q9eC@?DbX<*0f_cAxrl7<4CAmScC0#R_!f4l2
z&(rLGqpIz_zPpvR8Y!$6!afvux^5fRdD#CtbjJBZ`26JKu;(XxF%`ptF^BWm`;55{
z&2>N_$$vtGlK8^ICdG`H)flH^Z%u$7Iq`hjzrev@=JnS`cW!CZC?RP2L@ghh8p~I$R|{D>y!o-MTmE$f>t!F-i8#skBO*55p`f0a6ugO)Fl;WoxG1Z
z;0u`Cieru^*864RaRmbALin4EVmk)WdLY=j-Oxt|vptv=Vm9b4LObI`-U9ZRhc=V^
z8|(Q8=6CRC+yyAali2wYz^WEWrIPabDY?EIZU}z&cuPLKOVnhYRRM{W9pyaWGM)hl
zfais)I34X~}hL!ejNdvSO#eFZ-pP<0x)M)8kLU!ftRXc;reD!
zzrl`a$RY)i3UNXGH(9I_>>8^)rIyOJlp0>}*{06x)=VL4LQ3LJG~xgxGCP}hr2bcF$Bw=Xl
zHE44PaRjzky!k@6!k8b=m#?h=01KPSu33Y#KK3yy3a*hI+;lE&us0MZ(Zw~Oww^2E
z&JML~!K*UpUa?Zuj|9W4rH|-JDO!@v0-=UtbKecL`>6X9>+o|XuJKTkYBzp;`#Odx
za341LjbPhCHaes``vcfsfEiXe?H4
zF;LoO&*yg!_@XDkDP?A;%c;2KGstkIslX#W172fxZD|~%a@h6~EIVs~o^iI0RK%x>
zKdLCK-Y{?H!31Q(vp17s+ymW1hV&n=qC8DL7fT9YFl}-v?^XIYre0_o>TvfgnNGgq
zqa2jp{pZYFcdRibh`*}kaPPI%R(zq5?C5ORT<%Nb&o%S5365ho4>wG1rKLJRb3oeV
zGTiqa>a(1j>6w3@*<9D@VLc=TmF|=BF)i3us^{ZdIX=c|Q+m>c^Ron10X$(A9Z}XP
z6IkrsCZVV!^nAvCGwfP)G7P4Ra8~RT7}F7d`ns!&SjG&8jrQAZnj4*aixlKa$Z`vK
zv|1J0AAVE{Zb-|ae(}I5g0HdqBJr1Gz_~VU5p2aq1$}VkX><8_+XG?O7*sTu+jVyp
z#YL)8ZE-yijuq8Ya7HskKK16T{*m4(wAo5j_4!?od`c1^G`9{1!dM~Fo?}Qk1pBgi
zL%7@%&G#n3{d9a#<9pRRDG4-K>A|)OoiL;304J7|Z?4&QCtGE2bs}{VI~u@7aDE&q
zXDGXA!}hUej>9EmWVP5lxw$<#8yyQx%M5;?|6`D9z9$DRZJ-GYU7HXrJbfP(Jh;gP
zolO^)001j`Abz^E1Eca66V0XUBf-`%~t%s4H77es8sH9XLQP0sfPd&(M)gq0x9rppj(&J}+qR2k(EXVH1)*8TQ%w
z^5)+tK6dOP7qhjnM+Q`IDUSVnf@0X2<20hU#9`DDqHd
zSp?e&1iMG@A+ER4$=lM?4bV6HYT#LEnyWg5g%O`{L;{n7g=A%$bh%ZWmDl3fll4=E
ztOROA{r0u&Yv90%{FUSw942jZC>H
z`*5Sb5?{seA#x!fc7&}*sV{yi(H#^dksUhzy)*CTdJkulDN?z`nt3RQ>T@bytf{m7
zGLb%$Ss0Pb+^+<_NwGe|gPIgjLZ4=akwH)j+4f_pG(Gv@{n
zj#(TX?OUwu%);zt&I_^FX(bKp%6UPI^GsA7e8q;FA^qSLWPn_PfF%|GA6BmJA8b8^
zlu}_mOjb~%EKdgf!WRnr@+d(K!QNe$e+6)QGU!DZDt&%h{=G$4u%>8j*YS${B)R3Gc)u@QO=skum5LN;QTZ|R(
zSb9`+nm~09P4XfwZSZt>;R}hAX#Yfr+F2%OOMS;c&l~{OiU@FLumO^g?`BrCDDnB1
zc=87wn2q<+q~jX1k^t3823D~UAc!bC&50BAdU&S!XcVb*)c^nmk^!GBYC?Yi00RIK
z-L%0-DZqL)W*2SSyuMN;&56>>036oHNYLOWu>UI-yNSVPHndKXRfI4&y$LdQJ`A9vBoAV%
zY7;4DhByo9M^VG=UuEY(TQ>WYC>m2oNS|4m_>?YIo}RtWo2NF|YJ@<%tPowI0R8TZ
zf*$+N_WfnDXB!aCIGLAtvs~dGpB<8V)!05VQ^uLyCh}x>hphc&FFCz~=LdWn;Pea+
zd>l33q^WW9SvL9kuJs}&t^qk@V(=Th4U5RwQ1=QVX`av7Q#ezwdA&^YsZS!pxwJ&-(forlf!l&
zS-W$2wGM2;q3&Q4)Ri)+WiD@(6vnSP=qOh00bbokit+q~5pSLUI`8665;Ivl$%(I&
zGII1)*gcQpRg^rk-#UNH(A{(k@VA#=8io4KW&HgYpbEg{xNhU`rCE2DFOzYsl|e-E
z_XSTWk@IPzE-6FU_3pjkx7Jo>V67fvVT7d+jQ~|@Q0mJ^v4yv@%^rrg!g!}3`>B!B*oA-A3ttNR=N@ES
zzI{>w7oO?VA}HIo4TN?YewR5f`*XyoY
z!#2o(Y^9k!A4Pup5M4XMBc>}99u{u5N7DnGkJKRXKWicvR520LFn#}(exfhO(R#0n
zD!~u|cR_7DjqOC*sG8YpY#mRWq>EsTSgEI*Ev8T8+@{eBg+WE~d&s}}A+Rol
zd!|L21sDP}yi@WD?YCK(uUVuqAt0F{SVC&YUpLHtTi4}m-iUl*((Ja|22WtW?-fz!
z$h(yV6@SE|KEJ+#)r}qF1dqcQn7e>bJ;>QApt>Nb<9SmnE0ny6oALu{3N`^D@cnIQ
zF`|RTF>}1(5+i?E^S44B-oHfxlcaXEV0i`~ddeG*R&?=i{(%S@8*q&37z27=g{r75
zTDzV#4=r7~!&tLkbIZfSlLH=~&^dt5^90gO3ew1sH}~&vdI0!M2Ggt*V}kYDbm%BR
zP!poCjI>|;kyb<4$T=YX%2
zL~hG=CbLL%$DpZR&%^pV^us|BTKm2LvW=iwsU9zofr4ZF$1IduGsk8>l_O7gzn57X
zlds(#IA(wV09U~P01H?_n=eV>4<=Ir8UOyC5C8xJ06|0;q|iy>NwiJLtwaC(?6Zsd
z_uv4t4(w{FH7~KS>(~6V7jLA&d@yuy+ShbSWFUTaHn=o)w5Tnfv&aYvBJiA@=
z#;;f$L;)L$8%#S_b@R4M-R(ibqN!3ZG>&}w&a6-D`GJeiEuaBXV@8M;IqKtszT0rY
zok*GX;Zx$2fd|)|_q9QroG+{SSni*Bi8V3Lz8@u9;ekmY02$kEQ-XvhHSPuoiy4jL
z)IbGqzL|ba@I$vc3s|cOr;#zBPN?H(@bi&zLbN>9Qco^xjS%f;ZpqDf?-*06i1tXwG;Vkg2
zX$0?)WpE;h;&8^!wKIsw{0vQ9mEuxVvhueaf3u;j_~tW{B1GgRQ$``qhOGoWtdYVA
zAG|ozvqVl@Gax?06af|u;151MRNO_orj8h0thnv;IPl0n@UhAR16bJ#Z;6NBG-W+N
z{@vS;9P_@n8BHv?CQGvg2XEZk09LJX^W4Xx$=w<1IE%qd@E|fKjWPR|MDg7#OI>yBEx}GTK>nRYBo}4u9MlYK(vd~b4
z7Ms&zemiVWIueogs0l#x2%OuC4l3)}NhdqU(7eREHVSRCJj4=lVu@=79Vntm&y?U{
zR-HWlB%xG^emjiNUa+mJA*ZR2-Y0Fj?4(htp=w#7l
z?+dZfxRqhb-_AH9to2XW;FV{zfMDVfPa1{{4vS+#?n0LReT^e>3Fb+(HFAiWV?yrO>iqtbE;Y_!W$^heoSArr{
z4bHVC!W5?oTmIdf_nML>1QDJMouTkfUE#|#FRSZgIjD@!d!2ALj^T+JZk$Ot2^TP1|NUV?;*n=-
z^JliXGIP1+qFdEj-$U)fS9?xo>{tIz0W3jnbv_oBRx1fLEkk`^PE}i1gev)pUuZf+KNsE