
Commit

tmp: debug wavegrad test
eginhard committed Dec 16, 2024
1 parent 162f527 commit f6eb3ad
Showing 1 changed file with 44 additions and 45 deletions.
tests/vocoder_tests/test_wavegrad.py (44 additions, 45 deletions)
@@ -1,5 +1,3 @@
-import unittest
-
 import numpy as np
 import torch
 from torch import optim
@@ -10,50 +8,51 @@
 # pylint: disable=unused-variable
 
 torch.manual_seed(1)
-use_cuda = torch.cuda.is_available()
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 
-class WavegradTrainTest(unittest.TestCase):
-    def test_train_step(self):  # pylint: disable=no-self-use
-        """Test if all layers are updated in a basic training cycle"""
-        input_dummy = torch.rand(8, 1, 20 * 300).to(device)
-        mel_spec = torch.rand(8, 80, 20).to(device)
+def test_train_step():
+    """Test if all layers are updated in a basic training cycle"""
+    input_dummy = torch.rand(8, 1, 20 * 300).to(device)
+    mel_spec = torch.rand(8, 80, 20).to(device)
 
-        criterion = torch.nn.L1Loss().to(device)
-        args = WavegradArgs(
-            in_channels=80,
-            out_channels=1,
-            upsample_factors=[5, 5, 3, 2, 2],
-            upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]],
-        )
-        config = WavegradConfig(model_params=args)
-        model = Wavegrad(config)
-
-        model_ref = Wavegrad(config)
-        model.train()
-        model.to(device)
-        betas = np.linspace(1e-6, 1e-2, 1000)
-        model.compute_noise_level(betas)
-        model_ref.load_state_dict(model.state_dict())
-        model_ref.to(device)
-        count = 0
-        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
-            assert (param - param_ref).sum() == 0, param
-            count += 1
-        optimizer = optim.Adam(model.parameters(), lr=0.001)
-        for i in range(5):
-            y_hat = model.forward(input_dummy, mel_spec, torch.rand(8).to(device))
-            optimizer.zero_grad()
-            loss = criterion(y_hat, input_dummy)
-            loss.backward()
-            optimizer.step()
-        # check parameter changes
-        count = 0
-        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
-            # ignore pre-higway layer since it works conditional
-            # if count not in [145, 59]:
-            assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format(
-                count, param.shape, param, param_ref
-            )
-            count += 1
+    criterion = torch.nn.L1Loss().to(device)
+    args = WavegradArgs(
+        in_channels=80,
+        out_channels=1,
+        upsample_factors=[5, 5, 3, 2, 2],
+        upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]],
+    )
+    config = WavegradConfig(model_params=args)
+    model = Wavegrad(config)
+
+    model_ref = Wavegrad(config)
+    model.train()
+    model.to(device)
+
+    model_device = next(model.parameters()).device
+    assert all(param.requires_grad for param in model.parameters()), "Some model parameters are not trainable."
+    assert input_dummy.device == model_device, "Input and model are on different devices."
+    assert mel_spec.device == model_device, "Input and model are on different devices."
+    print(torch.__version__)
+
+    betas = np.linspace(1e-6, 1e-2, 1000)
+    model.compute_noise_level(betas)
+    model_ref.load_state_dict(model.state_dict())
+    model_ref.to(device)
+    for param, param_ref in zip(model.parameters(), model_ref.parameters()):
+        assert (param - param_ref).sum() == 0, param
+    optimizer = optim.Adam(model.parameters(), lr=0.001)
+    for _ in range(5):
+        y_hat = model.forward(input_dummy, mel_spec, torch.rand(8).to(device))
+        optimizer.zero_grad()
+        loss = criterion(y_hat, input_dummy)
+        loss.backward()
+        optimizer.step()
+    # check parameter changes
+    for i, (param, param_ref) in enumerate(zip(model.parameters(), model_ref.parameters())):
+        # ignore pre-higway layer since it works conditional
+        # if count not in [145, 59]:
+        assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format(
+            i, param.shape, param, param_ref
+        )
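
The core pattern this test relies on (snapshot a frozen reference copy of the model, train for a few steps, then assert that every parameter moved) can be reproduced in isolation. The sketch below is illustrative only: it swaps Wavegrad for a toy torch.nn.Sequential model so it runs without the TTS package installed; everything else follows the structure of the test above.

# Minimal, self-contained sketch of the "all layers updated" check.
# Assumption: a toy model stands in for Wavegrad purely for illustration.
import torch
from torch import nn, optim

torch.manual_seed(1)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1)).to(device)
model_ref = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1)).to(device)
model_ref.load_state_dict(model.state_dict())  # freeze a reference copy of the weights

x = torch.rand(8, 16).to(device)
y = torch.rand(8, 1).to(device)
criterion = nn.L1Loss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train for a few steps.
for _ in range(5):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()

# Every parameter should now differ from its frozen reference copy.
for i, (param, param_ref) in enumerate(zip(model.parameters(), model_ref.parameters())):
    assert (param != param_ref).any(), f"param {i} with shape {param.shape} not updated"

Since the commit also turns the unittest.TestCase method into a plain function, the test can be collected and run directly with pytest.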
