-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodel.py
53 lines (42 loc) · 1.82 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import torch
import torch.nn as nn
class DurationModel(nn.Module):
    """Feed-forward duration model.

    A linear input projection, a stack of ``layers`` hidden linear layers
    (all followed by tanh activations), and a final linear output layer
    with no activation.

    Args:
        D_in: input feature dimension.
        H: hidden-layer width.
        D_out: output dimension.
        layers: number of hidden (H -> H) layers.
    """

    def __init__(self, D_in, H, D_out, layers):
        super().__init__()
        self.input_linear = nn.Linear(D_in, H)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(H, H) for _ in range(layers)])
        self.output_linear = nn.Linear(H, D_out)
        # Fix: the activation actually used is tanh, not ReLU -- the old
        # attribute name `relu` was misleading. nn.Tanh has no parameters,
        # so state_dict keys and saved checkpoints are unaffected.
        self.activation = nn.Tanh()
        # Backward-compatible alias for any external code that accessed
        # the old attribute name.
        self.relu = self.activation

    def forward(self, x):
        """Run the network.

        Args:
            x: tensor of shape ``(..., D_in)``.

        Returns:
            Tensor of shape ``(..., D_out)``; the output layer is linear
            (no activation), as expected for a regression head.
        """
        h = self.activation(self.input_linear(x))
        for hidden in self.hidden_layers:
            h = self.activation(hidden(h))
        return self.output_linear(h)
class AcousticModel(nn.Module):
    """Feed-forward acoustic model.

    Structurally identical to ``DurationModel``: a linear input projection,
    ``layers`` hidden linear layers with tanh activations, and a linear
    output layer with no activation.

    Args:
        D_in: input feature dimension.
        H: hidden-layer width.
        D_out: output dimension.
        layers: number of hidden (H -> H) layers.
    """

    def __init__(self, D_in, H, D_out, layers):
        super().__init__()
        self.input_linear = nn.Linear(D_in, H)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(H, H) for _ in range(layers)])
        self.output_linear = nn.Linear(H, D_out)
        # Fix: the activation actually used is tanh, not ReLU -- the old
        # attribute name `relu` was misleading. nn.Tanh has no parameters,
        # so state_dict keys and saved checkpoints are unaffected.
        self.activation = nn.Tanh()
        # Backward-compatible alias for any external code that accessed
        # the old attribute name.
        self.relu = self.activation

    def forward(self, x):
        """Run the network.

        Args:
            x: tensor of shape ``(..., D_in)``.

        Returns:
            Tensor of shape ``(..., D_out)``; the output layer is linear
            (no activation).
        """
        h = self.activation(self.input_linear(x))
        for hidden in self.hidden_layers:
            h = self.activation(hidden(h))
        return self.output_linear(h)
class RNNet(nn.Module):
    """GRU sequence model.

    A (optionally bidirectional, optionally stacked) GRU followed by a
    per-timestep linear projection.

    Args:
        D_in: input feature dimension.
        H: GRU hidden size per direction.
        D_out: output dimension per timestep.
        num_layers: number of stacked GRU layers.
        bidirectional: if True, forward and backward states are
            concatenated, so the output projection consumes ``2 * H``.
    """

    def __init__(self, D_in, H, D_out, num_layers=1, bidirectional=False):
        super().__init__()
        num_directions = 2 if bidirectional else 1
        self.gru = nn.GRU(D_in, H, num_layers, batch_first=True,
                          bidirectional=bidirectional)
        self.output_linear = nn.Linear(num_directions * H, D_out)

    def forward(self, inputs, input_lengths=None):
        """Run the GRU over a (possibly padded) batch.

        Args:
            inputs: tensor of shape ``(batch, time, D_in)``.
            input_lengths: optional per-sequence lengths; when given, the
                batch is packed so the GRU skips padding. Must be CPU
                ints (list or CPU tensor).

        Returns:
            Tensor of shape ``(batch, time, D_out)`` (when packed, ``time``
            is the max length in ``input_lengths``).
        """
        if input_lengths is not None:
            # Fix: enforce_sorted=False removes the requirement that the
            # batch be pre-sorted by decreasing length (the default,
            # enforce_sorted=True, raises on unsorted batches). Sorted
            # batches still work, so this is backward-compatible.
            inputs = nn.utils.rnn.pack_padded_sequence(
                inputs, input_lengths, batch_first=True,
                enforce_sorted=False)
        outputs, _ = self.gru(inputs)
        if input_lengths is not None:
            # Restore a dense (batch, time, features) tensor for the
            # linear projection; padded steps come back as zeros.
            outputs, _ = nn.utils.rnn.pad_packed_sequence(
                outputs, batch_first=True)
        outputs = self.output_linear(outputs)
        return outputs