
Commit

use_dropout_after_self_att
albertz committed Nov 13, 2022
1 parent 7d67e96 commit da2de97
Showing 82 changed files with 164 additions and 0 deletions.
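
Every changed file gets the same two-line guard at the top of Model.__init__, right after super().__init__() and before nn.ConformerEncoder is constructed, so that all ConformerEncoderLayer instances are built with the dropout after self-attention disabled. A minimal sketch of the pattern in isolation (assuming the returnn_common nn API these setups use; the import path, the num_layers value, and the omitted encoder arguments are placeholders, not taken from the diff):

from returnn_common import nn  # assumed import path for the nn API used here

class Model(nn.Module):
  def __init__(self, in_dim: nn.Dim, *, l2: float = 0.0001):
    super(Model, self).__init__()
    # Flip the class-level default before any layer is created, so every
    # ConformerEncoderLayer built below omits the dropout after self-attention.
    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
    self.in_dim = in_dim
    self.encoder = nn.ConformerEncoder(
      in_dim,
      nn.FeatureDim("enc", 512),
      num_layers=12,  # placeholder; the real configs pass further arguments here
    )

Since use_dropout_after_self_att is a class attribute, the assignment affects every ConformerEncoderLayer created afterwards in the same process; the surrounding if only skips the assignment when the flag is already False.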

@@ -65,6 +65,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.encoder = nn.ConformerEncoder(
       in_dim,
       nn.FeatureDim("enc", 512),

@@ -81,6 +81,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -81,6 +81,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -378,6 +378,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -85,6 +85,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -87,6 +87,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -86,6 +86,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -86,6 +86,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -88,6 +88,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -88,6 +88,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       enc_l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       enc_l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -88,6 +88,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -91,6 +91,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -89,6 +89,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,

@@ -86,6 +86,8 @@ def __init__(self, in_dim: nn.Dim, *,
       l2: float = 0.0001,
   ):
     super(Model, self).__init__()
+    if nn.ConformerEncoderLayer.use_dropout_after_self_att:
+      nn.ConformerEncoderLayer.use_dropout_after_self_att = False
     self.in_dim = in_dim
     self.encoder = nn.ConformerEncoder(
       in_dim,
