Commit c6c7c55

Optional activations for MLP
1 parent 46c2557 commit c6c7c55

1 file changed: 6 additions, 4 deletions

supar/modules/mlp.py

@@ -6,8 +6,8 @@
 
 class MLP(nn.Module):
     r"""
-    Applies a linear transformation together with :class:`~torch.nn.LeakyReLU` activation to the incoming tensor:
-    :math:`y = \mathrm{LeakyReLU}(x A^T + b)`
+    Applies a linear transformation together with a non-linear activation to the incoming tensor:
+    :math:`y = \mathrm{Activation}(x A^T + b)`
 
     Args:
         n_in (~torch.Tensor):
@@ -16,15 +16,17 @@ class MLP(nn.Module):
             The size of each output feature.
         dropout (float):
             If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
+        activation (bool):
+            Whether to use activations. Default: True.
     """
 
-    def __init__(self, n_in, n_out, dropout=0):
+    def __init__(self, n_in, n_out, dropout=0, activation=True):
         super().__init__()
 
         self.n_in = n_in
         self.n_out = n_out
         self.linear = nn.Linear(n_in, n_out)
-        self.activation = nn.LeakyReLU(negative_slope=0.1)
+        self.activation = nn.LeakyReLU(negative_slope=0.1) if activation else nn.Identity()
         self.dropout = SharedDropout(p=dropout)
 
         self.reset_parameters()
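
With activation=False the module reduces to a dropout-regularized linear projection. A minimal usage sketch under that reading; the sizes are illustrative, and the import path and forward behaviour are assumptions inferred from this file rather than shown in the diff:

import torch
from supar.modules.mlp import MLP  # assumed import path for the file above

# Default: LeakyReLU(negative_slope=0.1) is applied after the linear layer.
hidden = MLP(n_in=800, n_out=600, dropout=0.33)

# With activation=False, self.activation is nn.Identity(), so the output is
# just the (dropout-regularized) linear transformation x A^T + b.
proj = MLP(n_in=800, n_out=600, dropout=0.33, activation=False)

x = torch.randn(2, 10, 800)
# Assumes MLP.forward applies linear -> activation -> dropout, as the
# attributes set in __init__ suggest; forward is not part of this diff.
print(hidden(x).shape, proj(x).shape)  # both torch.Size([2, 10, 600])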
