Change name to num_hidden_layers
RaulPPelaez committed Apr 5, 2024
1 parent 8fc74ba commit b77bb0c
Showing 3 changed files with 17 additions and 7 deletions.
2 changes: 1 addition & 1 deletion torchmdnet/models/model.py
@@ -127,7 +127,7 @@ def create_model(args, prior_model=None, mean=None, std=None):
activation=args["activation"],
reduce_op=args["reduce_op"],
dtype=dtype,
- num_layers=args.get("output_mlp_num_layers", 0),
+ num_hidden_layers=args.get("output_mlp_num_layers", 0),
)

# combine representation and output network
5 changes: 3 additions & 2 deletions torchmdnet/models/output_modules.py
@@ -25,6 +25,7 @@ class OutputModel(nn.Module, metaclass=ABCMeta):
Derive this class to make custom output models.
As an example, have a look at the :py:mod:`torchmdnet.output_modules.Scalar` output model.
"""

def __init__(self, allow_prior_model, reduce_op):
super(OutputModel, self).__init__()
self.allow_prior_model = allow_prior_model
@@ -75,7 +76,7 @@ def __init__(
out_channels=1,
hidden_channels=hidden_channels // 2,
activation=activation,
- num_layers=kwargs.get("num_layers", 0),
+ num_hidden_layers=kwargs.get("num_layers", 0),
dtype=dtype,
)
self.reset_parameters()
@@ -214,7 +215,7 @@ def __init__(
out_channels=1,
hidden_channels=hidden_channels // 2,
activation=activation,
- num_layers=kwargs.get("num_layers", 0),
+ num_hidden_layers=kwargs.get("num_layers", 0),
dtype=dtype,
)
atomic_mass = torch.from_numpy(atomic_masses).to(dtype)
17 changes: 13 additions & 4 deletions torchmdnet/models/utils.py
@@ -437,12 +437,21 @@ def forward(self, distances: Tensor) -> Tensor:
class MLP(nn.Module):
"""A simple multi-layer perceptron with a given number of layers and hidden channels.
The simplest MLP has no hidden layers and is composed of two linear layers with a non-linear activation function in between:
.. math::
\text{MLP}(x) = \text{Linear}_o(\text{act}(\text{Linear}_i(x)))
Where :math:`\text{Linear}_i` has input size :math:`\text{in_channels}` and output size :math:`\text{hidden_channels}` and :math:`\text{Linear}_o` has input size :math:`\text{hidden_channels}` and output size :math:`\text{out_channels}`.
Args:
in_channels (int): Number of input features.
out_channels (int): Number of output features.
hidden_channels (int): Number of hidden features.
activation (str): Activation function to use.
- num_layers (int, optional): Number of layers. Defaults to 0.
+ num_hidden_layers (int, optional): Number of hidden layers. Defaults to 0.
dtype (torch.dtype, optional): Data type to use. Defaults to torch.float32.
"""

@@ -452,7 +461,7 @@ def __init__(
out_channels,
hidden_channels,
activation,
- num_layers=0,
+ num_hidden_layers=0,
dtype=torch.float32,
):
super(MLP, self).__init__()
@@ -461,7 +470,7 @@ def __init__(
self.layers = nn.Sequential()
self.layers.append(nn.Linear(in_channels, hidden_channels, dtype=dtype))
self.layers.append(self.act)
- for _ in range(num_layers):
+ for _ in range(num_hidden_layers):
self.layers.append(nn.Linear(hidden_channels, hidden_channels, dtype=dtype))
self.layers.append(self.act)
self.layers.append(nn.Linear(hidden_channels, out_channels, dtype=dtype))
@@ -510,7 +519,7 @@ def __init__(
out_channels=out_channels * 2,
hidden_channels=intermediate_channels,
activation=activation,
- num_layers=0,
+ num_hidden_layers=0,
dtype=dtype,
)
self.act = act_class() if scalar_activation else None
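
For readers skimming the diff, here is a minimal, self-contained sketch of the MLP layout the docstring above describes and of what the renamed num_hidden_layers argument controls. The class name TinyMLP and the fixed nn.SiLU activation are illustrative stand-ins rather than torchmd-net code; with num_hidden_layers=0 the network reduces to the two outer linear layers with a single activation in between, exactly as the formula in the docstring states.

import torch
from torch import nn

class TinyMLP(nn.Module):
    # Illustrative sketch of the layer layout shown in the diff above.
    def __init__(self, in_channels, out_channels, hidden_channels,
                 num_hidden_layers=0, dtype=torch.float32):
        super().__init__()
        act = nn.SiLU()  # stand-in for the configurable activation
        layers = [nn.Linear(in_channels, hidden_channels, dtype=dtype), act]
        # Each hidden layer adds one hidden_channels -> hidden_channels block.
        for _ in range(num_hidden_layers):
            layers += [nn.Linear(hidden_channels, hidden_channels, dtype=dtype), act]
        layers.append(nn.Linear(hidden_channels, out_channels, dtype=dtype))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)

# num_hidden_layers=0 -> Linear_i, act, Linear_o                      (3 modules)
# num_hidden_layers=2 -> Linear_i, act, 2 x (Linear, act), Linear_o   (7 modules)
print(len(TinyMLP(16, 1, 32, num_hidden_layers=0).layers))  # 3
print(len(TinyMLP(16, 1, 32, num_hidden_layers=2).layers))  # 7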
