Source code for monai.networks.blocks.mlp

# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch.nn as nn


class MLPBlock(nn.Module):
    """
    A multi-layer perceptron block, based on: "Dosovitskiy et al.,
    An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>"
    """

    def __init__(
        self,
        hidden_size: int,
        mlp_dim: int,
        dropout_rate: float = 0.0,
    ) -> None:
        """
        Args:
            hidden_size: dimension of hidden layer.
            mlp_dim: dimension of feedforward layer.
            dropout_rate: fraction of the input units to drop.
        """

        super().__init__()

        if not (0 <= dropout_rate <= 1):
            raise AssertionError("dropout_rate should be between 0 and 1.")

        self.linear1 = nn.Linear(hidden_size, mlp_dim)
        self.linear2 = nn.Linear(mlp_dim, hidden_size)
        self.fn = nn.GELU()
        self.drop1 = nn.Dropout(dropout_rate)
        self.drop2 = nn.Dropout(dropout_rate)

    def forward(self, x):
        # expand to mlp_dim with a GELU non-linearity, then project back
        # to hidden_size, applying dropout after each linear layer
        x = self.fn(self.linear1(x))
        x = self.drop1(x)
        x = self.linear2(x)
        x = self.drop2(x)
        return x
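For reference, a minimal usage sketch. The hidden_size/mlp_dim values below mirror the ViT-Base configuration from the cited paper and are chosen for illustration only; any positive integers work. Because both layers are nn.Linear, the block operates on the last dimension and preserves the input shape.

import torch

from monai.networks.blocks.mlp import MLPBlock

# illustrative sizes: ViT-Base uses hidden_size=768, mlp_dim=3072
block = MLPBlock(hidden_size=768, mlp_dim=3072, dropout_rate=0.1)

x = torch.randn(2, 196, 768)  # (batch, num_tokens, hidden_size)
out = block(x)
print(out.shape)  # torch.Size([2, 196, 768]) -- same shape as the input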