"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "0b7daa6de99b759f4c6c0b723716b70e4107a2c8"
Commit f47e70e8 authored by nps1ngh, committed by GitHub

Use `inplace=None` as default in `ops.MLP` (#7209)


Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent e171bee8
@@ -268,7 +268,8 @@ class MLP(torch.nn.Sequential):
         hidden_channels (List[int]): List of the hidden channel dimensions
         norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
         activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
-        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
+        inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
+            Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
         bias (bool): Whether to use bias in the linear layer. Default ``True``
         dropout (float): The probability for the dropout layer. Default: 0.0
     """
@@ -279,7 +280,7 @@ class MLP(torch.nn.Sequential):
         hidden_channels: List[int],
         norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
         activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
-        inplace: Optional[bool] = True,
+        inplace: Optional[bool] = None,
         bias: bool = True,
         dropout: float = 0.0,
     ):
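For context, here is a minimal sketch of how an ``Optional[bool]`` ``inplace`` flag can be forwarded so that ``None`` leaves the activation and ``Dropout`` layers at their own defaults (``inplace=False`` for both). The helper name ``build_mlp_layers`` and its exact layer ordering are illustrative, not the literal torchvision source:

```python
from typing import Callable, List, Optional

import torch


def build_mlp_layers(
    in_channels: int,
    hidden_channels: List[int],
    activation_layer: Callable[..., torch.nn.Module] = torch.nn.ReLU,
    inplace: Optional[bool] = None,
    dropout: float = 0.0,
) -> List[torch.nn.Module]:
    # When inplace is None, no "inplace" kwarg is passed at all, so the
    # activation layer and torch.nn.Dropout fall back to their own defaults.
    params = {} if inplace is None else {"inplace": inplace}

    layers: List[torch.nn.Module] = []
    in_dim = in_channels
    for hidden_dim in hidden_channels:
        layers.append(torch.nn.Linear(in_dim, hidden_dim))
        layers.append(activation_layer(**params))
        layers.append(torch.nn.Dropout(dropout, **params))
        in_dim = hidden_dim
    return layers
```

With the new default, ``torchvision.ops.MLP(32, [64, 64])`` no longer forces ``inplace=True`` on the activation and dropout layers; callers who want the previous behaviour can still pass ``inplace=True`` explicitly.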