Unverified commit cd96a84a authored by Rui Xu, committed by GitHub

[Feature] Add clamp activation layer. (#685)



* add clamp without unittest

* add clamp-act with unit test

* fix name bug

* use logical and

* fix logical_and

* fix linting

* rename ClampLayer to Clamp

* rename ClampLayer to Clamp
Co-authored-by: nbei <631557085@qq.com>
parent 1e925a05
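Before the diff, a minimal usage sketch of the new layer. It is not part of the commit; it assumes the public import path mmcv.cnn.build_activation_layer and mirrors the unit test added below. Note that the class is registered twice, so both 'Clamp' and 'Clip' resolve to the same module.

import torch
from mmcv.cnn import build_activation_layer  # assumed public import path

# 'Clamp' and 'Clip' are two registry names for the same layer.
clamp = build_activation_layer(dict(type='Clamp'))        # defaults: min=-1, max=1
clip = build_activation_layer(dict(type='Clip', min=0.))  # max keeps its default of 1

x = torch.randn(4) * 10
print(clamp(x))  # values limited to [-1, 1]
print(clip(x))   # values limited to [0, 1]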
import torch
import torch.nn as nn
from mmcv.utils import build_from_cfg
...
@@ -10,6 +11,38 @@ for module in [
    ACTIVATION_LAYERS.register_module(module=module)


@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    This activation function is to clamp the feature map value within
    :math:`[min, max]`. More details can be found in ``torch.clamp()``.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1., max=1.):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return torch.clamp(x, min=self.min, max=self.max)


def build_activation_layer(cfg):
    """Build activation layer.
...

import numpy as np
import pytest
import torch
import torch.nn as nn
...
@@ -182,6 +183,18 @@ def test_build_activation_layer():
        layer = build_activation_layer(cfg)
        assert isinstance(layer, module)

    # sanity check for Clamp
    act = build_activation_layer(dict(type='Clamp'))
    x = torch.randn(10) * 1000
    y = act(x)
    assert np.logical_and((y >= -1).numpy(), (y <= 1).numpy()).all()
    act = build_activation_layer(dict(type='Clip', min=0))
    y = act(x)
    assert np.logical_and((y >= 0).numpy(), (y <= 1).numpy()).all()
    act = build_activation_layer(dict(type='Clamp', max=0))
    y = act(x)
    assert np.logical_and((y >= -1).numpy(), (y <= 0).numpy()).all()


def test_build_padding_layer():
    with pytest.raises(TypeError):
...
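Since Clamp(min=0., max=6.) reduces to the same element-wise operation as nn.ReLU6, a quick equivalence check is an easy way to convince yourself the layer behaves as documented. The snippet below is an illustrative sketch, not part of this commit; it again assumes build_activation_layer is importable from mmcv.cnn.

import torch
import torch.nn as nn
from mmcv.cnn import build_activation_layer  # assumed public import path

x = torch.randn(8) * 10
clamp = build_activation_layer(dict(type='Clamp', min=0., max=6.))
relu6 = nn.ReLU6()

# Both restrict values to [0, 6], so the outputs should agree element-wise.
assert torch.allclose(clamp(x), relu6(x))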