"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "4fb3d3a0f6f0da95548a5e6bd02850d468c85e32"
Unverified Commit 32295b15, authored by Funtowicz Morgan, committed by GitHub

Gelu10 (#15676)

* Add GeLU10 (a clipped version of GeLU) to transformers to improve quantization performance (see the sketch below).

* Add unit tests.

* Import tensorflow after `is_tf_available` check.

* Fix wrong TensorFlow function: replace `tf.tensor` with `tf.constant`.

* style.

* use `tf.math.max`

* Fix tf tests.

* style.

* style style style style style style

* style style style style style style

* Address @sgugger comments.

* Fix wrong operator for raising ValueError for ClippedGELUActivation.
parent 2c3fcc64
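For context on the first bullet above: plain GeLU is unbounded on the positive side, so its output range normally has to be calibrated before quantization. Clipping the output to a fixed interval such as [-10, 10] makes the range known in advance, so a static quantization scale can be used while the small negative part of the GeLU curve stays representable. A minimal illustrative sketch, not part of this commit (the helper name clipped_gelu and the int8 numbers are assumptions for illustration only):

import torch
from torch.nn.functional import gelu

def clipped_gelu(x: torch.Tensor, lo: float = -10.0, hi: float = 10.0) -> torch.Tensor:
    # Same idea as the ClippedGELUActivation / gelu_10 added in this commit:
    # bound the GeLU output to a fixed [lo, hi] interval.
    return torch.clip(gelu(x), lo, hi)

# With a fixed output range, an int8 scale can be chosen statically instead of
# being calibrated from activation statistics (illustrative numbers only).
scale = 10.0 / 127  # map [-10, 10] onto the symmetric int8 range [-127, 127]

x = torch.randn(8) * 20
q = torch.round(clipped_gelu(x) / scale).clamp(-127, 127).to(torch.int8)
dequantized = q.float() * scale  # int8 approximation of the clipped activation

The diffs below add exactly this clipped variant, registered under the name "gelu_10", for both the PyTorch and TensorFlow activation maps.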
@@ -74,6 +74,31 @@ class QuickGELUActivation(nn.Module):
        return input * torch.sigmoid(1.702 * input)


class ClippedGELUActivation(nn.Module):
    """
    Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes,
    as it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
    https://arxiv.org/abs/2004.09602.

    Gaussian Error Linear Unit. Original implementation of the gelu activation function in the Google BERT repo when
    initially created.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
    torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
    """

    def __init__(self, min: float, max: float):
        if min > max:
            raise ValueError(f"min should be < max (got min: {min}, max: {max})")

        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x: Tensor) -> Tensor:
        return torch.clip(gelu(x), self.min, self.max)


class SiLUActivation(nn.Module):
    """
    See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
@@ -136,6 +161,7 @@ ACT2FN = {
    "gelu_new": NewGELUActivation(),
    "gelu_fast": FastGELUActivation(),
    "quick_gelu": QuickGELUActivation(),
    "gelu_10": ClippedGELUActivation(-10, 10),
    "mish": MishActivation(),
    "linear": LinearActivation(),
    "sigmoid": nn.Sigmoid(),
@@ -69,6 +69,20 @@ def quick_gelu(x):
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purposes,
    as it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
    https://arxiv.org/abs/2004.09602

    Gaussian Error Linear Unit. Original implementation of the gelu activation function in the Google BERT repo when
    initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). Also see
    https://arxiv.org/abs/1606.08415
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where
@@ -107,6 +121,7 @@ ACT2FN = {
    "tanh": tf.keras.activations.tanh,
    "gelu_fast": gelu_fast,
    "quick_gelu": quick_gelu,
    "gelu_10": gelu_10,
    "glu": glu,
}
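The TensorFlow registration mirrors the PyTorch one; a short sketch, assuming get_tf_activation from transformers.activations_tf as in the tests below:

import tensorflow as tf
from transformers.activations_tf import get_tf_activation

gelu10 = get_tf_activation("gelu_10")
x = tf.constant([-100.0, -1.0, 0.0, 1.0, 100.0])
y = gelu10(x)  # _gelu(x) clipped to [-10, 10] via tf.clip_by_value
print(tf.math.reduce_max(y).numpy())  # 10.0 for this input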
@@ -32,6 +32,19 @@ class TestActivations(unittest.TestCase):
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        # Mask out the clipped positions so only the unclipped part is compared against plain GeLU.
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("swish")
        get_activation("silu")
@@ -40,6 +53,7 @@ class TestActivations(unittest.TestCase):
        get_activation("gelu_new")
        get_activation("gelu_fast")
        get_activation("gelu_python")
        get_activation("gelu_10")
        get_activation("quick_gelu")
        get_activation("mish")
        get_activation("linear")
@@ -14,16 +14,33 @@
import unittest

import numpy as np

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers.activations_tf import get_tf_activation


@require_tf
class TestTFActivations(unittest.TestCase):
    def test_gelu_10(self):
        x = tf.constant([-100, -1.0, -0.1, 0, 0.1, 1.0, 100.0])
        gelu = get_tf_activation("gelu")
        gelu10 = get_tf_activation("gelu_10")

        y_gelu = gelu(x)
        y_gelu_10 = gelu10(x)

        # Mask out the clipped positions so only the unclipped part is compared against plain GeLU.
        clipped_mask = tf.where(y_gelu_10 < 10.0, 1.0, 0.0)

        self.assertEqual(tf.math.reduce_max(y_gelu_10).numpy().item(), 10.0)
        self.assertTrue(np.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_tf_activation("swish")
        get_tf_activation("silu")
@@ -32,6 +49,7 @@ class TestTFActivations(unittest.TestCase):
        get_tf_activation("tanh")
        get_tf_activation("gelu_new")
        get_tf_activation("gelu_fast")
        get_tf_activation("gelu_10")
        get_tf_activation("mish")
        get_tf_activation("quick_gelu")
        get_tf_activation("glu")