Unverified commit 2703a37a authored by jiangmingyan, committed by GitHub

[amp] Add naive amp demo (#3774)

* [mixed_precision] add naive amp demo

parent 48bd0567
The mixed-precision package __init__ is updated to import, export, and register the new class (the __all__ entries are corrected here to match the class names actually imported):

from .bf16 import BF16MixedPrecision
from .fp8 import FP8MixedPrecision
from .fp16_apex import FP16ApexMixedPrecision
from .fp16_naive import FP16NaiveMixedPrecision
from .fp16_torch import FP16TorchMixedPrecision
from .mixed_precision_base import MixedPrecision

__all__ = [
    'MixedPrecision', 'mixed_precision_factory', 'FP16ApexMixedPrecision', 'FP16TorchMixedPrecision',
    'FP16NaiveMixedPrecision', 'BF16MixedPrecision', 'FP8MixedPrecision'
]

_mixed_precision_mapping = {
    'fp16': FP16TorchMixedPrecision,
    'fp16_apex': FP16ApexMixedPrecision,
    'fp16_naive': FP16NaiveMixedPrecision,
    'bf16': BF16MixedPrecision,
    'fp8': FP8MixedPrecision
}
...
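The elided remainder of the file presumably defines mixed_precision_factory, since that name is exported in __all__. A minimal sketch, assuming a factory that simply dispatches on _mixed_precision_mapping (the signature and error handling are assumptions, not taken from the commit):

def mixed_precision_factory(mixed_precision_type: str) -> MixedPrecision:
    # Instantiate the plugin class registered for the requested precision mode.
    # NOTE: sketch only; the real factory may accept extra constructor kwargs.
    if mixed_precision_type in _mixed_precision_mapping:
        return _mixed_precision_mapping[mixed_precision_type]()
    raise ValueError(f"Mixed precision type {mixed_precision_type} is not supported, "
                     f"expected one of {list(_mixed_precision_mapping.keys())}.")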
The commit also adds the new fp16_naive module (the import target above), which for now contains only a placeholder:

from .mixed_precision_base import MixedPrecision


class FP16NaiveMixedPrecision(MixedPrecision):
    """Naive FP16 mixed precision (placeholder; behavior not yet implemented)."""
    pass
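For illustration only, using the names defined in the __init__ above: once registered, the naive mode is resolved through the mapping by its string key like any other plugin.

# Hypothetical usage sketch; assumes _mixed_precision_mapping from the __init__ above.
plugin_cls = _mixed_precision_mapping['fp16_naive']    # -> FP16NaiveMixedPrecision
plugin = plugin_cls()                                  # stub instance; no AMP behavior yet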