Commit 1fa1a073 authored by Michael Carilli's avatar Michael Carilli
Browse files

Warning message for FusedAdam import if unavailable

parent e0bc5d62
@@ -3,4 +3,7 @@ (apex/__init__.py, new version)
from . import fp16_utils
from . import parallel
from . import amp
try:
    from . import optimizers
except ImportError:
    print("Warning: apex was installed without --cuda_ext. FusedAdam will be unavailable.")
@@ -3,10 +3,9 @@ import torch (apex/parallel/__init__.py, new version; the old `print("using fused syncBN")` line was removed)
from .distributed import DistributedDataParallel, Reducer
try:
    import syncbn
    from .optimized_sync_batchnorm import SyncBatchNorm
except ImportError:
    print("Warning: apex was installed without --cuda_ext. Fused syncbn kernels will be unavailable. Python fallbacks will be used instead.")
    from .sync_batchnorm import SyncBatchNorm

def convert_syncbn_model(module):
...
@@ -4,7 +4,7 @@ try: (new version; warning message reworded for consistency)
    from apex_C import flatten
    from apex_C import unflatten
except ImportError:
    print("Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.")
    from torch._utils import _flatten_dense_tensors as flatten
    from torch._utils import _unflatten_dense_tensors as unflatten
import torch.distributed as dist
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment