Unverified Commit 097238f8 authored by mcarilli, committed by GitHub

Add Pyprof removal warnings that point to new repo (#862)


Co-authored-by: Michael Carilli <mcarilli@nvidia.com>
parent 76026a35
@@ -138,10 +138,8 @@ A Python-only build omits:
 - Fused kernels that improve the performance of `apex.parallel.DistributedDataParallel` and `apex.amp`.
 `DistributedDataParallel`, `amp`, and `SyncBatchNorm` will still be usable, but they may be slower.
-To enable PyProf support, you need to install the packages required by PyProf. To do so, add the "--pyprof" option at installation time:
-```
-$ pip install -v --no-cache-dir --global-option="--pyprof" --global-option="--cpp_ext" --global-option="--cuda_ext" ./
-```
+Pyprof support has been moved to its own [dedicated repository](https://github.com/NVIDIA/PyProf).
+The codebase is deprecated in Apex and will be removed soon.
 ### Windows support
 Windows support is experimental, and Linux is recommended. `pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" .` may work if you were able to build Pytorch from source
...
@@ -204,6 +204,13 @@ def patchClass(cls):
             add_wrapper(cls, f)
 def init():
+    string = "\n\nPyprof has been moved to its own dedicated repository and will " + \
+             "soon be removed from Apex. Please visit\n" + \
+             "https://github.com/NVIDIA/PyProf\n" + \
+             "for the latest version.\n\n"
+    # print regardless of warning state
+    print(string)
     print("Initializing NVTX monkey patches")
     for cls in [torch, torch.Tensor, torch.nn.functional,]:
         patchClass(cls)
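For reference, the notice added to init() above is what users see when pyprof's NVTX monkey patching is set up. A minimal usage sketch follows, assuming an Apex build that still bundles apex.pyprof and that init() is reached via pyprof.nvtx.init(); the exact entry point and import path are assumptions, not shown in this diff.

```python
# Minimal usage sketch (assumed API): pyprof.nvtx.init() prints the relocation
# notice, then monkey-patches torch, torch.Tensor, and torch.nn.functional
# so their calls emit NVTX range markers for an NVTX-aware profiler.
import torch
from apex import pyprof

pyprof.nvtx.init()  # relocation notice is printed here, before the patching messages

x = torch.randn(4, 4)
y = x.matmul(x)  # a patched op; its NVTX markers are captured when run under a profiler
```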
...
@@ -22,7 +22,7 @@ if not torch.cuda.is_available():
     if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
         os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
-print("torch.__version__ = ", torch.__version__)
+print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
 TORCH_MAJOR = int(torch.__version__.split('.')[0])
 TORCH_MINOR = int(torch.__version__.split('.')[1])
@@ -35,6 +35,11 @@ ext_modules = []
 extras = {}
 if "--pyprof" in sys.argv:
+    string = "\n\nPyprof has been moved to its own dedicated repository and will " + \
+             "soon be removed from Apex. Please visit\n" + \
+             "https://github.com/NVIDIA/PyProf\n" + \
+             "for the latest version."
+    warnings.warn(string, DeprecationWarning)
     with open('requirements.txt') as f:
         required_packages = f.read().splitlines()
     extras['pyprof'] = required_packages
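One detail worth noting: setup.py raises a DeprecationWarning, while nvmarker.py prints its notice unconditionally ("print regardless of warning state"), presumably because Python's default filters suppress DeprecationWarning raised outside __main__. Below is a standalone sketch, not part of this commit, showing how such a warning can be surfaced explicitly.

```python
# Standalone sketch: DeprecationWarning is hidden by Python's default warning
# filters unless it originates in __main__, so a filter must be relaxed for the
# message to appear on stderr.
import warnings

warnings.simplefilter("always", DeprecationWarning)  # surface all deprecation notices
warnings.warn(
    "Pyprof has been moved to https://github.com/NVIDIA/PyProf and will soon be removed from Apex.",
    DeprecationWarning,
)
```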
...