Unverified Commit 5de49cc9 authored by Jithun Nair, committed by GitHub

Cherry-pick b2fdf9c4 from upstream Apex and resolve conflicts (#68)

parent cfe106d6
@@ -30,7 +30,12 @@
 #include <cuda.h>
 #include <vector>
+#ifdef OLD_GENERATOR_PATH
+#include <ATen/CUDAGeneratorImpl.h>
+#else
 #include <ATen/cuda/CUDAGeneratorImpl.h>
+#endif
 #include <ATen/cuda/CUDAGraphsUtils.cuh>
 #include <fmha_utils.h>
...
 #include <ATen/ATen.h>
-#ifdef OLD_GENERATOR
-#include <ATen/CUDAGenerator.h>
+#ifdef OLD_GENERATOR_PATH
+#include <ATen/CUDAGeneratorImpl.h>
 #else
 #include <ATen/cuda/CUDAGeneratorImpl.h>
 #endif
@@ -178,15 +178,10 @@ void apex_fused_dropout_cuda(scalar_t const *inputs, scalar_t *outputs,
   std::pair<uint64_t, uint64_t> rng_engine_inputs;
   {
     // See Note [Acquire lock when using random generators]
-#ifdef OLD_GENERATOR
-    std::lock_guard<std::mutex> lock(gen->mutex_);
-    rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
-#else
     std::lock_guard<std::mutex> lock(gen.mutex());
     rng_engine_inputs =
         at::check_generator<at::CUDAGeneratorImpl>(gen)->philox_engine_inputs(
             counter_offset);
-#endif
   }
   apex_fused_dropout_kernel<scalar_t, accscalar_t, IndexType>
@@ -219,15 +214,10 @@ void apex_dropout_add_cuda(scalar_t const *inputs, scalar_t const *add_inputs,
   std::pair<uint64_t, uint64_t> rng_engine_inputs;
   {
     // See Note [Acquire lock when using random generators]
-#ifdef OLD_GENERATOR
-    std::lock_guard<std::mutex> lock(gen->mutex_);
-    rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
-#else
     std::lock_guard<std::mutex> lock(gen.mutex());
     rng_engine_inputs =
         at::check_generator<at::CUDAGeneratorImpl>(gen)->philox_engine_inputs(
             counter_offset);
-#endif
   }
   apex_dropout_add_kernel<scalar_t, accscalar_t, IndexType>
...
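Both dropout hunks above simply delete the OLD_GENERATOR branch, so the Philox seed/offset pair is always obtained through the CUDAGeneratorImpl API. For reference, a minimal standalone sketch of that acquisition pattern follows; the helper name acquire_philox_inputs and the surrounding boilerplate are illustrative only and are not part of the patch.

// Sketch: acquire Philox engine inputs from a CUDA generator the way the
// updated hunks do (new-style API only). The helper name is hypothetical.
#ifdef OLD_GENERATOR_PATH
#include <ATen/CUDAGeneratorImpl.h>
#else
#include <ATen/cuda/CUDAGeneratorImpl.h>
#endif
#include <ATen/core/Generator.h>
#include <mutex>
#include <utility>

std::pair<uint64_t, uint64_t> acquire_philox_inputs(at::Generator gen,
                                                    uint64_t counter_offset) {
  std::pair<uint64_t, uint64_t> rng_engine_inputs;
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen.mutex());
    // Advances the generator's offset by counter_offset and returns the
    // (seed, old offset) pair consumed by the Philox-based dropout kernels.
    rng_engine_inputs =
        at::check_generator<at::CUDAGeneratorImpl>(gen)->philox_engine_inputs(
            counter_offset);
  }
  return rng_engine_inputs;
}

Taking the generator's mutex before calling philox_engine_inputs is what the referenced Note [Acquire lock when using random generators] requires, since the call advances the generator's offset counter in place.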
 #pragma once
 #include "philox.h"
-#include <ATen/cuda/CUDAGeneratorImpl.h>
 #include <ATen/cuda/CUDAGraphsUtils.cuh>
 #include <curand_kernel.h>
+#ifdef OLD_GENERATOR_PATH
+#include <ATen/CUDAGeneratorImpl.h>
+#else
+#include <ATen/cuda/CUDAGeneratorImpl.h>
+#endif
 #include <assert.h>
 #include <cfloat>
 #include <cmath>
...
@@ -4,7 +4,13 @@
 #include <torch/extension.h>
 #include <ATen/AccumulateType.h>
+#ifdef OLD_GENERATOR_PATH
+#include <ATen/CUDAGeneratorImpl.h>
+#else
 #include <ATen/cuda/CUDAGeneratorImpl.h>
+#endif
 #include <ATen/cuda/CUDAContext.h>
 #include <ATen/cuda/CUDAGraphsUtils.cuh>
 #include <c10/macros/Macros.h>
...
@@ -362,11 +362,12 @@ if "--deprecated_fused_lamb" in sys.argv or "--cuda_ext" in sys.argv:
                               include_dirs=[os.path.join(this_dir, 'csrc')],
                               extra_compile_args = nvcc_args_fused_lamb if not IS_ROCM_PYTORCH else hipcc_args_fused_lamb))
-# Check, if ATen/CUDAGenerator.h is found, otherwise use the new ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026
+# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
+# See https://github.com/pytorch/pytorch/pull/70650
 generator_flag = []
 torch_dir = torch.__path__[0]
-if os.path.exists(os.path.join(torch_dir, 'include', 'ATen', 'CUDAGenerator.h')):
-    generator_flag = ['-DOLD_GENERATOR']
+if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
+    generator_flag = ["-DOLD_GENERATOR_PATH"]
 if "--fast_layer_norm" in sys.argv:
     sys.argv.remove("--fast_layer_norm")
...
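The setup.py hunk is what drives the include guards above: when the installed torch still ships ATen/CUDAGeneratorImpl.h (the header layout before https://github.com/pytorch/pytorch/pull/70650 moved it under ATen/cuda/), -DOLD_GENERATOR_PATH is added to the extension's compile flags. A compressed consumer-side sketch, for illustration only:

// Sketch of how a source file in this extension reacts to the flag chosen by
// setup.py. With -DOLD_GENERATOR_PATH (older torch install) the pre-move
// header path is used; otherwise the current ATen/cuda/ path is used. The
// at::CUDAGeneratorImpl API is the same under both paths, so unlike the old
// OLD_GENERATOR flag, no runtime code needs to be guarded anymore.
#ifdef OLD_GENERATOR_PATH
#include <ATen/CUDAGeneratorImpl.h>       // header location before pytorch#70650
#else
#include <ATen/cuda/CUDAGeneratorImpl.h>  // header location after pytorch#70650
#endif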