Commit 04057fa6 authored by Eli Uriegas, committed by Facebook GitHub Bot
Browse files

Replaced CHECK_ by TORCH_CHECK_ (#2582)

Summary:
Pull Request resolved: https://github.com/pytorch/audio/pull/2582

The CHECK_ macros were deprecated upstream, so we should replace them here as
well with their TORCH_CHECK_ equivalents.

Similar to https://github.com/pytorch/vision/pull/6322, relates to https://github.com/pytorch/pytorch/pull/82032

Signed-off-by: Eli Uriegas <eliuriegas@fb.com>

Test Plan: Imported from OSS

Reviewed By: malfet, mthrok

Differential Revision: D38208356

Pulled By: seemethere

fbshipit-source-id: 6f42d517362f415e0775803514eee2628402918f
parent 34ef7e9c
...@@ -81,7 +81,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute( ...@@ -81,7 +81,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU; options.device_ = CPU;
torch::Tensor costs = torch::empty( torch::Tensor costs = torch::empty(
......
...@@ -21,7 +21,7 @@ torch::Tensor compute_alphas( ...@@ -21,7 +21,7 @@ torch::Tensor compute_alphas(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU; options.device_ = CPU;
torch::Tensor alphas = torch::zeros( torch::Tensor alphas = torch::zeros(
......
...@@ -21,7 +21,7 @@ torch::Tensor compute_betas( ...@@ -21,7 +21,7 @@ torch::Tensor compute_betas(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU; options.device_ = CPU;
torch::Tensor costs = torch::empty( torch::Tensor costs = torch::empty(
......
...@@ -48,7 +48,7 @@ class TensorView { ...@@ -48,7 +48,7 @@ class TensorView {
} }
DTYPE& operator()(const std::vector<int>& indices) { DTYPE& operator()(const std::vector<int>& indices) {
CHECK_EQ(indices.size(), dims_.size()); TORCH_CHECK_EQ(indices.size(), dims_.size());
int index = indices.back(); int index = indices.back();
for (int i = indices.size() - 2; i >= 0; --i) { for (int i = indices.size() - 2; i >= 0; --i) {
index += indices[i] * strides_[i]; index += indices[i] * strides_[i];
......
...@@ -28,7 +28,7 @@ status_t Compute( ...@@ -28,7 +28,7 @@ status_t Compute(
DTYPE* gradients = nullptr) { DTYPE* gradients = nullptr) {
const Options& options = workspace.GetOptions(); const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU); TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_; const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_; const int& maxT = options.maxSrcLen_;
...@@ -91,7 +91,7 @@ status_t ComputeAlphas( ...@@ -91,7 +91,7 @@ status_t ComputeAlphas(
DTYPE* alphas) { DTYPE* alphas) {
const Options& options = workspace.GetOptions(); const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU); TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_; const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_; const int& maxT = options.maxSrcLen_;
...@@ -140,7 +140,7 @@ status_t ComputeBetas( ...@@ -140,7 +140,7 @@ status_t ComputeBetas(
DTYPE* betas) { DTYPE* betas) {
const Options& options = workspace.GetOptions(); const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU); TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_; const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_; const int& maxT = options.maxSrcLen_;
......
...@@ -82,7 +82,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute( ...@@ -82,7 +82,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream(); options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device()); cudaSetDevice(logits.get_device());
options.device_ = GPU; options.device_ = GPU;
......
...@@ -22,7 +22,7 @@ torch::Tensor compute_alphas( ...@@ -22,7 +22,7 @@ torch::Tensor compute_alphas(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream(); options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device()); cudaSetDevice(logits.get_device());
options.device_ = GPU; options.device_ = GPU;
......
...@@ -22,7 +22,7 @@ torch::Tensor compute_betas( ...@@ -22,7 +22,7 @@ torch::Tensor compute_betas(
options.blank_ = blank; options.blank_ = blank;
options.clamp_ = clamp; options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA); TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream(); options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device()); cudaSetDevice(logits.get_device());
options.device_ = GPU; options.device_ = GPU;
......
...@@ -27,7 +27,7 @@ class DtypeWorkspace { ...@@ -27,7 +27,7 @@ class DtypeWorkspace {
~DtypeWorkspace() {} ~DtypeWorkspace() {}
static int ComputeSizeFromOptions(const Options& options) { static int ComputeSizeFromOptions(const Options& options) {
CHECK_NE(options.device_, UNDEFINED); TORCH_CHECK_NE(options.device_, UNDEFINED);
return ComputeSizeForDenominators(options) + return ComputeSizeForDenominators(options) +
ComputeSizeForLogProbs(options) + ComputeSizeForAlphas(options) + ComputeSizeForLogProbs(options) + ComputeSizeForAlphas(options) +
ComputeSizeForBetas(options); ComputeSizeForBetas(options);
...@@ -36,7 +36,7 @@ class DtypeWorkspace { ...@@ -36,7 +36,7 @@ class DtypeWorkspace {
void Free(); void Free();
void Reset(const Options& options, DTYPE* data, int size) { void Reset(const Options& options, DTYPE* data, int size) {
int needed_size = ComputeSizeFromOptions(options); int needed_size = ComputeSizeFromOptions(options);
CHECK_LE(needed_size, size); TORCH_CHECK_LE(needed_size, size);
options_ = options; options_ = options;
data_ = data; data_ = data;
size_ = size; size_ = size;
...@@ -98,7 +98,7 @@ class IntWorkspace { ...@@ -98,7 +98,7 @@ class IntWorkspace {
void Reset(const Options& options, int* data, int size) { void Reset(const Options& options, int* data, int size) {
int needed_size = ComputeSizeFromOptions(options); int needed_size = ComputeSizeFromOptions(options);
CHECK_LE(needed_size, size); TORCH_CHECK_LE(needed_size, size);
options_ = options; options_ = options;
data_ = data; data_ = data;
size_ = size; size_ = size;
...@@ -109,11 +109,11 @@ class IntWorkspace { ...@@ -109,11 +109,11 @@ class IntWorkspace {
} }
int* GetPointerToAlphaCounters() const { int* GetPointerToAlphaCounters() const {
CHECK_EQ(options_.device_, GPU); TORCH_CHECK_EQ(options_.device_, GPU);
return data_; return data_;
} }
int* GetPointerToBetaCounters() const { int* GetPointerToBetaCounters() const {
CHECK_EQ(options_.device_, GPU); TORCH_CHECK_EQ(options_.device_, GPU);
return GetPointerToAlphaCounters() + ComputeSizeForAlphaCounters(options_); return GetPointerToAlphaCounters() + ComputeSizeForAlphaCounters(options_);
} }
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment