Commit 04057fa6 authored by Eli Uriegas's avatar Eli Uriegas Committed by Facebook GitHub Bot
Browse files

Replaced CHECK_ by TORCH_CHECK_ (#2582)

Summary:
Pull Request resolved: https://github.com/pytorch/audio/pull/2582

The CHECK_* macros were deprecated upstream, so we should replace them here as
well.

Similar to https://github.com/pytorch/vision/pull/6322, relates to https://github.com/pytorch/pytorch/pull/82032

Signed-off-by: default avatarEli Uriegas <eliuriegas@fb.com>

Test Plan: Imported from OSS

Reviewed By: malfet, mthrok

Differential Revision: D38208356

Pulled By: seemethere

fbshipit-source-id: 6f42d517362f415e0775803514eee2628402918f
parent 34ef7e9c
......@@ -81,7 +81,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU;
torch::Tensor costs = torch::empty(
......
......@@ -21,7 +21,7 @@ torch::Tensor compute_alphas(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU;
torch::Tensor alphas = torch::zeros(
......
......@@ -21,7 +21,7 @@ torch::Tensor compute_betas(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CPU);
options.device_ = CPU;
torch::Tensor costs = torch::empty(
......
......@@ -48,7 +48,7 @@ class TensorView {
}
DTYPE& operator()(const std::vector<int>& indices) {
CHECK_EQ(indices.size(), dims_.size());
TORCH_CHECK_EQ(indices.size(), dims_.size());
int index = indices.back();
for (int i = indices.size() - 2; i >= 0; --i) {
index += indices[i] * strides_[i];
......
......@@ -28,7 +28,7 @@ status_t Compute(
DTYPE* gradients = nullptr) {
const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU);
TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_;
......@@ -91,7 +91,7 @@ status_t ComputeAlphas(
DTYPE* alphas) {
const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU);
TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_;
......@@ -140,7 +140,7 @@ status_t ComputeBetas(
DTYPE* betas) {
const Options& options = workspace.GetOptions();
CHECK_EQ(options.device_, CPU);
TORCH_CHECK_EQ(options.device_, CPU);
const int& B = options.batchSize_;
const int& maxT = options.maxSrcLen_;
......
......@@ -82,7 +82,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device());
options.device_ = GPU;
......
......@@ -22,7 +22,7 @@ torch::Tensor compute_alphas(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device());
options.device_ = GPU;
......
......@@ -22,7 +22,7 @@ torch::Tensor compute_betas(
options.blank_ = blank;
options.clamp_ = clamp;
CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
TORCH_CHECK_EQ(logits.device().type(), torch::DeviceType::CUDA);
options.stream_ = at::cuda::getCurrentCUDAStream();
cudaSetDevice(logits.get_device());
options.device_ = GPU;
......
......@@ -27,7 +27,7 @@ class DtypeWorkspace {
~DtypeWorkspace() {}
static int ComputeSizeFromOptions(const Options& options) {
CHECK_NE(options.device_, UNDEFINED);
TORCH_CHECK_NE(options.device_, UNDEFINED);
return ComputeSizeForDenominators(options) +
ComputeSizeForLogProbs(options) + ComputeSizeForAlphas(options) +
ComputeSizeForBetas(options);
......@@ -36,7 +36,7 @@ class DtypeWorkspace {
void Free();
void Reset(const Options& options, DTYPE* data, int size) {
int needed_size = ComputeSizeFromOptions(options);
CHECK_LE(needed_size, size);
TORCH_CHECK_LE(needed_size, size);
options_ = options;
data_ = data;
size_ = size;
......@@ -98,7 +98,7 @@ class IntWorkspace {
void Reset(const Options& options, int* data, int size) {
int needed_size = ComputeSizeFromOptions(options);
CHECK_LE(needed_size, size);
TORCH_CHECK_LE(needed_size, size);
options_ = options;
data_ = data;
size_ = size;
......@@ -109,11 +109,11 @@ class IntWorkspace {
}
int* GetPointerToAlphaCounters() const {
CHECK_EQ(options_.device_, GPU);
TORCH_CHECK_EQ(options_.device_, GPU);
return data_;
}
int* GetPointerToBetaCounters() const {
CHECK_EQ(options_.device_, GPU);
TORCH_CHECK_EQ(options_.device_, GPU);
return GetPointerToAlphaCounters() + ComputeSizeForAlphaCounters(options_);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment