OpenDAS / apex / Commits

Commit adee29f6
Authored Jul 03, 2019 by Michael Carilli
Parent: b9336b1e

    Changing AT_CHECK to TORCH_CHECK
Showing 5 changed files with 25 additions and 18 deletions:
  csrc/compat.h                    +3  -0
  csrc/layer_norm_cuda.cpp         +5  -4
  csrc/multi_tensor_apply.cuh      +7  -6
  csrc/multi_tensor_sgd_kernel.cu  +2  -1
  csrc/welford.cu                  +8  -7
csrc/compat.h (new file, mode 100644)

+#ifndef TORCH_CHECK
+#define TORCH_CHECK AT_CHECK
+#endif
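
Background for this new header: PyTorch deprecated the AT_CHECK macro in favor of TORCH_CHECK (both throw a c10::Error that surfaces to Python callers as a RuntimeError), but older PyTorch releases only define AT_CHECK. The guard above maps TORCH_CHECK back to AT_CHECK on those older headers, so the rest of csrc/ can use the new name unconditionally. A minimal sketch of the pattern; the function name example_op is illustrative, not part of this commit:

// Illustrative sketch only (example_op is not part of this commit).
// With compat.h included, the same TORCH_CHECK call compiles against both
// older PyTorch headers (which only define AT_CHECK) and newer ones
// (which define TORCH_CHECK natively).
#include <torch/extension.h>
#include "compat.h"

at::Tensor example_op(at::Tensor x) {
  TORCH_CHECK(x.is_cuda(), "x must be a CUDA tensor");
  return x;
}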
csrc/layer_norm_cuda.cpp

 #include <torch/extension.h>
 #include <vector>
 #include <cassert>
+#include "compat.h"

 namespace {
 void compute_n1_n2(
...
@@ -35,8 +36,8 @@ void check_args(
     at::Tensor beta
     )
 {
-    AT_CHECK(!gamma.defined() || gamma.sizes().equals(normalized_shape));
-    AT_CHECK(!beta.defined() || beta.sizes().equals(normalized_shape));
+    TORCH_CHECK(!gamma.defined() || gamma.sizes().equals(normalized_shape));
+    TORCH_CHECK(!beta.defined() || beta.sizes().equals(normalized_shape));
 }

 void check_args(
...
@@ -113,8 +114,8 @@ void cuda_layer_norm(
     at::Tensor* beta,
     double epsilon);

-#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
 #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

 std::vector<at::Tensor> layer_norm(
...
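
The three macros are unchanged in behavior by the rename; they are typically invoked at the top of each C++ entry point before touching tensor data. A hedged usage sketch; the entry-point name is hypothetical, not from this file:

// Illustrative only: typical use of the macros defined above.
at::Tensor example_entry(at::Tensor input) {
  CHECK_INPUT(input);  // CHECK_CUDA(input); CHECK_CONTIGUOUS(input)
  return input.sum();
}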
csrc/multi_tensor_apply.cuh

@@ -2,6 +2,7 @@
 #include <ATen/AccumulateType.h>
 #include <ATen/cuda/CUDAContext.h>
 #include <ATen/cuda/Exceptions.h>
+#include "compat.h"

 #include <assert.h>
...
@@ -45,19 +46,19 @@ void multi_tensor_apply(
   T callable,
   ArgTypes... args)
 {
-  AT_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
+  TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
   int len0 = tensor_lists[0].size();
-  AT_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
+  TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
   for(int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices
   {
-    AT_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
+    TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
     for(int t = 0; t < tensor_lists[l].size(); t++)
     {
       // TODO: Print which tensor fails.
-      AT_CHECK(tensor_lists[l][t].is_contiguous(), "A tensor was not contiguous.");
-      AT_CHECK(tensor_lists[l][t].is_cuda(), "A tensor was not cuda.");
-      AT_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
+      TORCH_CHECK(tensor_lists[l][t].is_contiguous(), "A tensor was not contiguous.");
+      TORCH_CHECK(tensor_lists[l][t].is_cuda(), "A tensor was not cuda.");
+      TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
     }
   }
...
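
Worth noting before the remaining files: the rename is mechanical. TORCH_CHECK takes the same arguments as AT_CHECK (a condition plus message pieces) and throws the same c10::Error on failure, so the runtime behavior of these checks is unchanged. A hedged sketch of the failure path, with a hypothetical caller:

// Illustrative sketch (demo and the hard-coded depth of 2 are hypothetical):
// a failed TORCH_CHECK throws a c10::Error carrying the message; a Python
// caller of the extension sees it as a RuntimeError.
#include <torch/extension.h>
#include <vector>

void demo(const std::vector<std::vector<at::Tensor>>& tensor_lists) {
  try {
    TORCH_CHECK(tensor_lists.size() == 2, "tensor_lists.size() != depth");
  } catch (const c10::Error& e) {
    // e.what() contains the message above plus context added by the macro.
  }
}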
csrc/multi_tensor_sgd_kernel.cu

@@ -3,6 +3,7 @@
 #include <ATen/cuda/CUDAContext.h>
 #include <ATen/cuda/Exceptions.h>
 #include "multi_tensor_apply.cuh"
+#include "compat.h"

 #include <assert.h>
 #include <cuda_runtime.h>
...
@@ -156,7 +157,7 @@ void multi_tensor_sgd_cuda(
   if(num_tensors == 4)
     for(int i = 0; i < tensor_lists[3].size(); i++)
-      AT_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
+      TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
                 "Additional output tensors should always be fp16.");

   // We have 3 possibilities to handle here, in terms of
...
csrc/welford.cu

@@ -9,6 +9,7 @@
 #include <vector>

 #include "type_shim.h"
+#include "compat.h"

 __device__ __forceinline__ int lastpow2(int n)
...
@@ -953,7 +954,7 @@ at::Tensor batchnorm_forward_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1027,7 +1028,7 @@ std::vector<at::Tensor> reduce_bn_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1095,7 +1096,7 @@ at::Tensor batchnorm_backward_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1237,7 +1238,7 @@ at::Tensor batchnorm_forward_c_last_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1320,7 +1321,7 @@ std::vector<at::Tensor> reduce_bn_c_last_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1387,7 +1388,7 @@ at::Tensor batchnorm_backward_c_last_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...
@@ -1451,7 +1452,7 @@ at::Tensor relu_backward_c_last_CUDA(
     );
   } else {
     if (weight.has_value()) {
-      AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
+      TORCH_CHECK(input.scalar_type() == weight.value().scalar_type(),
        "input.scalar_type() is not supported with weight.scalar_type()");
     }
     using namespace at;
...