OpenDAS / torch-scatter · Commits

Commit b88d5613
authored Dec 20, 2017 by rusty1s

    typo

Parent: ede5e330
Showing 1 changed file with 10 additions and 10 deletions.

torch_scatter/kernel/THCAtomics.cuh  (+10 -10)
@@ -70,9 +70,9 @@ struct AtomicIntegerImpl<T, 8> {
 template <typename T, size_t n>
 struct AtomicDecimalImpl;
 
-template <>
-struct AtomicDecimalImpl<float, 4> {
-  inline __device__ void operator()(float *address, float val) {
+template <typename T>
+struct AtomicDecimalImpl<T, 4> {
+  inline __device__ void operator()(T *address, T val) {
     int *address_as_i = (int *) address;
     int old = *address_as_i;
     int assumed;
@@ -84,9 +84,9 @@ struct AtomicDecimalImpl<float, 4> {
   }
 };
 
-template <>
-struct AtomicDecimalImpl<double, 8> {
-  inline __device__ void operator()(double *address, double val) {
+template <typename T>
+struct AtomicDecimalImpl<T, 8> {
+  inline __device__ void operator()(T *address, T val) {
     unsigned long long int *address_as_ull = (unsigned long long int *) address;
     unsigned long long int old = *address_as_ull;
     unsigned long long int assumed;
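Both hunks above make the same fix: the explicit specializations AtomicDecimalImpl<float, 4> and AtomicDecimalImpl<double, 8> become partial specializations AtomicDecimalImpl<T, 4> and AtomicDecimalImpl<T, 8>, so the implementation is selected by the byte width of T rather than by a hard-coded element type. The compare-and-swap loop itself sits in the collapsed part of the diff; the sketch below is a minimal reconstruction of the 4-byte case, assuming the standard CUDA atomicCAS retry pattern and a max reduction, so the loop body and the reinterpret_cast punning are illustrative assumptions rather than a verbatim copy of the file.

// Minimal sketch, assuming the usual atomicCAS retry loop; only the
// signatures are taken from the hunks above, the body is an assumption.
template <typename T, size_t n>
struct AtomicDecimalImpl;

template <typename T>
struct AtomicDecimalImpl<T, 4> {
  inline __device__ void operator()(T *address, T val) {
    int *address_as_i = (int *) address;
    int old = *address_as_i;
    int assumed;
    do {
      assumed = old;
      // Reinterpret the stored 4 bytes as T, fold in the new value
      // (a max here), and try to publish the result with a CAS;
      // retry if another thread updated the slot in the meantime.
      T current = *reinterpret_cast<T *>(&assumed);
      T next = current < val ? val : current;
      old = atomicCAS(address_as_i, assumed, *reinterpret_cast<int *>(&next));
    } while (assumed != old);
  }
};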
@@ -99,11 +99,11 @@ struct AtomicDecimalImpl<double, 8> {
 };
 
 static inline __device__ void atomicMax(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
 static inline __device__ void atomicMax(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
 static inline __device__ void atomicMax(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
 static inline __device__ void atomicMax(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
 static inline __device__ void atomicMax(float *address, float val) { AtomicDecimalImpl<float, sizeof(float)>()(address, val); }
 static inline __device__ void atomicMax(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
 #ifdef CUDA_HALF_TENSOR
 static inline __device__ void atomicMax(half *address, half val) {}
 #endif
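The last hunk is the dispatch layer: static inline __device__ atomicMax overloads that forward to AtomicIntegerImpl or AtomicDecimalImpl, passing sizeof(type) as the size argument, which is exactly what the new size-keyed partial specializations plug into. As a purely hypothetical usage sketch (the kernel and its names below are invented for illustration and are not part of this commit; only the atomicMax(float *, float) overload comes from THCAtomics.cuh), a scatter-max style kernel would use the overload like this:

#include <cstdint>
// Assumes the overloads from torch_scatter/kernel/THCAtomics.cuh are in
// scope, e.g. via #include "THCAtomics.cuh"; CUDA has no built-in
// atomicMax(float *, float), which is why the header provides one.

__global__ void scatter_max_example(const float *src, const int64_t *index,
                                    float *out, int num_elements) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_elements) {
    // Fold src[i] into out[index[i]]; overload resolution picks the float
    // version, which dispatches to AtomicDecimalImpl<float, sizeof(float)>.
    atomicMax(out + index[i], src[i]);
  }
}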