OpenDAS / torch-scatter · Commits · bdf2563a
"tests/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "ea01a4c7f9ab979b00cb8cd38a2ecb409d8cf279"
Commit bdf2563a, authored Dec 20, 2017 by rusty1s
added all atomic ops
Parent: 406e523f
Showing 1 changed file with 143 additions and 129 deletions:
torch_scatter/kernel/THCAtomics.cuh (+143, -129)
torch_scatter/kernel/THCAtomics.cuh (view file @ bdf2563a)
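For orientation: this header gives scatter kernels a uniform atomicAdd/atomicMul/atomicDiv/atomicMax/atomicMin surface over element types that CUDA has no native atomics for. A minimal sketch of a consumer (hypothetical kernel, not part of this commit):

__global__ void scatter_max_kernel(const int16_t *src, const int64_t *index,
                                   int16_t *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Resolves to the CAS-based atomicMax(int16_t *, int16_t) overload
    // defined in this file; concurrent writes to out[index[i]] cannot race.
    atomicMax(out + index[i], src[i]);
  }
}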
-#define OP(X, Y) max(X, Y)
-
-template <typename T, size_t n>
-struct AtomicIntegerImpl;
-
-template<typename T>
-struct AtomicIntegerImpl<T, 1> {
-  inline __device__ void operator()(T *address, T val) {
-    uint32_t * address_as_ui = (uint32_t *) (address - ((size_t) address & 3));
-    uint32_t old = *address_as_ui;
-    uint32_t shift = ((size_t) address & 3) * 8;
-    uint32_t res;
-    uint32_t assumed;
-
-    do {
-      assumed = old;
-      res = OP(val, T((old >> shift) & 0xff));
-      old = (old & ~(0x000000ff << shift)) | (res << shift);
-      old = atomicCAS(address_as_ui, assumed, old);
-    } while (assumed != old);
-  }
-};
-
-template<typename T>
-struct AtomicIntegerImpl<T, 2> {
-  inline __device__ void operator()(T *address, T val) {
-    uint32_t * address_as_ui = (uint32_t *) ((char *) address - ((size_t) address & 2));
-    uint32_t old = *address_as_ui;
-    uint32_t res;
-    uint32_t newval;
-    uint32_t assumed;
-
-    do {
-      assumed = old;
-      res = OP(val, (size_t) address & 2 ? T(old >> 16) : T(old & 0xffff));
-      newval = (size_t) address & 2 ? (old & 0xffff) | (res << 16) : (old & 0xffff0000) | res;
-      old = atomicCAS(address_as_ui, assumed, newval);
-    } while (assumed != old);
-  }
-};
-
-template<typename T>
-struct AtomicIntegerImpl<T, 4> {
-  inline __device__ void operator()(T *address, T val) {
-    uint32_t *address_as_ui = (uint32_t *) address;
-    uint32_t old = *address_as_ui;
-    uint32_t assumed;
-
-    do {
-      assumed = old;
-      old = atomicCAS(address_as_ui, assumed, OP(val, (T) old));
-    } while (assumed != old);
-  }
-};
-
-template<typename T>
-struct AtomicIntegerImpl<T, 8> {
-  inline __device__ void operator()(T *address, T val) {
-    unsigned long long *address_as_ull = (unsigned long long *) address;
-    unsigned long long old = *address_as_ull;
-    unsigned long long assumed;
-
-    do {
-      assumed = old;
-      old = atomicCAS(address_as_ull, assumed, OP(val, (T) old));
-    } while (assumed != old);
-  }
-};
-
-template <typename T, size_t n>
-struct AtomicDecimalImpl;
-
-template <typename T>
-struct AtomicDecimalImpl<T, 4> {
-  inline __device__ void operator()(T *address, T val) {
-    int *address_as_i = (int *) address;
-    int old = *address_as_i;
-    int assumed;
-
-    do {
-      assumed = old;
-      old = atomicCAS(address_as_i, assumed, __float_as_int(OP(val, __int_as_float(assumed))));
-    } while (assumed != old);
-  }
-};
-
-template <typename T>
-struct AtomicDecimalImpl<T, 8> {
-  inline __device__ void operator()(T *address, T val) {
-    unsigned long long int *address_as_ull = (unsigned long long int *) address;
-    unsigned long long int old = *address_as_ull;
-    unsigned long long int assumed;
-
-    do {
-      assumed = old;
-      old = atomicCAS(address_as_ull, assumed, __double_as_longlong(OP(val, __longlong_as_double(assumed))));
-    } while (assumed != old);
-  }
-};
-
-static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
-static inline __device__ void atomicAdd(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
-static inline __device__ void atomicAdd(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
-static inline __device__ void atomicAdd(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
+#define ATOMIC_(NAME) \
+template <typename T, size_t n> \
+struct TH_CONCAT_3(Atomic, NAME, IntegerImpl); \
+\
+template<typename T> \
+struct TH_CONCAT_3(Atomic, NAME, IntegerImpl)<T, 1> { \
+  inline __device__ void operator()(T *address, T val) { \
+    uint32_t * address_as_ui = (uint32_t *) (address - ((size_t) address & 3)); \
+    uint32_t old = *address_as_ui; \
+    uint32_t shift = ((size_t) address & 3) * 8; \
+    uint32_t res; \
+    uint32_t assumed; \
+\
+    do { \
+      assumed = old; \
+      res = OP(val, T((old >> shift) & 0xff)); \
+      old = (old & ~(0x000000ff << shift)) | (res << shift); \
+      old = atomicCAS(address_as_ui, assumed, old); \
+    } while (assumed != old); \
+  } \
+}; \
+\
+template<typename T> \
+struct TH_CONCAT_3(Atomic, NAME, IntegerImpl)<T, 2> { \
+  inline __device__ void operator()(T *address, T val) { \
+    uint32_t * address_as_ui = (uint32_t *) ((char *) address - ((size_t) address & 2)); \
+    uint32_t old = *address_as_ui; \
+    uint32_t res; \
+    uint32_t newval; \
+    uint32_t assumed; \
+\
+    do { \
+      assumed = old; \
+      res = OP(val, (size_t) address & 2 ? T(old >> 16) : T(old & 0xffff)); \
+      newval = (size_t) address & 2 ? (old & 0xffff) | (res << 16) : (old & 0xffff0000) | res; \
+      old = atomicCAS(address_as_ui, assumed, newval); \
+    } while (assumed != old); \
+  } \
+}; \
+\
+template<typename T> \
+struct TH_CONCAT_3(Atomic, NAME, IntegerImpl)<T, 4> { \
+  inline __device__ void operator()(T *address, T val) { \
+    uint32_t *address_as_ui = (uint32_t *) address; \
+    uint32_t old = *address_as_ui; \
+    uint32_t assumed; \
+\
+    do { \
+      assumed = old; \
+      old = atomicCAS(address_as_ui, assumed, OP(val, (T) old)); \
+    } while (assumed != old); \
+  } \
+}; \
+\
+template<typename T> \
+struct TH_CONCAT_3(Atomic, NAME, IntegerImpl)<T, 8> { \
+  inline __device__ void operator()(T *address, T val) { \
+    unsigned long long *address_as_ull = (unsigned long long *) address; \
+    unsigned long long old = *address_as_ull; \
+    unsigned long long assumed; \
+\
+    do { \
+      assumed = old; \
+      old = atomicCAS(address_as_ull, assumed, OP(val, (T) old)); \
+    } while (assumed != old); \
+  } \
+}; \
+\
+template <typename T, size_t n> \
+struct TH_CONCAT_3(Atomic, NAME, DecimalImpl); \
+\
+template <typename T> \
+struct TH_CONCAT_3(Atomic, NAME, DecimalImpl)<T, 4> { \
+  inline __device__ void operator()(T *address, T val) { \
+    int *address_as_i = (int *) address; \
+    int old = *address_as_i; \
+    int assumed; \
+\
+    do { \
+      assumed = old; \
+      old = atomicCAS(address_as_i, assumed, __float_as_int(OP(val, __int_as_float(assumed)))); \
+    } while (assumed != old); \
+  } \
+}; \
+\
+template <typename T> \
+struct TH_CONCAT_3(Atomic, NAME, DecimalImpl)<T, 8> { \
+  inline __device__ void operator()(T *address, T val) { \
+    unsigned long long int *address_as_ull = (unsigned long long int *) address; \
+    unsigned long long int old = *address_as_ull; \
+    unsigned long long int assumed; \
+\
+    do { \
+      assumed = old; \
+      old = atomicCAS(address_as_ull, assumed, __double_as_longlong(OP(val, __longlong_as_double(assumed)))); \
+    } while (assumed != old); \
+  } \
+};
+
+#define OP(X, Y) X + Y
+ATOMIC_(Add)
+#undef OP
+
+static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { AtomicAddIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
+static inline __device__ void atomicAdd(int8_t *address, int8_t val) { AtomicAddIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
+static inline __device__ void atomicAdd(int16_t *address, int16_t val) { AtomicAddIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
+static inline __device__ void atomicAdd(int64_t *address, int64_t val) { AtomicAddIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
 
 #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600 || CUDA_VERSION < 8000)
-static inline __device__ void atomicAdd(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
+static inline __device__ void atomicAdd(double *address, double val) { AtomicAddDecimalImpl<double, sizeof(double)>()(address, val); }
 #elif !defined(__CUDA_ARCH__) && (CUDA_VERSION < 8000)
 static inline __device__ void atomicAdd(double *address, double val) {}
 #endif
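The <T, 1> specialization works because atomicCAS only operates on 32-bit (or 64-bit) words: the address is rounded down to its enclosing aligned word, the single byte is spliced into position, and the loop retries whenever another thread changed the word in the meantime. The same idea written out as a standalone byte-wise add (illustrative names, not from this file):

#include <stdint.h>

static __device__ void atomic_add_u8(uint8_t *address, uint8_t val) {
  // Round down to the enclosing 4-byte word; atomicCAS needs a 32-bit slot.
  uint32_t *word = (uint32_t *) ((size_t) address & ~(size_t) 3);
  uint32_t shift = ((size_t) address & 3) * 8;  // byte offset -> bit offset
  uint32_t old = *word, assumed;
  do {
    assumed = old;
    uint8_t cur = (uint8_t) ((old >> shift) & 0xff);             // extract our byte
    uint32_t res = (uint32_t) (uint8_t) (cur + val);             // add, wrap to 8 bits
    uint32_t next = (old & ~(0xffu << shift)) | (res << shift);  // splice back
    old = atomicCAS(word, assumed, next);  // commit only if word is unchanged
  } while (assumed != old);                // otherwise another thread won; retry
}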
...
@@ -111,44 +113,56 @@ static inline __device__ void atomicAdd(double *address, double val) {}
 static inline __device__ void atomicAdd(half *address, half val) {}
 #endif
 
-static inline __device__ void atomicMul(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
-static inline __device__ void atomicMul(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
-static inline __device__ void atomicMul(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
-static inline __device__ void atomicMul(int32_t *address, int32_t val) { AtomicIntegerImpl<int32_t, sizeof(int32_t)>()(address, val); }
-static inline __device__ void atomicMul(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
-static inline __device__ void atomicMul(float *address, float val) { AtomicDecimalImpl<float, sizeof(float)>()(address, val); }
-static inline __device__ void atomicMul(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
+#define OP(X, Y) X * Y
+ATOMIC_(Mul)
+#undef OP
+
+static inline __device__ void atomicMul(uint8_t *address, uint8_t val) { AtomicMulIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
+static inline __device__ void atomicMul(int8_t *address, int8_t val) { AtomicMulIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
+static inline __device__ void atomicMul(int16_t *address, int16_t val) { AtomicMulIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
+static inline __device__ void atomicMul(int32_t *address, int32_t val) { AtomicMulIntegerImpl<int32_t, sizeof(int32_t)>()(address, val); }
+static inline __device__ void atomicMul(int64_t *address, int64_t val) { AtomicMulIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
+static inline __device__ void atomicMul(float *address, float val) { AtomicMulDecimalImpl<float, sizeof(float)>()(address, val); }
+static inline __device__ void atomicMul(double *address, double val) { AtomicMulDecimalImpl<double, sizeof(double)>()(address, val); }
 #ifdef CUDA_HALF_TENSOR
 static inline __device__ void atomicMul(half *address, half val) {}
 #endif
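A quick sanity check for the new overloads is a kernel in which every thread folds into the same cell; eight threads each multiplying by 2 must leave exactly 256. A minimal test driver, assuming the TH macros this header relies on (TH_CONCAT_3) are on the include path:

#include <cstdio>
#include "torch_scatter/kernel/THCAtomics.cuh"  // assumed include path

__global__ void mul_kernel(float *cell) {
  atomicMul(cell, 2.0f);  // CAS loop applies each thread's factor exactly once
}

int main() {
  float host = 1.0f, *cell;
  cudaMalloc(&cell, sizeof(float));
  cudaMemcpy(cell, &host, sizeof(float), cudaMemcpyHostToDevice);
  mul_kernel<<<1, 8>>>(cell);  // 8 threads x (*2) -> 2^8
  cudaMemcpy(&host, cell, sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f\n", host);  // expect 256.0
  cudaFree(cell);
  return 0;
}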
 
-static inline __device__ void atomicDiv(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
-static inline __device__ void atomicDiv(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
-static inline __device__ void atomicDiv(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
-static inline __device__ void atomicDiv(int32_t *address, int32_t val) { AtomicIntegerImpl<int32_t, sizeof(int32_t)>()(address, val); }
-static inline __device__ void atomicDiv(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
-static inline __device__ void atomicDiv(float *address, float val) { AtomicDecimalImpl<float, sizeof(float)>()(address, val); }
-static inline __device__ void atomicDiv(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
+#define OP(X, Y) X / Y
+ATOMIC_(Div)
+#undef OP
+
+static inline __device__ void atomicDiv(uint8_t *address, uint8_t val) { AtomicDivIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
+static inline __device__ void atomicDiv(int8_t *address, int8_t val) { AtomicDivIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
+static inline __device__ void atomicDiv(int16_t *address, int16_t val) { AtomicDivIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
+static inline __device__ void atomicDiv(int32_t *address, int32_t val) { AtomicDivIntegerImpl<int32_t, sizeof(int32_t)>()(address, val); }
+static inline __device__ void atomicDiv(int64_t *address, int64_t val) { AtomicDivIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
+static inline __device__ void atomicDiv(float *address, float val) { AtomicDivDecimalImpl<float, sizeof(float)>()(address, val); }
+static inline __device__ void atomicDiv(double *address, double val) { AtomicDivDecimalImpl<double, sizeof(double)>()(address, val); }
 #ifdef CUDA_HALF_TENSOR
 static inline __device__ void atomicDiv(half *address, half val) {}
 #endif
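Since TH_CONCAT_3 pastes tokens, #define OP(X, Y) X / Y followed by ATOMIC_(Div) stamps out AtomicDivIntegerImpl and AtomicDivDecimalImpl with the division baked into each CAS loop. Roughly, the 4-byte integer case expands to (a sketch of the preprocessor output, reformatted, not literal output):

#include <stdint.h>

template <typename T, size_t n>
struct AtomicDivIntegerImpl;

template <typename T>
struct AtomicDivIntegerImpl<T, 4> {
  inline __device__ void operator()(T *address, T val) {
    uint32_t *address_as_ui = (uint32_t *) address;
    uint32_t old = *address_as_ui;
    uint32_t assumed;
    do {
      assumed = old;
      // OP(val, (T) old) expands to val / (T) old:
      old = atomicCAS(address_as_ui, assumed, val / (T) old);
    } while (assumed != old);
  }
};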
 
-static inline __device__ void atomicMax(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
-static inline __device__ void atomicMax(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
-static inline __device__ void atomicMax(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
-static inline __device__ void atomicMax(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
-static inline __device__ void atomicMax(float *address, float val) { AtomicDecimalImpl<float, sizeof(float)>()(address, val); }
-static inline __device__ void atomicMax(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
+#define OP(X, Y) max(X, Y)
+ATOMIC_(Max)
+#undef OP
+
+static inline __device__ void atomicMax(uint8_t *address, uint8_t val) { AtomicMaxIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
+static inline __device__ void atomicMax(int8_t *address, int8_t val) { AtomicMaxIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
+static inline __device__ void atomicMax(int16_t *address, int16_t val) { AtomicMaxIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
+static inline __device__ void atomicMax(int64_t *address, int64_t val) { AtomicMaxIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
+static inline __device__ void atomicMax(float *address, float val) { AtomicMaxDecimalImpl<float, sizeof(float)>()(address, val); }
+static inline __device__ void atomicMax(double *address, double val) { AtomicMaxDecimalImpl<double, sizeof(double)>()(address, val); }
 #ifdef CUDA_HALF_TENSOR
 static inline __device__ void atomicMax(half *address, half val) {}
 #endif
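For float and double there is no typed CAS, so the DecimalImpl loops round-trip the value through an integer view: reinterpret the bits, apply the op on the floating-point view, reinterpret back, and CAS on the integer view. Written out for a 32-bit float max (standalone sketch of the same technique, illustrative name):

static __device__ void atomic_max_f32(float *address, float val) {
  int *address_as_i = (int *) address;  // integer view of the same 4 bytes
  int old = *address_as_i, assumed;
  do {
    assumed = old;
    // Compute max on the float view, store through the int view.
    old = atomicCAS(address_as_i, assumed,
                    __float_as_int(fmaxf(val, __int_as_float(assumed))));
  } while (assumed != old);
}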
 
-static inline __device__ void atomicMin(uint8_t *address, uint8_t val) { AtomicIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
-static inline __device__ void atomicMin(int8_t *address, int8_t val) { AtomicIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
-static inline __device__ void atomicMin(int16_t *address, int16_t val) { AtomicIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
-static inline __device__ void atomicMin(int64_t *address, int64_t val) { AtomicIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
-static inline __device__ void atomicMin(float *address, float val) { AtomicDecimalImpl<float, sizeof(float)>()(address, val); }
-static inline __device__ void atomicMin(double *address, double val) { AtomicDecimalImpl<double, sizeof(double)>()(address, val); }
+#define OP(X, Y) min(X, Y)
+ATOMIC_(Min)
+#undef OP
+
+static inline __device__ void atomicMin(uint8_t *address, uint8_t val) { AtomicMinIntegerImpl<uint8_t, sizeof(uint8_t)>()(address, val); }
+static inline __device__ void atomicMin(int8_t *address, int8_t val) { AtomicMinIntegerImpl<int8_t, sizeof(int8_t)>()(address, val); }
+static inline __device__ void atomicMin(int16_t *address, int16_t val) { AtomicMinIntegerImpl<int16_t, sizeof(int16_t)>()(address, val); }
+static inline __device__ void atomicMin(int64_t *address, int64_t val) { AtomicMinIntegerImpl<int64_t, sizeof(int64_t)>()(address, val); }
+static inline __device__ void atomicMin(float *address, float val) { AtomicMinDecimalImpl<float, sizeof(float)>()(address, val); }
+static inline __device__ void atomicMin(double *address, double val) { AtomicMinDecimalImpl<double, sizeof(double)>()(address, val); }
 #ifdef CUDA_HALF_TENSOR
 static inline __device__ void atomicMin(half *address, half val) {}
 #endif