Unverified Commit 889798fe authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] clang-format auto fix. (#4831)



* [Misc] clang-format auto fix.

* blabla

* nolint

* blabla
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 7c059e86
-/*!
+/**
* Copyright (c) 2022 by Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,19 +13,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
- * \file array/cuda/bf16.cuh
- * \brief bfloat16 related functions.
+ * @file array/cuda/bf16.cuh
+ * @brief bfloat16 related functions.
*/
#ifndef DGL_ARRAY_CUDA_BF16_CUH_
#define DGL_ARRAY_CUDA_BF16_CUH_
#if BF16_ENABLED
#include <cuda_bf16.h>
#include <algorithm>
-static __device__ __forceinline__ __nv_bfloat16 max(__nv_bfloat16 a,
-                                                    __nv_bfloat16 b) {
+static __device__ __forceinline__ __nv_bfloat16
+max(__nv_bfloat16 a, __nv_bfloat16 b) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
return __hmax(a, b);
#else
@@ -33,8 +33,8 @@ static __device__ __forceinline__ __nv_bfloat16 max(__nv_bfloat16 a,
#endif
}
-static __device__ __forceinline__ __nv_bfloat16 min(__nv_bfloat16 a,
-                                                    __nv_bfloat16 b) {
+static __device__ __forceinline__ __nv_bfloat16
+min(__nv_bfloat16 a, __nv_bfloat16 b) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
return __hmin(a, b);
#else
@@ -43,53 +43,68 @@ static __device__ __forceinline__ __nv_bfloat16 min(__nv_bfloat16 a,
}
#ifdef __CUDACC__
-// Arithmetic BF16 operations for architecture >= 8.0 are already defined in cuda_bf16.h
+// Arithmetic BF16 operations for architecture >= 8.0 are already defined in
+// cuda_bf16.h
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
-__device__ __forceinline__ __nv_bfloat16 operator+(
-    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
+__device__ __forceinline__ __nv_bfloat16
+operator+(const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return __nv_bfloat16(float(lh) + float(rh)); // NOLINT
}
-__device__ __forceinline__ __nv_bfloat16 operator-(
-    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
+__device__ __forceinline__ __nv_bfloat16
+operator-(const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return __nv_bfloat16(float(lh) - float(rh)); // NOLINT
}
-__device__ __forceinline__ __nv_bfloat16 operator*(
-    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
+__device__ __forceinline__ __nv_bfloat16
+operator*(const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return __nv_bfloat16(float(lh) * float(rh)); // NOLINT
}
-__device__ __forceinline__ __nv_bfloat16 operator/(
-    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
+__device__ __forceinline__ __nv_bfloat16
+operator/(const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return __nv_bfloat16(float(lh) / float(rh)); // NOLINT
}
__device__ __forceinline__ __nv_bfloat16& operator+=(
__nv_bfloat16& lh, const __nv_bfloat16& rh) { // NOLINT
-  lh = __nv_bfloat16(float(lh) + float(rh)); return lh; // NOLINT
+  lh = __nv_bfloat16(float(lh) + float(rh)); // NOLINT
+  return lh;
}
__device__ __forceinline__ __nv_bfloat16& operator-=(
__nv_bfloat16& lh, const __nv_bfloat16& rh) { // NOLINT
-  lh = __nv_bfloat16(float(lh) - float(rh)); return lh; // NOLINT
+  lh = __nv_bfloat16(float(lh) - float(rh)); // NOLINT
+  return lh;
}
__device__ __forceinline__ __nv_bfloat16& operator*=(
__nv_bfloat16& lh, const __nv_bfloat16& rh) { // NOLINT
-  lh = __nv_bfloat16(float(lh) * float(rh)); return lh; // NOLINT
+  lh = __nv_bfloat16(float(lh) * float(rh)); // NOLINT
+  return lh;
}
__device__ __forceinline__ __nv_bfloat16& operator/=(
__nv_bfloat16& lh, const __nv_bfloat16& rh) { // NOLINT
-  lh = __nv_bfloat16(float(lh) / float(rh)); return lh; // NOLINT
+  lh = __nv_bfloat16(float(lh) / float(rh)); // NOLINT
+  return lh;
}
-__device__ __forceinline__ __nv_bfloat16& operator++(__nv_bfloat16& h) { // NOLINT
-  h = __nv_bfloat16(float(h) + 1.0f); return h; // NOLINT
+__device__ __forceinline__ __nv_bfloat16& operator++(
+    __nv_bfloat16& h) { // NOLINT
+  h = __nv_bfloat16(float(h) + 1.0f); // NOLINT
+  return h;
}
-__device__ __forceinline__ __nv_bfloat16& operator--(__nv_bfloat16& h) { // NOLINT
-  h = __nv_bfloat16(float(h) - 1.0f); return h; // NOLINT
+__device__ __forceinline__ __nv_bfloat16& operator--(
+    __nv_bfloat16& h) { // NOLINT
+  h = __nv_bfloat16(float(h) - 1.0f); // NOLINT
+  return h;
}
-__device__ __forceinline__ __nv_bfloat16 operator++(__nv_bfloat16& h, int) { // NOLINT
-  __nv_bfloat16 ret = h; h = __nv_bfloat16(float(h) + 1.0f); return ret; // NOLINT
+__device__ __forceinline__ __nv_bfloat16
+operator++(__nv_bfloat16& h, int) { // NOLINT
+  __nv_bfloat16 ret = h;
+  h = __nv_bfloat16(float(h) + 1.0f); // NOLINT
+  return ret;
}
-__device__ __forceinline__ __nv_bfloat16 operator--(__nv_bfloat16& h, int) { // NOLINT
-  __nv_bfloat16 ret = h; h = __nv_bfloat16(float(h) - 1.0f); return ret; // NOLINT
+__device__ __forceinline__ __nv_bfloat16
+operator--(__nv_bfloat16& h, int) { // NOLINT
+  __nv_bfloat16 ret = h;
+  h = __nv_bfloat16(float(h) - 1.0f); // NOLINT
+  return ret;
}
__device__ __forceinline__ __nv_bfloat16 operator+(const __nv_bfloat16& h) {
@@ -99,28 +114,28 @@ __device__ __forceinline__ __nv_bfloat16 operator-(const __nv_bfloat16& h) {
return __nv_bfloat16(-float(h)); // NOLINT
}
-__device__ __forceinline__ bool operator==(const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator==(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) == float(rh); // NOLINT
}
-__device__ __forceinline__ bool operator!=(const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator!=(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) != float(rh); // NOLINT
}
-__device__ __forceinline__ bool operator> (const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator>(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) > float(rh); // NOLINT
}
-__device__ __forceinline__ bool operator< (const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator<(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) < float(rh); // NOLINT
}
-__device__ __forceinline__ bool operator>=(const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator>=(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) >= float(rh); // NOLINT
}
-__device__ __forceinline__ bool operator<=(const __nv_bfloat16& lh,
-                                           const __nv_bfloat16& rh) {
+__device__ __forceinline__ bool operator<=(
+    const __nv_bfloat16& lh, const __nv_bfloat16& rh) {
return float(lh) <= float(rh); // NOLINT
}
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
......
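
As a usage note on the hunks above: the operator overloads in bf16.cuh exist so that templated kernels still compile for __nv_bfloat16 on architectures older than sm_80, where cuda_bf16.h does not provide the arithmetic operators. The sketch below is not part of this commit; the kernel name and shape are purely illustrative.

#if BF16_ENABLED
#include <cuda_bf16.h>
#endif  // BF16_ENABLED
#include <cstdint>

// Elementwise y[i] = alpha * x[i] + y[i]. For DType = __nv_bfloat16 on
// __CUDA_ARCH__ < 800, operator* and operator+ resolve to the float-roundtrip
// fallbacks defined in bf16.cuh; on sm_80+ the native cuda_bf16.h operators
// are picked up instead.
template <typename DType>
__global__ void AxpyKernel(const DType* x, DType* y, DType alpha, int64_t n) {
  const int64_t i =
      static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < n) {
    y[i] = alpha * x[i] + y[i];
  }
}
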
-/*!
+/**
* Copyright (c) 2019 by Contributors
- * \file graph/serialize/dglstream.h
- * \brief Graph serialization header
+ * @file graph/serialize/dglstream.h
+ * @brief Graph serialization header
*/
#ifndef DGL_GRAPH_SERIALIZE_DGLSTREAM_H_
#define DGL_GRAPH_SERIALIZE_DGLSTREAM_H_
@@ -9,20 +9,22 @@
#include <dgl/aten/spmat.h>
#include <dmlc/io.h>
#include <dmlc/type_traits.h>
#include <memory>
namespace dgl {
namespace serialize {
-/*!
- * \brief DGLStream counts the bytes that already written into the
+/**
+ * @brief DGLStream counts the bytes that already written into the
* underlying stream.
*/
class DGLStream : public dmlc::Stream {
public:
-  /*! \brief create a new DGLStream instance */
-  static DGLStream *Create(const char *uri, const char *const flag,
-                           bool allow_null, dgl_format_code_t formats) {
+  /** @brief create a new DGLStream instance */
+  static DGLStream *Create(
+      const char *uri, const char *const flag, bool allow_null,
+      dgl_format_code_t formats) {
return new DGLStream(uri, flag, allow_null, formats);
}
@@ -45,10 +47,11 @@ class DGLStream : public dmlc::Stream {
uint64_t FormatsToSave() const { return formats_to_save_; }
private:
-  DGLStream(const char *uri, const char *const flag, bool allow_null,
+  DGLStream(
+      const char *uri, const char *const flag, bool allow_null,
dgl_format_code_t formats)
-      : strm_(dmlc::Stream::Create(uri, flag, allow_null)), formats_to_save_(formats) {
-  }
+      : strm_(dmlc::Stream::Create(uri, flag, allow_null)),
+        formats_to_save_(formats) {}
// stream for serialization
std::unique_ptr<dmlc::Stream> strm_;
// size of already written to stream
......
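
For context on the reformatted Create() factory above, the snippet below sketches how it might be called. It is not taken from this commit; the file path, flag, and function name are placeholders, and the destructor of DGLStream is assumed to be accessible so the stream can be owned by a std::unique_ptr.

#include <cstdint>
#include <memory>

#include "graph/serialize/dglstream.h"

// Open a stream for writing; DGLStream wraps dmlc::Stream::Create() and keeps
// track of how many bytes have been written to the underlying stream.
void SaveExample(dgl_format_code_t formats) {
  std::unique_ptr<dgl::serialize::DGLStream> strm(
      dgl::serialize::DGLStream::Create(
          "/tmp/graph.dgl", "w", /*allow_null=*/false, formats));
  const uint64_t to_save = strm->FormatsToSave();  // formats requested by the caller
  (void)to_save;
  // ... serialize graph data through the dmlc::Stream interface ...
}
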
@@ -99,7 +99,9 @@ struct Frontiers {
/** @brief a vector store for the nodes/edges in all the frontiers */
std::vector<dgl_id_t> ids;
-  /** @brief a vector store for node/edge tags. Empty if no tags are requested */
+  /**
+   * @brief a vector store for node/edge tags. Empty if no tags are requested
+   */
std::vector<int64_t> tags;
/** @brief a section vector to indicate each frontier */
......
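
The Frontiers fragment above stores every frontier back to back in flat vectors. Purely as an illustration (not code from DGL or from this commit), the loop below assumes a per-frontier size vector matching the "section vector" comment; the real member name is cut off in this diff, so section_sizes here is a made-up stand-in, and plain uint64_t stands in for dgl_id_t.

#include <cstddef>
#include <cstdint>
#include <vector>

// Walk flattened frontiers: ids holds all nodes/edges contiguously, and
// section_sizes (assumed name) gives how many of them belong to each frontier.
void VisitFrontiers(
    const std::vector<uint64_t>& ids,
    const std::vector<int64_t>& section_sizes) {
  std::size_t offset = 0;
  for (std::size_t f = 0; f < section_sizes.size(); ++f) {
    for (int64_t k = 0; k < section_sizes[f]; ++k) {
      const uint64_t id = ids[offset + static_cast<std::size_t>(k)];
      (void)id;  // process the k-th node/edge of frontier f here
    }
    offset += static_cast<std::size_t>(section_sizes[f]);
  }
}
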