Unverified Commit 619d735d authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace \xxx with @XXX in structured comment. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
/*!
 * Copyright (c) 2021-2022 by Contributors
- * \file array/cuda/array_index_select.cuh
- * \brief Array index select GPU kernel implementation
+ * @file array/cuda/array_index_select.cuh
+ * @brief Array index select GPU kernel implementation
 */
#ifndef DGL_ARRAY_CUDA_ARRAY_INDEX_SELECT_CUH_
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cpu/array_nonzero.cc
- * \brief Array nonzero CPU implementation
+ * @file array/cpu/array_nonzero.cc
+ * @brief Array nonzero CPU implementation
 */
#include <dgl/array.h>
......
/*!
 * Copyright (c) 2020-2021 by Contributors
- * \file array/cuda/array_op_impl.cu
- * \brief Array operator GPU implementation
+ * @file array/cuda/array_op_impl.cu
+ * @brief Array operator GPU implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
......
/*!
 * Copyright (c) 2019 by Contributors
- * \file array/cuda/array_scatter.cu
- * \brief Array scatter GPU implementation
+ * @file array/cuda/array_scatter.cu
+ * @brief Array scatter GPU implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cpu/array_sort.cu
- * \brief Array sort GPU implementation
+ * @file array/cpu/array_sort.cu
+ * @brief Array sort GPU implementation
 */
#include <dgl/array.h>
......
/*!
 * Copyright (c) 2019 by Contributors
- * \file array/cuda/atomic.cuh
- * \brief Atomic functions
+ * @file array/cuda/atomic.cuh
+ * @brief Atomic functions
 */
#ifndef DGL_ARRAY_CUDA_ATOMIC_CUH_
#define DGL_ARRAY_CUDA_ATOMIC_CUH_
@@ -171,16 +171,16 @@ DEFINE_ATOMIC(Add)
/**
- * \brief Performs an atomic compare-and-swap on 64 bit integers. That is,
+ * @brief Performs an atomic compare-and-swap on 64 bit integers. That is,
 * it the word `old` at the memory location `address`, computes
 * `(old == compare ? val : old)` , and stores the result back to memory at
 * the same address.
 *
- * \param address The address to perform the atomic operation on.
- * \param compare The value to compare to.
- * \param val The new value to conditionally store.
+ * @param address The address to perform the atomic operation on.
+ * @param compare The value to compare to.
+ * @param val The new value to conditionally store.
 *
- * \return The old value at the address.
+ * @return The old value at the address.
 */
inline __device__ int64_t AtomicCAS(
    int64_t * const address,
@@ -197,16 +197,16 @@ inline __device__ int64_t AtomicCAS(
}

/**
- * \brief Performs an atomic compare-and-swap on 32 bit integers. That is,
+ * @brief Performs an atomic compare-and-swap on 32 bit integers. That is,
 * it the word `old` at the memory location `address`, computes
 * `(old == compare ? val : old)` , and stores the result back to memory at
 * the same address.
 *
- * \param address The address to perform the atomic operation on.
- * \param compare The value to compare to.
- * \param val The new value to conditionally store.
+ * @param address The address to perform the atomic operation on.
+ * @param compare The value to compare to.
+ * @param val The new value to conditionally store.
 *
- * \return The old value at the address.
+ * @return The old value at the address.
 */
inline __device__ int32_t AtomicCAS(
    int32_t * const address,
......
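The two AtomicCAS overloads documented above follow the usual compare-and-swap contract: the old word is returned, and the new value is stored only when the old word equals `compare`. A minimal sketch of that contract and of how such a primitive is typically consumed; this is not code from this commit, the names are hypothetical, and only CUDA's built-in `atomicCAS` on unsigned long long is assumed:

#include <cstdint>

// Sketch only: read the old word, atomically store `(old == compare ? val : old)`,
// and return the old word. A signed 64-bit wrapper only needs casts around the
// built-in unsigned long long overload of atomicCAS.
__device__ inline int64_t AtomicCAS64Sketch(
    int64_t* const address, const int64_t compare, const int64_t val) {
  using ULL = unsigned long long int;  // 64-bit on all CUDA targets
  return static_cast<int64_t>(atomicCAS(
      reinterpret_cast<ULL*>(address),
      static_cast<ULL>(compare),
      static_cast<ULL>(val)));
}

// Hypothetical kernel using the classic CAS retry loop to clamp values at `limit`.
__global__ void CapAtLimit(int64_t* data, int64_t limit, int64_t n) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  int64_t old = data[i];
  while (old > limit) {
    const int64_t seen = AtomicCAS64Sketch(&data[i], old, limit);
    if (seen == old) break;  // our swap won
    old = seen;              // another thread updated the slot; retry
  }
}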
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/coo2csr.cc
- * \brief COO2CSR
+ * @file array/cuda/coo2csr.cc
+ * @brief COO2CSR
 */
#include <dgl/array.h>
@@ -59,7 +59,7 @@ CSRMatrix COOToCSR<kDGLCUDA, int32_t>(COOMatrix coo) {
}

/*!
- * \brief Search for the insertion positions for needle in the hay.
+ * @brief Search for the insertion positions for needle in the hay.
 *
 * The hay is a list of sorted elements and the result is the insertion position
 * of each needle so that the insertion still gives sorted order.
......
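The coo2csr.cc comment above describes a searchsorted-style primitive: for each needle, find a position in the sorted hay at which it could be inserted without breaking the order. A rough sketch of that idea, one thread per needle with lower-bound semantics assumed (hypothetical kernel, not the DGL implementation):

#include <cstdint>

// One thread per needle: binary-search the sorted `hay` and record the
// insertion position that keeps the order (first index whose element is >= needle).
template <typename IdType>
__global__ void SearchSortedKernel(
    const IdType* hay, int64_t hay_len,
    const IdType* needles, int64_t num_needles,
    int64_t* out_pos) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= num_needles) return;
  const IdType needle = needles[i];
  int64_t lo = 0, hi = hay_len;
  while (lo < hi) {
    const int64_t mid = lo + (hi - lo) / 2;
    if (hay[mid] < needle)
      lo = mid + 1;
    else
      hi = mid;
  }
  out_pos[i] = lo;
}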
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/coo_sort.cc
- * \brief Sort COO index
+ * @file array/cuda/coo_sort.cc
+ * @brief Sort COO index
 */
#include <dgl/array.h>
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/csr2coo.cc
- * \brief CSR2COO
+ * @file array/cuda/csr2coo.cc
+ * @brief CSR2COO
 */
#include <dgl/array.h>
@@ -46,12 +46,12 @@ COOMatrix CSRToCOO<kDGLCUDA, int32_t>(CSRMatrix csr) {
}

/*!
- * \brief Repeat elements
- * \param val Value to repeat
- * \param repeats Number of repeats for each value
- * \param pos The position of the output buffer to write the value.
- * \param out Output buffer.
- * \param length Number of values
+ * @brief Repeat elements
+ * @param val Value to repeat
+ * @param repeats Number of repeats for each value
+ * @param pos The position of the output buffer to write the value.
+ * @param out Output buffer.
+ * @param length Number of values
 *
 * For example:
 * val = [3, 0, 1]
......
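The csr2coo.cc snippet documents a repeat/expand primitive: `val[i]` is written `repeats[i]` times into `out`, starting at offset `pos[i]`, where `pos` is understood to be an exclusive prefix sum of `repeats` so the output segments are disjoint. A short sketch of that expansion with one thread per input value (hypothetical kernel, not the DGL implementation; the example values below are chosen here for illustration, not taken from the source):

#include <cstdint>

// Write val[i] into out[pos[i] .. pos[i] + repeats[i]). Because `pos` is the
// exclusive prefix sum of `repeats`, the segments never overlap and no atomics
// are needed.
template <typename IdType>
__global__ void RepeatKernel(
    const IdType* val, const IdType* repeats, const IdType* pos,
    IdType* out, int64_t length) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= length) return;
  const IdType start = pos[i];
  for (IdType j = 0; j < repeats[i]; ++j) {
    out[start + j] = val[i];
  }
}

For instance, val = [3, 0, 1] with repeats = [2, 1, 3] and pos = [0, 2, 3] would produce out = [3, 3, 0, 1, 1, 1].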
/*!
 * Copyright (c) 2021 by Contributors
- * \file array/cuda/csr_get_data.cu
- * \brief Retrieve entries of a CSR matrix
+ * @file array/cuda/csr_get_data.cu
+ * @brief Retrieve entries of a CSR matrix
 */
#include <dgl/array.h>
#include <vector>
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/csr_mm.cu
- * \brief SpSpMM/SpGEMM C APIs and definitions.
+ * @file array/cuda/csr_mm.cu
+ * @brief SpSpMM/SpGEMM C APIs and definitions.
 */
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
@@ -18,7 +18,7 @@ namespace cusparse {
#if 0 // disabling CUDA 11.0+ implementation for now because of problems on bigger graphs
-/*! \brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */
+/*! @brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
    const CSRMatrix& A,
@@ -127,7 +127,7 @@ std::pair<CSRMatrix, NDArray> CusparseSpgemm(
#else // __CUDACC_VER_MAJOR__ != 11
-/*! \brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */
+/*! @brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseSpgemm(
    const CSRMatrix& A,
......
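The csr_mm.cu hunks show two SpGEMM paths selected at compile time on `__CUDACC_VER_MAJOR__`, with the CUDA 11.0+ path currently disabled via `#if 0`. A stripped-down sketch of that gating pattern only; the bodies are placeholders, not the DGL implementation:

// Compile-time selection between a generic-API SpGEMM path (CUDA 11+) and a
// legacy path. Only the gating pattern is shown.
#if defined(__CUDACC_VER_MAJOR__) && (__CUDACC_VER_MAJOR__ >= 11)
template <typename DType, typename IdType>
void CusparseSpgemmSketch() {
  // generic-API path would go here (cusparseSpGEMM_workEstimation /
  // cusparseSpGEMM_compute / cusparseSpGEMM_copy)
}
#else
template <typename DType, typename IdType>
void CusparseSpgemmSketch() {
  // legacy csrgemm-style path would go here
}
#endif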
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/csr_sort.cc
- * \brief Sort CSR index
+ * @file array/cuda/csr_sort.cc
+ * @brief Sort CSR index
 */
#include <dgl/array.h>
@@ -17,7 +17,7 @@ namespace aten {
namespace impl {

/*!
- * \brief Check whether each row is sorted.
+ * @brief Check whether each row is sorted.
 */
template <typename IdType>
__global__ void _SegmentIsSorted(
......
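The `_SegmentIsSorted` kernel referenced above checks, per CSR row, whether the column indices are in nondecreasing order. A hedged sketch of that kind of per-row check, one thread per row; the name and signature here are illustrative, not DGL's:

#include <cstdint>

// flags[row] = 1 if the row's column indices are nondecreasing, else 0.
// `indptr` has num_rows + 1 entries delimiting each row's slice of `indices`.
template <typename IdType>
__global__ void RowIsSortedKernel(
    const IdType* indptr, const IdType* indices,
    int64_t num_rows, int8_t* flags) {
  const int64_t row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= num_rows) return;
  int8_t sorted = 1;
  for (IdType k = indptr[row] + 1; k < indptr[row + 1]; ++k) {
    if (indices[k - 1] > indices[k]) {
      sorted = 0;
      break;
    }
  }
  flags[row] = sorted;
}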
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/spmm.cu
- * \brief SpGEAM C APIs and definitions.
+ * @file array/cuda/spmm.cu
+ * @brief SpGEAM C APIs and definitions.
 */
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/csr_transpose.cc
- * \brief CSR transpose (convert to CSC)
+ * @file array/cuda/csr_transpose.cc
+ * @brief CSR transpose (convert to CSC)
 */
#include <dgl/array.h>
......
/*!
 * Copyright (c) 2021 by Contributors
- * \file array/cuda/cuda_filter.cc
- * \brief Object for selecting items in a set, or selecting items not in a set.
+ * @file array/cuda/cuda_filter.cc
+ * @brief Object for selecting items in a set, or selecting items not in a set.
 */
#include <dgl/runtime/device_api.h>
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/dispatcher.cuh
- * \brief Templates to dispatch into different cuSPARSE routines based on the type
+ * @file array/cuda/dispatcher.cuh
+ * @brief Templates to dispatch into different cuSPARSE routines based on the type
 *        argument.
 */
#ifndef DGL_ARRAY_CUDA_CUSPARSE_DISPATCHER_CUH_
@@ -15,7 +15,7 @@
namespace dgl {
namespace aten {

-/*! \brief cusparseXcsrgemm dispatcher */
+/*! @brief cusparseXcsrgemm dispatcher */
template <typename DType>
struct CSRGEMM {
  template <typename... Args>
@@ -122,7 +122,7 @@ struct CSRGEMM<double> {
  }
};

-/*! \brief cusparseXcsrgeam dispatcher */
+/*! @brief cusparseXcsrgeam dispatcher */
template <typename DType>
struct CSRGEAM {
  template <typename... Args>
......
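The dispatcher.cuh comments above describe struct templates (`CSRGEMM`, `CSRGEAM`) whose per-type specializations forward all arguments to the cuSPARSE routine matching the value type. A compact, hedged sketch of that dispatch pattern; the backend functions here are placeholders, since the real cuSPARSE signatures are much longer and are not reproduced:

#include <utility>

// Placeholder backends standing in for the type-specific cuSPARSE entry points.
inline void FakeCsrgemmS(int nnz, const float* vals) { /* float path */ }
inline void FakeCsrgemmD(int nnz, const double* vals) { /* double path */ }

// One struct template, specialized per scalar type; unsupported types fail to
// compile because the primary template is left undefined.
template <typename DType>
struct CsrGemmDispatcher;

template <>
struct CsrGemmDispatcher<float> {
  template <typename... Args>
  static void Compute(Args&&... args) {
    FakeCsrgemmS(std::forward<Args>(args)...);
  }
};

template <>
struct CsrGemmDispatcher<double> {
  template <typename... Args>
  static void Compute(Args&&... args) {
    FakeCsrgemmD(std::forward<Args>(args)...);
  }
};

// Usage: CsrGemmDispatcher<float>::Compute(nnz, float_ptr);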
/*!
 * Copyright (c) 2021 by Contributors
- * \file cuda_common.h
- * \brief Wrapper to place cub in dgl namespace.
+ * @file cuda_common.h
+ * @brief Wrapper to place cub in dgl namespace.
 */
#ifndef DGL_ARRAY_CUDA_DGL_CUB_CUH_
......
@@ -13,8 +13,8 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
- * \file array/gpu/disjoint_union.cu
- * \brief Disjoint union GPU implementation.
+ * @file array/gpu/disjoint_union.cu
+ * @brief Disjoint union GPU implementation.
 */
#include <dgl/array.h>
......
@@ -13,9 +13,9 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
- * \file array/cuda/fp16.cuh
- * \brief float16 related functions.
- * \note this file is modified from TVM project:
+ * @file array/cuda/fp16.cuh
+ * @brief float16 related functions.
+ * @note this file is modified from TVM project:
 * https://github.com/apache/tvm/blob/e561007f0c330e3d14c2bc8a3ef40fb741db9004/src/target/source/literal/cuda_half_t.h.
 */
#ifndef DGL_ARRAY_CUDA_FP16_CUH_
......
/*!
 * Copyright (c) 2020 by Contributors
- * \file array/cuda/functor.cuh
- * \brief Functors for template on CUDA
+ * @file array/cuda/functor.cuh
+ * @brief Functors for template on CUDA
 */
#ifndef DGL_ARRAY_CUDA_FUNCTOR_CUH_
#define DGL_ARRAY_CUDA_FUNCTOR_CUH_
......