Unverified Commit 619d735d authored by Hongzhi (Steve), Chen, committed by GitHub

[Misc] Replace \xxx with @XXX in structured comments. (#4822)



* param

* brief

* note

* return

* tparam

* brief2

* file

* return2

* return

* blabla

* all
Co-authored-by: Steve <ubuntu@ip-172-31-34-29.ap-northeast-1.compute.internal>
parent 96297fb8
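For context, Doxygen treats a backslash prefix and an at-sign prefix as equivalent command markers, so the change in this commit is purely stylistic. A minimal before/after illustration (not taken from the diff itself; the function names are made up):

/*! \brief Old style: backslash-prefixed Doxygen command. */
int add(int a, int b);

/*! @brief New style: at-sign-prefixed command; generates the same docs. */
int sub(int a, int b);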
/*!
* Copyright (c) 2018 by Contributors
* \file graph/shared_mem_manager.cc
* \brief DGL shared mem manager APIs
* @file graph/shared_mem_manager.cc
* @brief DGL shared mem manager APIs
*/
#ifndef DGL_GRAPH_SHARED_MEM_MANAGER_H_
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/subgraph.cc
* \brief Functions for extracting subgraphs.
* @file graph/subgraph.cc
* @brief Functions for extracting subgraphs.
*/
#include "./heterograph.h"
using namespace dgl::runtime;
......
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/compact.cc
* \brief Compact graph implementation
* @file graph/transform/compact.cc
* @brief Compact graph implementation
*/
#include "compact.h"
......
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/compact.h
* \brief Functions to find and eliminate the common isolated nodes across
* @file graph/transform/compact.h
* @brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
......
/*!
* Copyright (c) 2021 by Contributors
* \file graph/transform/cpu/kdtree_ndarray_adapter.h
* \brief NDArray adapter for nanoflann, without
* @file graph/transform/cpu/kdtree_ndarray_adapter.h
* @brief NDArray adapter for nanoflann, without
* duplicating the storage
*/
#ifndef DGL_GRAPH_TRANSFORM_CPU_KDTREE_NDARRAY_ADAPTER_H_
......@@ -19,19 +19,19 @@ namespace transform {
namespace knn_utils {
/*!
* \brief A simple 2D NDArray adapter for nanoflann, without duplicating the
* @brief A simple 2D NDArray adapter for nanoflann, without duplicating the
* storage.
*
* \tparam FloatType: The type of the point coordinates (typically, double or
* @tparam FloatType: The type of the point coordinates (typically, double or
* float).
* \tparam IdType: The type for indices in the KD-tree index (typically,
* @tparam IdType: The type for indices in the KD-tree index (typically,
size_t or int)
* \tparam FeatureDim: If set to > 0, it specifies a compile-time fixed
* @tparam FeatureDim: If set to > 0, it specifies a compile-time fixed
* dimensionality for the points in the data set, allowing more compiler
* optimizations.
* \tparam Dist: The distance metric to use: nanoflann::metric_L1,
* @tparam Dist: The distance metric to use: nanoflann::metric_L1,
nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
 * \note The spelling of dgl's adapter ("adapter") is different from nanoflann
 * @note The spelling of dgl's adapter ("adapter") is different from nanoflann
* ("adaptor")
*/
template <
......@@ -66,7 +66,7 @@ class KDTreeNDArrayAdapter {
index_type* GetIndex() { return index_; }
/*!
* \brief Query for the \a num_closest points to a given point
* @brief Query for the \a num_closest points to a given point
* Note that this is a short-cut method for GetIndex()->findNeighbors().
*/
void query(
......@@ -77,20 +77,20 @@ class KDTreeNDArrayAdapter {
index_->findNeighbors(resultSet, query_pt, nanoflann::SearchParams());
}
/*! \brief Interface expected by KDTreeSingleIndexAdaptor */
/*! @brief Interface expected by KDTreeSingleIndexAdaptor */
const self_type& derived() const { return *this; }
/*! \brief Interface expected by KDTreeSingleIndexAdaptor */
/*! @brief Interface expected by KDTreeSingleIndexAdaptor */
self_type& derived() { return *this; }
/*!
* \brief Interface expected by KDTreeSingleIndexAdaptor,
* @brief Interface expected by KDTreeSingleIndexAdaptor,
* return the number of data points
*/
size_t kdtree_get_point_count() const { return data_->shape[0]; }
/*!
* \brief Interface expected by KDTreeSingleIndexAdaptor,
* @brief Interface expected by KDTreeSingleIndexAdaptor,
* return the dim'th component of the idx'th point
*/
FloatType kdtree_get_pt(const size_t idx, const size_t dim) const {
......@@ -98,7 +98,7 @@ class KDTreeNDArrayAdapter {
}
/*!
* \brief Interface expected by KDTreeSingleIndexAdaptor.
* @brief Interface expected by KDTreeSingleIndexAdaptor.
* Optional bounding-box computation: return false to
* default to a standard bbox computation loop.
*
......
/*!
* Copyright (c) 2019 by Contributors
* \file graph/transform/cpu/knn.cc
* \brief k-nearest-neighbor (KNN) implementation
* @file graph/transform/cpu/knn.cc
* @brief k-nearest-neighbor (KNN) implementation
*/
#include "../knn.h"
......@@ -28,7 +28,7 @@ namespace impl {
static constexpr int NN_DESCENT_BLOCK_SIZE = 16384;
/*!
* \brief Compute Euclidean distance between two vectors, return positive
* @brief Compute Euclidean distance between two vectors, return positive
 * infinity if the intermediate distance is greater than the worst
* distance.
*/
......@@ -54,7 +54,7 @@ FloatType EuclideanDistWithCheck(
}
}
/*! \brief Compute Euclidean distance between two vectors */
/*! @brief Compute Euclidean distance between two vectors */
template <typename FloatType, typename IdType>
FloatType EuclideanDist(
const FloatType* vec1, const FloatType* vec2, int64_t dim) {
......@@ -67,7 +67,7 @@ FloatType EuclideanDist(
return dist;
}
/*! \brief Insert a new element into a heap */
/*! @brief Insert a new element into a heap */
template <typename FloatType, typename IdType>
void HeapInsert(
IdType* out, FloatType* dist, IdType new_id, FloatType new_dist, int k,
......@@ -104,7 +104,7 @@ void HeapInsert(
}
}
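The HeapInsert routine documented above keeps the current k best neighbors in a max-heap keyed by distance, so the worst candidate sits at the root and can be replaced in O(log k). A minimal standalone sketch of that idea (illustrative only, not the DGL implementation; the array layout is an assumption):

#include <utility>

// Sketch: replace the worst of the k current neighbors if the new one is closer,
// then sift the new root down to restore the max-heap property on dist[0..k-1].
template <typename FloatType, typename IdType>
void HeapInsertSketch(IdType* ids, FloatType* dist, IdType new_id,
                      FloatType new_dist, int k) {
  if (new_dist >= dist[0]) return;  // not better than the current worst
  ids[0] = new_id;
  dist[0] = new_dist;
  int cur = 0;
  while (true) {
    int left = 2 * cur + 1, right = 2 * cur + 2, largest = cur;
    if (left < k && dist[left] > dist[largest]) largest = left;
    if (right < k && dist[right] > dist[largest]) largest = right;
    if (largest == cur) break;
    std::swap(dist[cur], dist[largest]);
    std::swap(ids[cur], ids[largest]);
    cur = largest;
  }
}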
/*! \brief Insert a new element and its flag into the heap; return 1 if inserted
/*! @brief Insert a new element and its flag into the heap; return 1 if inserted
 * successfully */
template <typename FloatType, typename IdType>
int FlaggedHeapInsert(
......@@ -144,7 +144,7 @@ int FlaggedHeapInsert(
return 1;
}
/*! \brief Build heap for each point. Used by NN-descent */
/*! @brief Build heap for each point. Used by NN-descent */
template <typename FloatType, typename IdType>
void BuildHeap(IdType* index, FloatType* dist, int k) {
for (int i = k / 2 - 1; i >= 0; --i) {
......@@ -171,7 +171,7 @@ void BuildHeap(IdType* index, FloatType* dist, int k) {
}
/*!
* \brief Neighbor update process in NN-descent. The distance between
* @brief Neighbor update process in NN-descent. The distance between
 * two points is computed. If this new distance is less than the worst
 * distance of either point, we update that point's neighborhood.
*/
......@@ -208,7 +208,7 @@ int UpdateNeighbors(
return num_updates;
}
/*! \brief The kd-tree implementation of K-Nearest Neighbors */
/*! @brief The kd-tree implementation of K-Nearest Neighbors */
template <typename FloatType, typename IdType>
void KdTreeKNN(
const NDArray& data_points, const IdArray& data_offsets,
......
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_compact_graph.cu
* \brief Functions to find and eliminate the common isolated nodes across
* @file graph/transform/cuda/cuda_compact_graph.cu
* @brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
......@@ -41,15 +41,15 @@ namespace transform {
namespace {
/**
* \brief This function builds node maps for each node type, preserving the
* @brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the nodes are not unique,
* and thus a unique list is generated.
*
* \param input_nodes The set of input nodes.
* \param node_maps The node maps to be constructed.
* \param count_unique_device The number of unique nodes (on the GPU).
* \param unique_nodes_device The unique nodes (on the GPU).
* \param stream The stream to operate on.
* @param input_nodes The set of input nodes.
* @param node_maps The node maps to be constructed.
* @param count_unique_device The number of unique nodes (on the GPU).
* @param unique_nodes_device The unique nodes (on the GPU).
* @param stream The stream to operate on.
*/
template <typename IdType>
void BuildNodeMaps(
......
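The BuildNodeMaps documentation above describes de-duplicating the input nodes while preserving their first-seen order and recording a global-to-local id mapping. The real code builds GPU hash tables on a CUDA stream; the host-side sketch below only illustrates the mapping semantics (names and types are assumptions):

#include <cstdint>
#include <unordered_map>
#include <vector>

// Concept sketch: assign local ids to global node ids in first-seen order.
std::vector<int64_t> BuildNodeMapSketch(
    const std::vector<int64_t>& input_nodes,
    std::unordered_map<int64_t, int64_t>* global_to_local) {
  std::vector<int64_t> unique_nodes;
  for (int64_t global_id : input_nodes) {
    if (global_to_local->count(global_id) == 0) {
      (*global_to_local)[global_id] = static_cast<int64_t>(unique_nodes.size());
      unique_nodes.push_back(global_id);
    }
  }
  return unique_nodes;  // unique node list, order of first appearance preserved
}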
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_map_edges.cuh
* \brief Device level functions for mapping edges.
* @file graph/transform/cuda/cuda_map_edges.cuh
* @brief Device level functions for mapping edges.
*/
#ifndef DGL_GRAPH_TRANSFORM_CUDA_CUDA_MAP_EDGES_CUH_
......@@ -59,20 +59,20 @@ __device__ void map_vertex_ids(
}
/**
* \brief Generate mapped edge endpoint ids.
* @brief Generate mapped edge endpoint ids.
*
* \tparam IdType The type of id.
* \tparam BLOCK_SIZE The size of each thread block.
* \tparam TILE_SIZE The number of edges to process per thread block.
* \param global_srcs_device The source ids to map.
* \param new_global_srcs_device The mapped source ids (output).
* \param global_dsts_device The destination ids to map.
* \param new_global_dsts_device The mapped destination ids (output).
* \param num_edges The number of edges to map.
 * \param src_mapping The mapping of source ids.
 * \param src_hash_size The size of the source id hash table/mapping.
 * \param dst_mapping The mapping of destination ids.
 * \param dst_hash_size The size of the destination id hash table/mapping.
* @tparam IdType The type of id.
* @tparam BLOCK_SIZE The size of each thread block.
* @tparam TILE_SIZE The number of edges to process per thread block.
* @param global_srcs_device The source ids to map.
* @param new_global_srcs_device The mapped source ids (output).
* @param global_dsts_device The destination ids to map.
* @param new_global_dsts_device The mapped destination ids (output).
* @param num_edges The number of edges to map.
 * @param src_mapping The mapping of source ids.
 * @param src_hash_size The size of the source id hash table/mapping.
 * @param dst_mapping The mapping of destination ids.
 * @param dst_hash_size The size of the destination id hash table/mapping.
*/
template <typename IdType, int BLOCK_SIZE, IdType TILE_SIZE>
__global__ void map_edge_ids(
......@@ -95,14 +95,14 @@ __global__ void map_edge_ids(
}
/**
* \brief Device level node maps for each node type.
* @brief Device level node maps for each node type.
*
* \param num_nodes Number of nodes per type.
* \param offset When offset is set to 0, LhsHashTable is identical to
* @param num_nodes Number of nodes per type.
* @param offset When offset is set to 0, LhsHashTable is identical to
 * RhsHashTable. Or set it to num_nodes.size()/2 to use separate
* LhsHashTable and RhsHashTable.
* \param ctx The DGL context.
* \param stream The stream to operate on.
* @param ctx The DGL context.
* @param stream The stream to operate on.
*/
template <typename IdType>
class DeviceNodeMap {
......
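Given such a node map, the edge remapping documented for map_edge_ids above is conceptually a per-edge table lookup; the kernel performs it in TILE_SIZE chunks per thread block, while the serial sketch below shows only the semantics (function and parameter names are assumptions):

#include <cstdint>
#include <unordered_map>
#include <vector>

// Serial sketch of the edge-endpoint remapping idea (the kernel is parallel).
void MapEdgeIdsSketch(
    const std::vector<int64_t>& global_srcs,
    const std::vector<int64_t>& global_dsts,
    const std::unordered_map<int64_t, int64_t>& src_mapping,
    const std::unordered_map<int64_t, int64_t>& dst_mapping,
    std::vector<int64_t>* new_srcs, std::vector<int64_t>* new_dsts) {
  new_srcs->resize(global_srcs.size());
  new_dsts->resize(global_dsts.size());
  for (size_t i = 0; i < global_srcs.size(); ++i) {
    (*new_srcs)[i] = src_mapping.at(global_srcs[i]);
    (*new_dsts)[i] = dst_mapping.at(global_dsts[i]);
  }
}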
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_to_block.cu
* \brief Functions to convert a set of edges into a graph block with local
* @file graph/transform/cuda/cuda_to_block.cu
* @brief Functions to convert a set of edges into a graph block with local
* ids.
*/
......@@ -50,16 +50,16 @@ class DeviceNodeMapMaker {
}
/**
* \brief This function builds node maps for each node type, preserving the
* @brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the lhs_nodes are not unique,
* and thus a unique list is generated.
*
* \param lhs_nodes The set of source input nodes.
* \param rhs_nodes The set of destination input nodes.
* \param node_maps The node maps to be constructed.
* \param count_lhs_device The number of unique source nodes (on the GPU).
* \param lhs_device The unique source nodes (on the GPU).
* \param stream The stream to operate on.
* @param lhs_nodes The set of source input nodes.
* @param rhs_nodes The set of destination input nodes.
* @param node_maps The node maps to be constructed.
* @param count_lhs_device The number of unique source nodes (on the GPU).
* @param lhs_device The unique source nodes (on the GPU).
* @param stream The stream to operate on.
*/
void Make(
const std::vector<IdArray>& lhs_nodes,
......@@ -96,14 +96,14 @@ class DeviceNodeMapMaker {
}
/**
* \brief This function builds node maps for each node type, preserving the
* @brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed both lhs_nodes and rhs_nodes
* are unique.
*
* \param lhs_nodes The set of source input nodes.
* \param rhs_nodes The set of destination input nodes.
* \param node_maps The node maps to be constructed.
* \param stream The stream to operate on.
* @param lhs_nodes The set of source input nodes.
* @param rhs_nodes The set of destination input nodes.
* @param node_maps The node maps to be constructed.
* @param stream The stream to operate on.
*/
void Make(
const std::vector<IdArray>& lhs_nodes,
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/transform/cuda/knn.cu
* \brief k-nearest-neighbor (KNN) implementation (cuda)
* @file graph/transform/cuda/knn.cu
* @brief k-nearest-neighbor (KNN) implementation (cuda)
*/
#include <curand_kernel.h>
......@@ -23,7 +23,7 @@ namespace dgl {
namespace transform {
namespace impl {
/*!
* \brief Utility class used to avoid linker errors with extern
* @brief Utility class used to avoid linker errors with extern
* unsized shared memory arrays with templated type
*/
template <typename Type>
......@@ -54,7 +54,7 @@ struct SharedMemory<double> {
}
};
/*! \brief Compute Euclidean distance between two vectors in a cuda kernel */
/*! @brief Compute Euclidean distance between two vectors in a cuda kernel */
template <typename FloatType, typename IdType>
__device__ FloatType
EuclideanDist(const FloatType* vec1, const FloatType* vec2, const int64_t dim) {
......@@ -78,7 +78,7 @@ EuclideanDist(const FloatType* vec1, const FloatType* vec2, const int64_t dim) {
}
/*!
* \brief Compute Euclidean distance between two vectors in a cuda kernel,
* @brief Compute Euclidean distance between two vectors in a cuda kernel,
* return positive infinite value if the intermediate distance is greater
* than the worst distance.
*/
......@@ -239,7 +239,7 @@ __device__ bool FlaggedHeapInsert(
}
/*!
* \brief Brute force kNN kernel. Compute distance for each pair of input points
* @brief Brute force kNN kernel. Compute distance for each pair of input points
* and get the result directly (without a distance matrix).
*/
template <typename FloatType, typename IdType>
......@@ -279,7 +279,7 @@ __global__ void BruteforceKnnKernel(
}
/*!
* \brief Same as BruteforceKnnKernel, but use shared memory as buffer.
* @brief Same as BruteforceKnnKernel, but use shared memory as buffer.
* This kernel divides query points and data points into blocks. For each
query block, it loops over all data blocks and computes distances.
* This kernel is faster when the dimension of input points is not large.
......@@ -400,7 +400,7 @@ __global__ void BruteforceKnnShareKernel(
}
}
/*! \brief determine the number of blocks for each segment */
/*! @brief determine the number of blocks for each segment */
template <typename IdType>
__global__ void GetNumBlockPerSegment(
const IdType* offsets, IdType* out, const int64_t batch_size,
......@@ -411,7 +411,7 @@ __global__ void GetNumBlockPerSegment(
}
}
/*! \brief Get the batch index and local index in segment for each block */
/*! @brief Get the batch index and local index in segment for each block */
template <typename IdType>
__global__ void GetBlockInfo(
const IdType* num_block_prefixsum, IdType* block_batch_id,
......@@ -430,17 +430,17 @@ __global__ void GetBlockInfo(
}
/*!
* \brief Brute force kNN. Compute distance for each pair of input points and
* @brief Brute force kNN. Compute distance for each pair of input points and
* get the result directly (without a distance matrix).
*
* \tparam FloatType The type of input points.
* \tparam IdType The type of id.
* \param data_points NDArray of dataset points.
* \param data_offsets offsets of point index in data points.
 * \param query_points NDArray of query points.
 * \param query_offsets offsets of point index in query points.
 * \param k the number of nearest points.
 * \param result output array.
* @tparam FloatType The type of input points.
* @tparam IdType The type of id.
* @param data_points NDArray of dataset points.
* @param data_offsets offsets of point index in data points.
 * @param query_points NDArray of query points.
 * @param query_offsets offsets of point index in query points.
 * @param k the number of nearest points.
 * @param result output array.
*/
template <typename FloatType, typename IdType>
void BruteForceKNNCuda(
......@@ -473,19 +473,19 @@ void BruteForceKNNCuda(
}
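For reference, the brute-force variant documented above amounts to a doubly nested loop per segment plus a k-sized selection; the CUDA kernels parallelize this over query points and, in the shared-memory version, tile the data points. A host-side sketch for a single segment (illustrative only; layout and names are assumptions):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Sketch of brute-force kNN for one segment using squared L2 distance.
// result[q * k + j] holds the j-th nearest data index for query q.
void BruteForceKNNSegmentSketch(
    const std::vector<double>& data,   // row-major, num_data x dim
    const std::vector<double>& query,  // row-major, num_query x dim
    int64_t dim, int64_t k, std::vector<int64_t>* result) {
  const int64_t num_data = static_cast<int64_t>(data.size()) / dim;
  const int64_t num_query = static_cast<int64_t>(query.size()) / dim;
  result->assign(num_query * k, -1);
  for (int64_t q = 0; q < num_query; ++q) {
    std::vector<std::pair<double, int64_t>> cand;  // (distance, data index)
    for (int64_t d = 0; d < num_data; ++d) {
      double dist = 0.0;
      for (int64_t c = 0; c < dim; ++c) {
        const double diff = query[q * dim + c] - data[d * dim + c];
        dist += diff * diff;
      }
      cand.emplace_back(dist, d);
    }
    const int64_t kk = std::min(k, num_data);
    std::partial_sort(cand.begin(), cand.begin() + kk, cand.end());
    for (int64_t j = 0; j < kk; ++j) (*result)[q * k + j] = cand[j].second;
  }
}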
/*!
* \brief Brute force kNN with shared memory.
* @brief Brute force kNN with shared memory.
* This function divides query points and data points into blocks. For each
query block, it loops over all data blocks and computes distances.
* It will be faster when the dimension of input points is not large.
*
* \tparam FloatType The type of input points.
* \tparam IdType The type of id.
* \param data_points NDArray of dataset points.
* \param data_offsets offsets of point index in data points.
 * \param query_points NDArray of query points.
 * \param query_offsets offsets of point index in query points.
 * \param k the number of nearest points.
 * \param result output array.
* @tparam FloatType The type of input points.
* @tparam IdType The type of id.
* @param data_points NDArray of dataset points.
* @param data_offsets offsets of point index in data points.
 * @param query_points NDArray of query points.
 * @param query_offsets offsets of point index in query points.
 * @param k the number of nearest points.
 * @param result output array.
*/
template <typename FloatType, typename IdType>
void BruteForceKNNSharedCuda(
......@@ -575,7 +575,7 @@ void BruteForceKNNSharedCuda(
device->FreeWorkspace(ctx, block_batch_id);
}
/*! \brief Setup rng state for nn-descent */
/*! @brief Setup rng state for nn-descent */
__global__ void SetupRngKernel(
curandState* states, const uint64_t seed, const size_t n) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -585,7 +585,7 @@ __global__ void SetupRngKernel(
}
/*!
* \brief Randomly initialize neighbors (sampling without replacement)
* @brief Randomly initialize neighbors (sampling without replacement)
 * for each node
*/
template <typename FloatType, typename IdType>
......@@ -637,7 +637,7 @@ __global__ void RandomInitNeighborsKernel(
}
/*!
* \brief Randomly select candidates from current knn and reverse-knn graph for
* @brief Randomly select candidates from current knn and reverse-knn graph for
* nn-descent.
*/
template <typename IdType>
......@@ -735,7 +735,7 @@ __global__ void FindCandidatesKernel(
}
}
/*! \brief Update knn graph according to selected candidates for nn-descent */
/*! @brief Update knn graph according to selected candidates for nn-descent */
template <typename FloatType, typename IdType>
__global__ void UpdateNeighborsKernel(
const FloatType* points, const IdType* offsets, IdType* neighbors,
......
/*!
* Copyright (c) 2019 by Contributors
* \file graph/transform/knn.cc
* \brief k-nearest-neighbor (KNN) interface
* @file graph/transform/knn.cc
* @brief k-nearest-neighbor (KNN) interface
*/
#include "knn.h"
......
/*!
* Copyright (c) 2021 by Contributors
* \file graph/transform/knn.h
* \brief k-nearest-neighbor (KNN) implementation
* @file graph/transform/knn.h
* @brief k-nearest-neighbor (KNN) implementation
*/
#ifndef DGL_GRAPH_TRANSFORM_KNN_H_
......@@ -15,19 +15,19 @@ namespace dgl {
namespace transform {
/*!
* \brief For each point in each segment in \a query_points, find \a k nearest
* @brief For each point in each segment in \a query_points, find \a k nearest
* points in the same segment in \a data_points. \a data_offsets and \a
* query_offsets determine the start index of each segment in \a
* data_points and \a query_points.
*
* \param data_points dataset points.
* \param data_offsets offsets of point index in \a data_points.
* \param query_points query points.
* \param query_offsets offsets of point index in \a query_points.
* \param k the number of nearest points.
* \param result output array. A 2D tensor indicating the index relation
* @param data_points dataset points.
* @param data_offsets offsets of point index in \a data_points.
* @param query_points query points.
* @param query_offsets offsets of point index in \a query_points.
* @param k the number of nearest points.
* @param result output array. A 2D tensor indicating the index relation
* between \a query_points and \a data_points.
* \param algorithm algorithm used to compute the k-nearest neighbors.
* @param algorithm algorithm used to compute the k-nearest neighbors.
*/
template <DGLDeviceType XPU, typename FloatType, typename IdType>
void KNN(
......@@ -36,18 +36,18 @@ void KNN(
IdArray result, const std::string& algorithm);
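To make the segment layout concrete, here is a small assumed example consistent with the offsets convention described above (values are illustrative, not from DGL):

#include <cstdint>
#include <vector>

int main() {
  // Two segments: 3 data points then 2 data points, each of dimension 2.
  // data_offsets[i]..data_offsets[i+1] is the index range of segment i.
  std::vector<double> data_points = {0, 0, 1, 0, 0, 1, /* segment 1: */ 5, 5, 6, 5};
  std::vector<int64_t> data_offsets = {0, 3, 5};
  // One query point per segment, with the same offsets convention.
  std::vector<double> query_points = {0.1, 0.1, /* segment 1: */ 5.4, 5.1};
  std::vector<int64_t> query_offsets = {0, 1, 2};
  // With k = 1, each query is matched only against its own segment,
  // so the nearest neighbors are data indices 0 and 3 respectively.
  return 0;
}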
/*!
* \brief For each input point, find \a k approximate nearest points in the same
* @brief For each input point, find \a k approximate nearest points in the same
 * segment using the NN-descent algorithm.
*
* \param points input points.
* \param offsets offsets of point index.
* \param result output array. A 2D tensor indicating the index relation between
* @param points input points.
* @param offsets offsets of point index.
* @param result output array. A 2D tensor indicating the index relation between
* points.
* \param k the number of nearest points.
* \param num_iters The maximum number of NN-descent iterations to perform.
* \param num_candidates The maximum number of candidates to be considered
* @param k the number of nearest points.
* @param num_iters The maximum number of NN-descent iterations to perform.
* @param num_candidates The maximum number of candidates to be considered
* during one iteration.
 * \param delta A value that controls the early abort.
 * @param delta A value that controls the early abort.
*/
template <DGLDeviceType XPU, typename FloatType, typename IdType>
void NNDescent(
......
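The NNDescent interface above follows the usual NN-descent recipe: random neighbor initialization, repeated candidate selection from the knn and reverse-knn graphs, neighbor updates, and an early stop driven by delta. The schematic below shows only that control flow; the stopping rule shown is the conventional one and is an assumption about DGL's exact criterion:

#include <cstdint>

// Schematic outer loop of NN-descent (structure only, not DGL's code).
// RandomInit / SelectCandidates / UpdateNeighbors stand in for the kernels
// documented in the files above.
void NNDescentSketch(int64_t num_points, int64_t k, int num_iters,
                     int num_candidates, double delta) {
  // RandomInit();  // sample k random neighbors per point, without replacement
  for (int iter = 0; iter < num_iters; ++iter) {
    // SelectCandidates(num_candidates);  // from knn and reverse-knn graphs
    int64_t num_updates = 0;
    // num_updates = UpdateNeighbors();   // heap updates on improved distances
    if (num_updates <= static_cast<int64_t>(delta * k * num_points)) {
      break;  // early abort: too few updates to justify another iteration
    }
  }
}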
/*!
* Copyright (c) 2020 by Contributors
* \file graph/transform/line_graph.cc
* \brief Line graph implementation
* @file graph/transform/line_graph.cc
* @brief Line graph implementation
*/
#include <dgl/array.h>
......@@ -23,11 +23,11 @@ using namespace dgl::aten;
namespace transform {
/*!
* \brief Create Line Graph.
* \param hg Graph.
 * \param backtracking whether the pair of (v, u) and (u, v) edges is treated as
* @brief Create Line Graph.
* @param hg Graph.
 * @param backtracking whether the pair of (v, u) and (u, v) edges is treated as
* linked.
* \return The Line Graph.
* @return The Line Graph.
*/
HeteroGraphPtr CreateLineGraph(HeteroGraphPtr hg, bool backtracking) {
const auto hgp = std::dynamic_pointer_cast<HeteroGraph>(hg);
......
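The CreateLineGraph documentation above maps each edge of the input graph to a node of the line graph, connecting edge i to edge j when the destination of i is the source of j; with backtracking disabled, the immediate reverse edge is excluded. A concept sketch on a plain COO edge list (not DGL's heterograph implementation):

#include <cstdint>
#include <utility>
#include <vector>

// Concept sketch: line-graph edges over a homogeneous COO edge list.
std::vector<std::pair<int64_t, int64_t>> LineGraphEdgesSketch(
    const std::vector<int64_t>& src, const std::vector<int64_t>& dst,
    bool backtracking) {
  std::vector<std::pair<int64_t, int64_t>> out;
  for (size_t i = 0; i < src.size(); ++i) {
    for (size_t j = 0; j < src.size(); ++j) {
      if (i == j) continue;                 // skip trivial self-succession
      if (dst[i] != src[j]) continue;       // j must start where i ends
      if (!backtracking && src[i] == dst[j]) continue;  // skip the reverse edge
      out.emplace_back(static_cast<int64_t>(i), static_cast<int64_t>(j));
    }
  }
  return out;
}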
/*!
* Copyright (c) 2020 by Contributors
* \file graph/metis_partition.cc
* \brief Call Metis partitioning
* @file graph/metis_partition.cc
* @brief Call Metis partitioning
*/
#include <dgl/base_heterograph.h>
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/metis_partition.cc
* \brief Call Metis partitioning
* @file graph/metis_partition.cc
* @brief Call Metis partitioning
*/
#include <dgl/base_heterograph.h>
......
/*!
* Copyright (c) 2019 by Contributors
* \file graph/transform/remove_edges.cc
* \brief Remove edges.
* @file graph/transform/remove_edges.cc
* @brief Remove edges.
*/
#include <dgl/array.h>
......
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/to_bipartite.cc
* \brief Convert a graph to a bipartite-structured graph.
* @file graph/transform/to_bipartite.cc
* @brief Convert a graph to a bipartite-structured graph.
*/
#include "to_bipartite.h"
......
......@@ -13,8 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/to_bipartite.h
* \brief Functions to convert a set of edges into a graph block with local
* @file graph/transform/to_bipartite.h
* @brief Functions to convert a set of edges into a graph block with local
* ids.
*/
......
/*!
* Copyright (c) 2019 by Contributors
* \file graph/transform/to_simple.cc
* \brief Convert multigraphs to simple graphs
* @file graph/transform/to_simple.cc
* @brief Convert multigraphs to simple graphs
*/
#include <dgl/array.h>
......
/*!
* Copyright (c) 2020 by Contributors
* \file graph/transform/union_partition.cc
 * \brief Functions to partition and union multiple graphs.
* @file graph/transform/union_partition.cc
 * @brief Functions to partition and union multiple graphs.
*/
#include "../heterograph.h"
using namespace dgl::runtime;
......