"docker/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "a519272d97f011332588e1aaa73d32952d80f3f5"
Unverified Commit 1ad65879 authored by Triston's avatar Triston Committed by GitHub
Browse files

[Fix] Fix compiler warnings - part 1 (#4051)



* Fix a cub compile error for CUDA 11.5

* Fix comparison of integer expressions of different signedness in coo_sort.cu file

* Fix comparison of integer expressions of different signedness in cuda_compact_graph.cu file

* Remove never referenced variable in spmm.cu

* Fix comparison of integer expressions of different signedness in rowwise_pick.h file

* Fix comparison of integer expressions of different signedness in choice.cc file

* Remove never referenced variable col_data in spmat_op_impl_coo.cc

* Remove never referenced variable allowed in global_uniform.cc

* Fix comparison of integer expressions of different signedness in graph.cc

* Fix comparison of integer expressions of different signedness in graph_apis.cc

* Fix the unused ctx variable in ndarray_partition.cc for CPU-only builds

* Fix comparison of integer expressions of different signedness in libra_partition.cc

* Fix comparison of integer expressions of different signedness in graph_op.cc
Co-authored-by: Triston Cao <tristonc@nvidia.com>
Co-authored-by: Quan (Andy) Gan <coin2028@hotmail.com>
parent ab1b2811
......@@ -227,7 +227,7 @@ COOMatrix CSRRowWisePerEtypePick(CSRMatrix mat, IdArray rows, IdArray etypes,
}
runtime::parallel_for(0, num_rows, [&](size_t b, size_t e) {
for (int64_t i = b; i < e; ++i) {
for (size_t i = b; i < e; ++i) {
const IdxType rid = rows_data[i];
CHECK_LT(rid, mat.num_rows);
const IdxType off = indptr[rid];
......
......@@ -308,7 +308,6 @@ template <class IdType> CSRMatrix SortedCOOToCSR(const COOMatrix &coo) {
const int64_t N = coo.num_rows;
const int64_t NNZ = coo.row->shape[0];
const IdType *const row_data = static_cast<IdType *>(coo.row->data);
const IdType *const col_data = static_cast<IdType *>(coo.col->data);
const IdType *const data =
COOHasData(coo) ? static_cast<IdType *>(coo.data->data) : nullptr;
......
......@@ -74,7 +74,7 @@ int _NumberOfBits(const T& range) {
}
int bits = 1;
while (bits < sizeof(T)*8 && (1 << bits) < range) {
while (bits < static_cast<int>(sizeof(T)*8) && (1 << bits) < range) {
++bits;
}
......
......@@ -130,9 +130,9 @@ void LibraVertexCut(
if (u != v)
node_assignments[v].push_back(c);
CHECK(node_assignments[u].size() <= nc) <<
CHECK(node_assignments[u].size() <= static_cast<size_t>(nc)) <<
"[bug] 1. generated splits (u) are greater than nc!";
CHECK(node_assignments[v].size() <= nc) <<
CHECK(node_assignments[v].size() <= static_cast<size_t>(nc)) <<
"[bug] 1. generated splits (v) are greater than nc!";
edgenum_unassigned_ptr[u]--;
edgenum_unassigned_ptr[v]--;
......@@ -148,7 +148,7 @@ void LibraVertexCut(
community_weights_ptr[c] = community_weights_ptr[c] + w;
node_assignments[v].push_back(c);
CHECK(node_assignments[v].size() <= nc) <<
CHECK(node_assignments[v].size() <= static_cast<size_t>(nc)) <<
"[bug] 2. generated splits (v) are greater than nc!";
edgenum_unassigned_ptr[u]--;
edgenum_unassigned_ptr[v]--;
......@@ -166,7 +166,7 @@ void LibraVertexCut(
community_weights_ptr[c] = community_weights_ptr[c] + w;
node_assignments[u].push_back(c);
CHECK(node_assignments[u].size() <= nc) <<
CHECK(node_assignments[u].size() <= static_cast<size_t>(nc)) <<
"[bug] 3. generated splits (u) are greater than nc!";
edgenum_unassigned_ptr[u]--;
edgenum_unassigned_ptr[v]--;
......@@ -175,16 +175,16 @@ void LibraVertexCut(
for (int32_t j=0; j < nc; j++) setv[j] = 0;
int32_t interset = 0;
CHECK(node_assignments[u].size() <= nc) <<
CHECK(node_assignments[u].size() <= static_cast<size_t>(nc)) <<
"[bug] 4. generated splits (u) are greater than nc!";
CHECK(node_assignments[v].size() <= nc) <<
CHECK(node_assignments[v].size() <= static_cast<size_t>(nc)) <<
"[bug] 4. generated splits (v) are greater than nc!";
for (int32_t j=0; j < node_assignments[v].size(); j++) {
for (size_t j=0; j < node_assignments[v].size(); j++) {
CHECK(node_assignments[v][j] < nc) << "[bug] 4. Part assigned (v) greater than nc!";
setv[node_assignments[v][j]]++;
}
for (int32_t j=0; j < node_assignments[u].size(); j++) {
for (size_t j=0; j < node_assignments[u].size(); j++) {
CHECK(node_assignments[u][j] < nc) << "[bug] 4. Part assigned (u) greater than nc!";
setv[node_assignments[u][j]]++;
}
......@@ -197,7 +197,7 @@ void LibraVertexCut(
}
}
if (interset) {
for (int32_t j=0; j < intersetv.size(); j++) {
for (size_t j=0; j < intersetv.size(); j++) {
int32_t cind = intersetv[j];
cache[j] = community_edges[cind];
}
......@@ -228,7 +228,7 @@ void LibraVertexCut(
}
node_assignments[v].push_back(c);
CHECK(node_assignments[v].size() <= nc) <<
CHECK(node_assignments[v].size() <= static_cast<size_t>(nc)) <<
"[bug] 5. generated splits (v) greater than nc!!";
replication_list.push_back(v);
edgenum_unassigned_ptr[u]--;
......@@ -251,7 +251,7 @@ void LibraVertexCut(
if (u != v)
node_assignments[u].push_back(c);
CHECK(node_assignments[u].size() <= nc) <<
CHECK(node_assignments[u].size() <= static_cast<size_t>(nc)) <<
"[bug] 6. generated splits (u) greater than nc!!";
replication_list.push_back(u);
edgenum_unassigned_ptr[u]--;
......
......@@ -536,7 +536,7 @@ Subgraph Graph::EdgeSubgraph(IdArray eids, bool preserve_nodes) const {
rst.graph->AddEdge(src_id, dst_id);
}
for (int64_t i = 0; i < NumVertices(); ++i)
for (uint64_t i = 0; i < NumVertices(); ++i)
nodes.push_back(i);
rst.induced_vertices = IdArray::Empty(
......
......@@ -49,7 +49,7 @@ DGL_REGISTER_GLOBAL("graph_index._CAPI_DGLGraphCSRCreate")
IdArray edge_ids = IdArray::Empty({indices->shape[0]},
DLDataType{kDLInt, 64, 1}, DLContext{kDLCPU, 0});
int64_t *edge_data = static_cast<int64_t *>(edge_ids->data);
for (size_t i = 0; i < edge_ids->shape[0]; i++)
for (int64_t i = 0; i < edge_ids->shape[0]; i++)
edge_data[i] = i;
*rv = GraphRef(ImmutableGraph::CreateFromCSR(indptr, indices, edge_ids, edge_dir));
});
......
......@@ -117,8 +117,8 @@ GraphPtr GraphOp::DisjointUnion(std::vector<GraphPtr> graphs) {
CHECK(gr) << "All the input graphs should be immutable graphs.";
// TODO(minjie): why in csr?
const CSRPtr g_csrptr = gr->GetInCSR();
const int64_t g_num_nodes = g_csrptr->NumVertices();
const int64_t g_num_edges = g_csrptr->NumEdges();
const uint64_t g_num_nodes = g_csrptr->NumVertices();
const uint64_t g_num_edges = g_csrptr->NumEdges();
dgl_id_t* g_indptr = static_cast<dgl_id_t*>(g_csrptr->indptr()->data);
dgl_id_t* g_indices = static_cast<dgl_id_t*>(g_csrptr->indices()->data);
dgl_id_t* g_edge_ids = static_cast<dgl_id_t*>(g_csrptr->edge_ids()->data);
......@@ -234,11 +234,11 @@ std::vector<GraphPtr> GraphOp::DisjointPartitionBySizes(
g_indptr[l - start_pos] = indptr[l] - indptr[start_pos];
}
for (int j = indptr[start_pos]; j < indptr[end_pos]; ++j) {
for (dgl_id_t j = indptr[start_pos]; j < indptr[end_pos]; ++j) {
g_indices[j - idoff] = indices[j] - cumsum[i];
}
for (int k = indptr[start_pos]; k < indptr[end_pos]; ++k) {
for (dgl_id_t k = indptr[start_pos]; k < indptr[end_pos]; ++k) {
g_edge_ids[k - idoff] = edge_ids[k] - cum_sum_edges;
}
......@@ -390,7 +390,7 @@ GraphPtr GraphOp::ToBidirectedImmutableGraph(GraphPtr g) {
}
for (const dgl_id_t v : nbrs) {
const auto new_n_e = std::max(n_e[u][v], n_e[v][u]);
for (size_t i = 0; i < new_n_e; ++i) {
for (int i = 0; i < new_n_e; ++i) {
srcs.push_back(v);
dsts.push_back(u);
}
......
......@@ -26,8 +26,6 @@ std::pair<IdArray, IdArray> GlobalUniformNegativeSampling(
bool exclude_self_loops,
bool replace,
double redundancy) {
dgl_format_code_t allowed = hg->GetAllowedFormats();
auto format = hg->SelectFormat(etype, CSC_CODE | CSR_CODE);
if (format == SparseFormat::kCSC) {
CSRMatrix csc = hg->GetCSCMatrix(etype);
......
......@@ -95,7 +95,7 @@ CompactGraphsGPU(
CHECK_EQ(ctx.device_type, kDLGPU);
// Step 1: Collect the nodes that has connections for each type.
const int64_t num_ntypes = graphs[0]->NumVertexTypes();
const uint64_t num_ntypes = graphs[0]->NumVertexTypes();
std::vector<std::vector<EdgeArray>> all_edges(graphs.size()); // all_edges[i][etype]
// count the number of nodes per type
......@@ -122,7 +122,7 @@ CompactGraphsGPU(
std::vector<IdArray> all_nodes(num_ntypes);
std::vector<int64_t> node_offsets(num_ntypes, 0);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
all_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
// copy the nodes in always_preserve
......@@ -192,7 +192,7 @@ CompactGraphsGPU(
device->AllocWorkspace(ctx, sizeof(int64_t)*num_ntypes));
// the set of unique nodes per type
std::vector<IdArray> induced_nodes(num_ntypes);
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
}
......@@ -218,7 +218,7 @@ CompactGraphsGPU(
device->FreeWorkspace(ctx, count_unique_device);
// resize induced nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype]->shape[0] = num_induced_nodes[ntype];
}
......
......@@ -44,9 +44,8 @@ class RemainderPartition : public NDArrayPartition {
std::pair<IdArray, NDArray>
GeneratePermutation(
IdArray in_idx) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, {
return impl::GeneratePermutationFromRemainder<kDLGPU, IdType>(
......@@ -63,8 +62,8 @@ class RemainderPartition : public NDArrayPartition {
IdArray MapToLocal(
IdArray in_idx) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, {
return impl::MapToLocalFromRemainder<kDLGPU, IdType>(
......@@ -82,8 +81,8 @@ class RemainderPartition : public NDArrayPartition {
IdArray MapToGlobal(
IdArray in_idx,
const int part_id) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, {
return impl::MapToGlobalFromRemainder<kDLGPU, IdType>(
......@@ -129,9 +128,8 @@ class RangePartition : public NDArrayPartition {
std::pair<IdArray, NDArray>
GeneratePermutation(
IdArray in_idx) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
if (ctx.device_type != range_->ctx.device_type ||
ctx.device_id != range_->ctx.device_id) {
......@@ -155,8 +153,8 @@ class RangePartition : public NDArrayPartition {
IdArray MapToLocal(
IdArray in_idx) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, {
ATEN_ID_TYPE_SWITCH(range_->dtype, RangeType, {
......@@ -176,8 +174,8 @@ class RangePartition : public NDArrayPartition {
IdArray MapToGlobal(
IdArray in_idx,
const int part_id) const override {
auto ctx = in_idx->ctx;
#ifdef DGL_USE_CUDA
auto ctx = in_idx->ctx;
if (ctx.device_type == kDLGPU) {
ATEN_ID_TYPE_SWITCH(in_idx->dtype, IdType, {
ATEN_ID_TYPE_SWITCH(range_->dtype, RangeType, {
......
......@@ -93,7 +93,7 @@ void RandomEngine::UniformChoice(IdxType num, IdxType population, IdxType* out,
// case scenario, the time complexity is O(population^2). In practice,
// we use 1/10 since std::unordered_set is pretty slow.
std::unordered_set<IdxType> selected;
while (selected.size() < num) {
while (static_cast<IdxType>(selected.size()) < num) {
selected.insert(RandInt(population));
}
std::copy(selected.begin(), selected.end(), out);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment