sampler.cc 68.8 KB
Newer Older
1
/**
Da Zheng's avatar
Da Zheng committed
2
 *  Copyright (c) 2018 by Contributors
3
4
 * @file graph/sampler.cc
 * @brief DGL sampler implementation
Da Zheng's avatar
Da Zheng committed
5
 */
6
#include <dgl/array.h>
Da Zheng's avatar
Da Zheng committed
7
#include <dgl/immutable_graph.h>
8
#include <dgl/packed_func_ext.h>
9
#include <dgl/random.h>
10
#include <dgl/runtime/container.h>
11
#include <dgl/runtime/parallel_for.h>
12
#include <dgl/sampler.h>
13
#include <dmlc/omp.h>
14

Da Zheng's avatar
Da Zheng committed
15
#include <algorithm>
16
#include <cmath>
17
#include <cstdlib>
18
#include <numeric>
19

20
#include "../c_api_common.h"
Da Zheng's avatar
Da Zheng committed
21

22
using namespace dgl::runtime;
23

Da Zheng's avatar
Da Zheng committed
24
25
26
namespace dgl {

namespace {
27
/**
 * ArrayHeap is used to sample elements from vector.
 *
 * It stores the element weights in an implicit complete binary tree
 * (a sum "segment tree") laid out in a flat array: the leaves hold the
 * per-element weights, every internal node holds the sum of its two
 * children, and heap_[1] is the total weight. This makes weighted
 * sampling, deletion and weight updates all O(log m).
 */
template <typename ValueType>
class ArrayHeap {
 public:
  // NOTE(review): prob is assumed non-empty; log2(0) below is undefined.
  explicit ArrayHeap(const std::vector<ValueType> &prob) {
    vec_size_ = prob.size();
    // Height of the tree: smallest power of two >= vec_size_ leaves.
    bit_len_ = ceil(log2(vec_size_));
    limit_ = 1UL << bit_len_;
    // allocate twice the size
    heap_.resize(limit_ << 1, 0);
    // allocate the leaves: leaf for element i lives at heap_[limit_ + i]
    for (size_t i = limit_; i < vec_size_ + limit_; ++i) {
      heap_[i] = prob[i - limit_];
    }
    // iterate up the tree (this is O(m)): each internal node is the sum of
    // its two children.
    for (int i = bit_len_ - 1; i >= 0; --i) {
      for (size_t j = (1UL << i); j < (1UL << (i + 1)); ++j) {
        heap_[j] = heap_[j << 1] + heap_[(j << 1) + 1];
      }
    }
  }
  ~ArrayHeap() {}

  /**
   * Remove term from index (this costs O(log m) steps)
   */
  void Delete(size_t index) {
    size_t i = index + limit_;
    // Zero the leaf, then recompute every ancestor from its children.
    heap_[i] = 0;
    i /= 2;
    for (int j = bit_len_ - 1; j >= 0; --j) {
      // Using heap_[i] = heap_[i] - w will loss some precision in float.
      // Using addition to re-calculate the weight layer by layer.
      heap_[i] = heap_[i << 1] + heap_[(i << 1) + 1];
      i /= 2;
    }
  }

  /**
   * Add value w to index (this costs O(log m) steps)
   */
  void Add(size_t index, ValueType w) {
    size_t i = index + limit_;
    // Add w on the path from the leaf up to (and including) the root.
    for (int j = bit_len_; j >= 0; --j) {
      heap_[i] += w;
      i = i >> 1;
    }
  }

  /**
   * Sample from arrayHeap: draw one element index with probability
   * proportional to its current weight.
   */
  size_t Sample() {
    // Pick a uniform point in [0, total weight) and walk down the tree,
    // descending into the left child when the point falls inside its mass,
    // otherwise subtracting the left mass and descending right.
    ValueType xi = heap_[1] * RandomEngine::ThreadLocal()->Uniform<float>();
    size_t i = 1;
    while (i < limit_) {
      i = i << 1;
      if (xi >= heap_[i]) {
        xi -= heap_[i];
        i += 1;
      }
    }
    return i - limit_;
  }

  /**
   * Sample a vector by given the size n.
   *
   * Draws up to n distinct elements (each is deleted from the heap after
   * being drawn) into *samples and returns how many were actually drawn,
   * which can be fewer than n if the total weight reaches zero first.
   */
  size_t SampleWithoutReplacement(size_t n, std::vector<size_t> *samples) {
    // sample n elements
    size_t i = 0;
    for (; i < n; ++i) {
      // heap is empty: no remaining weight, stop early.
      if (heap_[1] == 0) {
        break;
      }
      samples->at(i) = this->Sample();
      this->Delete(samples->at(i));
    }
    return i;
  }

 private:
  size_t vec_size_;  // sample size
  int bit_len_;      // bit size
  size_t limit_;     // number of leaf slots (power of two >= vec_size_)
  std::vector<ValueType> heap_;
};

120
///////////////////////// Samplers //////////////////////////
121
class EdgeSamplerObject : public Object {
122
 public:
123
124
125
126
127
128
  EdgeSamplerObject(
      const GraphPtr gptr, IdArray seed_edges, const int64_t batch_size,
      const int64_t num_workers, const bool replacement, const bool reset,
      const std::string neg_mode, const int64_t neg_sample_size,
      const int64_t chunk_size, const bool exclude_positive,
      const bool check_false_neg, IdArray relations) {
129
130
131
132
133
134
    gptr_ = gptr;
    seed_edges_ = seed_edges;
    relations_ = relations;

    batch_size_ = batch_size;
    num_workers_ = num_workers;
135
136
    replacement_ = replacement;
    reset_ = reset;
137
138
139
140
    neg_mode_ = neg_mode;
    neg_sample_size_ = neg_sample_size;
    exclude_positive_ = exclude_positive;
    check_false_neg_ = check_false_neg;
141
    chunk_size_ = chunk_size;
142
143
144
145
  }

  ~EdgeSamplerObject() {}

146
  virtual void Fetch(DGLRetValue *rv) = 0;
147
  virtual void Reset() = 0;
148
149

 protected:
150
151
152
153
154
155
156
157
158
159
160
161
  virtual void randomSample(
      size_t set_size, size_t num, std::vector<size_t> *out) = 0;
  virtual void randomSample(
      size_t set_size, size_t num, const std::vector<size_t> &exclude,
      std::vector<size_t> *out) = 0;

  NegSubgraph genNegEdgeSubgraph(
      const Subgraph &pos_subg, const std::string &neg_mode,
      int64_t neg_sample_size, bool exclude_positive, bool check_false_neg);
  NegSubgraph genChunkedNegEdgeSubgraph(
      const Subgraph &pos_subg, const std::string &neg_mode,
      int64_t neg_sample_size, bool exclude_positive, bool check_false_neg);
162
163
164
165
166
167
168

  GraphPtr gptr_;
  IdArray seed_edges_;
  IdArray relations_;

  int64_t batch_size_;
  int64_t num_workers_;
169
170
  bool replacement_;
  int64_t reset_;
171
172
173
174
  std::string neg_mode_;
  int64_t neg_sample_size_;
  bool exclude_positive_;
  bool check_false_neg_;
175
  int64_t chunk_size_;
176
177
};

178
/**
Da Zheng's avatar
Da Zheng committed
179
180
 * Uniformly sample integers from [0, set_size) without replacement.
 */
181
void RandomSample(size_t set_size, size_t num, std::vector<size_t> *out) {
Da Zheng's avatar
Da Zheng committed
182
183
184
185
186
187
188
189
190
  if (num < set_size) {
    std::unordered_set<size_t> sampled_idxs;
    while (sampled_idxs.size() < num) {
      sampled_idxs.insert(RandomEngine::ThreadLocal()->RandInt(set_size));
    }
    out->insert(out->end(), sampled_idxs.begin(), sampled_idxs.end());
  } else {
    // If we need to sample all elements in the set, we don't need to
    // generate random numbers.
191
    for (size_t i = 0; i < set_size; i++) out->push_back(i);
Da Zheng's avatar
Da Zheng committed
192
  }
Da Zheng's avatar
Da Zheng committed
193
194
}

195
196
197
void RandomSample(
    size_t set_size, size_t num, const std::vector<size_t> &exclude,
    std::vector<size_t> *out) {
198
199
200
201
  std::unordered_map<size_t, int> sampled_idxs;
  for (auto v : exclude) {
    sampled_idxs.insert(std::pair<size_t, int>(v, 0));
  }
Da Zheng's avatar
Da Zheng committed
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
  if (num + exclude.size() < set_size) {
    while (sampled_idxs.size() < num + exclude.size()) {
      size_t rand = RandomEngine::ThreadLocal()->RandInt(set_size);
      sampled_idxs.insert(std::pair<size_t, int>(rand, 1));
    }
    for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) {
      if (it->second) {
        out->push_back(it->first);
      }
    }
  } else {
    // If we need to sample all elements in the set, we don't need to
    // generate random numbers.
    for (size_t i = 0; i < set_size; i++) {
      // If the element doesn't exist in exclude.
      if (sampled_idxs.find(i) == sampled_idxs.end()) {
        out->push_back(i);
      }
220
221
222
223
    }
  }
}

224
/**
Da Zheng's avatar
Da Zheng committed
225
226
227
 * For a sparse array whose non-zeros are represented by nz_idxs,
 * negate the sparse array and outputs the non-zeros in the negated array.
 */
228
229
230
void NegateArray(
    const std::vector<size_t> &nz_idxs, size_t arr_size,
    std::vector<size_t> *out) {
Da Zheng's avatar
Da Zheng committed
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
  // nz_idxs must have been sorted.
  auto it = nz_idxs.begin();
  size_t i = 0;
  CHECK_GT(arr_size, nz_idxs.back());
  for (; i < arr_size && it != nz_idxs.end(); i++) {
    if (*it == i) {
      it++;
      continue;
    }
    out->push_back(i);
  }
  for (; i < arr_size; i++) {
    out->push_back(i);
  }
}

247
/**
Da Zheng's avatar
Da Zheng committed
248
249
 * Uniform sample vertices from a list of vertices.
 */
250
251
252
253
void GetUniformSample(
    const dgl_id_t *edge_id_list, const dgl_id_t *vid_list,
    const size_t ver_len, const size_t max_num_neighbor,
    std::vector<dgl_id_t> *out_ver, std::vector<dgl_id_t> *out_edge) {
Da Zheng's avatar
Da Zheng committed
254
255
256
257
258
259
260
261
262
263
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // If we just sample a small number of elements from a large neighbor list.
  std::vector<size_t> sorted_idxs;
  if (ver_len > max_num_neighbor * 2) {
    sorted_idxs.reserve(max_num_neighbor);
264
    RandomSample(ver_len, max_num_neighbor, &sorted_idxs);
Da Zheng's avatar
Da Zheng committed
265
266
267
268
    std::sort(sorted_idxs.begin(), sorted_idxs.end());
  } else {
    std::vector<size_t> negate;
    negate.reserve(ver_len - max_num_neighbor);
269
    RandomSample(ver_len, ver_len - max_num_neighbor, &negate);
Da Zheng's avatar
Da Zheng committed
270
271
272
273
274
275
276
277
278
279
280
281
282
283
    std::sort(negate.begin(), negate.end());
    NegateArray(negate, ver_len, &sorted_idxs);
  }
  // verify the result.
  CHECK_EQ(sorted_idxs.size(), max_num_neighbor);
  for (size_t i = 1; i < sorted_idxs.size(); i++) {
    CHECK_GT(sorted_idxs[i], sorted_idxs[i - 1]);
  }
  for (auto idx : sorted_idxs) {
    out_ver->push_back(vid_list[idx]);
    out_edge->push_back(edge_id_list[idx]);
  }
}

284
/**
Da Zheng's avatar
Da Zheng committed
285
 * Non-uniform sample via ArrayHeap
286
 *
287
 * @param probability Transition probability on the entire graph, indexed by
288
 * edge ID
Da Zheng's avatar
Da Zheng committed
289
 */
290
291
292
293
294
295
template <typename ValueType>
void GetNonUniformSample(
    const ValueType *probability, const dgl_id_t *edge_id_list,
    const dgl_id_t *vid_list, const size_t ver_len,
    const size_t max_num_neighbor, std::vector<dgl_id_t> *out_ver,
    std::vector<dgl_id_t> *out_edge) {
Da Zheng's avatar
Da Zheng committed
296
297
298
299
300
301
302
303
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // Make sample
  std::vector<size_t> sp_index(max_num_neighbor);
304
  std::vector<ValueType> sp_prob(ver_len);
Da Zheng's avatar
Da Zheng committed
305
  for (size_t i = 0; i < ver_len; ++i) {
306
    sp_prob[i] = probability[edge_id_list[i]];
Da Zheng's avatar
Da Zheng committed
307
  }
308
  ArrayHeap<ValueType> arrayHeap(sp_prob);
309
  arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index);
Da Zheng's avatar
Da Zheng committed
310
311
312
313
314
315
316
317
318
319
320
  out_ver->resize(max_num_neighbor);
  out_edge->resize(max_num_neighbor);
  for (size_t i = 0; i < max_num_neighbor; ++i) {
    size_t idx = sp_index[i];
    out_ver->at(i) = vid_list[idx];
    out_edge->at(i) = edge_id_list[idx];
  }
  sort(out_ver->begin(), out_ver->end());
  sort(out_edge->begin(), out_edge->end());
}

321
/**
 * Used for subgraph sampling: a vertex's sampled neighbor Ids together with
 * the Ids of the edges that connect them.
 */
struct neigh_list {
  std::vector<dgl_id_t> neighs;
  std::vector<dgl_id_t> edges;

  neigh_list(
      const std::vector<dgl_id_t> &neigh_ids,
      const std::vector<dgl_id_t> &edge_ids)
      : neighs(neigh_ids), edges(edge_ids) {}
};

// Bookkeeping for one vertex's sampled neighborhood: the vertex Id, the
// offset of its neighbors in the flat neighbor/edge arrays, and how many
// edges were sampled for it.
struct neighbor_info {
  dgl_id_t id;
  size_t pos;
  size_t num_edges;

  neighbor_info(dgl_id_t id, size_t pos, size_t num_edges)
      : id(id), pos(pos), num_edges(num_edges) {}
};

344
345
346
347
348
349
/**
 * Assemble the layers gathered by SampleSubgraph into a NodeFlow.
 *
 * @param neighbor_list Flat array of sampled neighbor Ids for all vertices.
 * @param edge_list Flat array of the matching sampled edge Ids.
 * @param layer_offsets Offsets into sub_vers delimiting each layer.
 * @param sub_vers Sampled (vertex Id, layer Id) pairs; sorted per layer here.
 * @param neigh_pos Per-vertex (id, offset, count) records locating each
 *        vertex's slice of neighbor_list/edge_list.
 * @param edge_type "in" builds the NodeFlow graph from an in-CSR, anything
 *        else from an out-CSR.
 * @param num_edges Total number of sampled edges.
 * @param num_hops Number of layers in the NodeFlow.
 * @return The populated NodeFlow (mappings, offsets and CSR graph).
 */
NodeFlow ConstructNodeFlow(
    std::vector<dgl_id_t> neighbor_list, std::vector<dgl_id_t> edge_list,
    std::vector<size_t> layer_offsets,
    std::vector<std::pair<dgl_id_t, int>> *sub_vers,
    std::vector<neighbor_info> *neigh_pos, const std::string &edge_type,
    int64_t num_edges, int num_hops) {
  NodeFlow nf = NodeFlow::Create();
  uint64_t num_vertices = sub_vers->size();
  nf->node_mapping = aten::NewIdArray(num_vertices);
  nf->edge_mapping = aten::NewIdArray(num_edges);
  nf->layer_offsets = aten::NewIdArray(num_hops + 1);
  nf->flow_offsets = aten::NewIdArray(num_hops);

  dgl_id_t *node_map_data = static_cast<dgl_id_t *>(nf->node_mapping->data);
  dgl_id_t *layer_off_data = static_cast<dgl_id_t *>(nf->layer_offsets->data);
  dgl_id_t *flow_off_data = static_cast<dgl_id_t *>(nf->flow_offsets->data);
  dgl_id_t *edge_map_data = static_cast<dgl_id_t *>(nf->edge_mapping->data);

  // Construct sub_csr_graph, we treat nodeflow as multigraph by default
  auto subg_csr = CSRPtr(new CSR(num_vertices, num_edges));
  dgl_id_t *indptr_out = static_cast<dgl_id_t *>(subg_csr->indptr()->data);
  dgl_id_t *col_list_out = static_cast<dgl_id_t *>(subg_csr->indices()->data);
  dgl_id_t *eid_out = static_cast<dgl_id_t *>(subg_csr->edge_ids()->data);
  size_t collected_nedges = 0;

  // The data from the previous steps:
  // * node data: sub_vers (vid, layer), neigh_pos,
  // * edge data: neighbor_list, edge_list, probability.
  // * layer_offsets: the offset in sub_vers.
  dgl_id_t ver_id = 0;
  std::vector<std::unordered_map<dgl_id_t, dgl_id_t>> layer_ver_maps;
  layer_ver_maps.resize(num_hops);
  size_t out_node_idx = 0;
  // Walk layers from the last (input) layer to the first (seed) layer,
  // assigning each vertex a dense subgraph Id and recording the
  // original->subgraph Id mapping per layer.
  for (int layer_id = num_hops - 1; layer_id >= 0; layer_id--) {
    // We sort the vertices in a layer so that we don't need to sort the
    // neighbor Ids after remap to a subgraph. However, we don't need to sort
    // the first layer because we want the order of the nodes in the first
    // layer is the same as the input seed nodes.
    if (layer_id > 0) {
      // NOTE(review): the comparator takes pair<dgl_id_t, dgl_id_t> while
      // the elements are pair<dgl_id_t, int>; each comparison converts the
      // pair. Consider matching the element type (or const auto &).
      std::sort(
          sub_vers->begin() + layer_offsets[layer_id],
          sub_vers->begin() + layer_offsets[layer_id + 1],
          [](const std::pair<dgl_id_t, dgl_id_t> &a1,
             const std::pair<dgl_id_t, dgl_id_t> &a2) {
            return a1.first < a2.first;
          });
    }

    // Save the sampled vertices and its layer Id.
    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1];
         i++) {
      node_map_data[out_node_idx++] = sub_vers->at(i).first;
      layer_ver_maps[layer_id].insert(
          std::pair<dgl_id_t, dgl_id_t>(sub_vers->at(i).first, ver_id++));
      CHECK_EQ(sub_vers->at(i).second, layer_id);
    }
  }
  CHECK(out_node_idx == num_vertices);

  // sampling algorithms have to start from the seed nodes, so the seed nodes
  // are in the first layer and the input nodes are in the last layer. When we
  // expose the sampled graph to a Python user, we say the input nodes are in
  // the first layer and the seed nodes are in the last layer. Thus, when we
  // copy sampled results to a CSR, we need to reverse the order of layers.
  std::fill(indptr_out, indptr_out + num_vertices + 1, 0);
  // Rows for the last (input) layer have no out-edges in the NodeFlow;
  // start filling rows after them.
  size_t row_idx = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  layer_off_data[0] = 0;
  layer_off_data[1] = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  int out_layer_idx = 1;
  for (int layer_id = num_hops - 2; layer_id >= 0; layer_id--) {
    // Because we don't sort the vertices in the first layer above, we can't
    // sort the neighbor positions of the vertices in the first layer either.
    if (layer_id > 0) {
      std::sort(
          neigh_pos->begin() + layer_offsets[layer_id],
          neigh_pos->begin() + layer_offsets[layer_id + 1],
          [](const neighbor_info &a1, const neighbor_info &a2) {
            return a1.id < a2.id;
          });
    }

    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1];
         i++) {
      dgl_id_t dst_id = sub_vers->at(i).first;
      // After both sorts, sub_vers and neigh_pos must line up per vertex.
      CHECK_EQ(dst_id, neigh_pos->at(i).id);
      size_t pos = neigh_pos->at(i).pos;
      CHECK_LE(pos, neighbor_list.size());
      const size_t nedges = neigh_pos->at(i).num_edges;
      if (neighbor_list.empty()) CHECK_EQ(nedges, 0);

      // We need to map the Ids of the neighbors to the subgraph.
      auto neigh_it = neighbor_list.begin() + pos;
      for (size_t i = 0; i < nedges; i++) {
        dgl_id_t neigh = *(neigh_it + i);
        // Neighbors always come from the next (closer-to-input) layer.
        CHECK(
            layer_ver_maps[layer_id + 1].find(neigh) !=
            layer_ver_maps[layer_id + 1].end());
        col_list_out[collected_nedges + i] =
            layer_ver_maps[layer_id + 1][neigh];
      }
      // We can simply copy the edge Ids.
      std::copy_n(
          edge_list.begin() + pos, nedges, edge_map_data + collected_nedges);
      collected_nedges += nedges;
      indptr_out[row_idx + 1] = indptr_out[row_idx] + nedges;
      row_idx++;
    }
    layer_off_data[out_layer_idx + 1] = layer_off_data[out_layer_idx] +
                                        layer_offsets[layer_id + 1] -
                                        layer_offsets[layer_id];
    out_layer_idx++;
  }
  CHECK_EQ(row_idx, num_vertices);
  CHECK_EQ(indptr_out[row_idx], num_edges);
  CHECK_EQ(out_layer_idx, num_hops);
  CHECK_EQ(layer_off_data[out_layer_idx], num_vertices);

  // Copy flow offsets: the edge ranges between consecutive layers.
  flow_off_data[0] = 0;
  int out_flow_idx = 0;
  for (size_t i = 0; i < layer_offsets.size() - 2; i++) {
    size_t num_edges =
        indptr_out[layer_off_data[i + 2]] - indptr_out[layer_off_data[i + 1]];
    flow_off_data[out_flow_idx + 1] = flow_off_data[out_flow_idx] + num_edges;
    out_flow_idx++;
  }
  CHECK(out_flow_idx == num_hops - 1);
  CHECK(flow_off_data[num_hops - 1] == static_cast<uint64_t>(num_edges));

  // Edge Ids inside the NodeFlow CSR are simply 0..num_edges-1.
  std::iota(eid_out, eid_out + num_edges, 0);

  if (edge_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(subg_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, subg_csr));
  }

  return nf;
}

484
485
486
487
488
/**
 * Sample a NodeFlow from `graph` by layer-wise neighbor sampling.
 *
 * Starting from `seeds` (layer 0), each subsequent layer samples up to
 * `num_neighbor` neighbors per vertex, uniformly when `probability` is
 * nullptr and weighted by edge probability otherwise, until `num_hops`
 * layers are collected; the result is handed to ConstructNodeFlow.
 *
 * @param graph The immutable graph to sample from (64-bit Ids only).
 * @param seeds Seed vertex Ids forming the first layer.
 * @param probability Per-edge weights indexed by edge Id, or nullptr for
 *        uniform sampling.
 * @param edge_type "in" walks incoming edges (in-CSR), otherwise outgoing.
 * @param num_hops Number of layers in the resulting NodeFlow.
 * @param num_neighbor Maximum neighbors sampled per vertex.
 * @param add_self_loop Whether to force a self loop into each vertex's
 *        sampled neighborhood (edge Id -1 when the loop edge is absent).
 */
template <typename ValueType>
NodeFlow SampleSubgraph(
    const ImmutableGraph *graph, const std::vector<dgl_id_t> &seeds,
    const ValueType *probability, const std::string &edge_type, int num_hops,
    size_t num_neighbor, const bool add_self_loop) {
  CHECK_EQ(graph->NumBits(), 64) << "32 bit graph is not supported yet";
  const size_t num_seeds = seeds.size();
  auto orig_csr = edge_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t *val_list =
      static_cast<dgl_id_t *>(orig_csr->edge_ids()->data);
  const dgl_id_t *col_list = static_cast<dgl_id_t *>(orig_csr->indices()->data);
  const dgl_id_t *indptr = static_cast<dgl_id_t *>(orig_csr->indptr()->data);

  std::unordered_set<dgl_id_t> sub_ver_map;  // The vertex Ids in a layer.
  std::vector<std::pair<dgl_id_t, int>> sub_vers;
  // Heuristic pre-allocation: assume ~10 sampled vertices per seed.
  sub_vers.reserve(num_seeds * 10);
  // add seed vertices, deduplicated via sub_ver_map
  for (size_t i = 0; i < num_seeds; ++i) {
    auto ret = sub_ver_map.insert(seeds[i]);
    // If the vertex is inserted successfully.
    if (ret.second) {
      sub_vers.emplace_back(seeds[i], 0);
    }
  }
  // Scratch buffers reused for each vertex's sampled neighborhood.
  std::vector<dgl_id_t> tmp_sampled_src_list;
  std::vector<dgl_id_t> tmp_sampled_edge_list;
  // ver_id, position
  std::vector<neighbor_info> neigh_pos;
  neigh_pos.reserve(num_seeds);
  std::vector<dgl_id_t> neighbor_list;
  std::vector<dgl_id_t> edge_list;
  std::vector<size_t> layer_offsets(num_hops + 1);
  int64_t num_edges = 0;

  layer_offsets[0] = 0;
  layer_offsets[1] = sub_vers.size();
  for (int layer_id = 1; layer_id < num_hops; layer_id++) {
    // We need to avoid resampling the same node in a layer, but we allow a
    // node to be resampled in multiple layers. We use `sub_ver_map` to keep
    // track of sampled nodes in a layer, and clear it when entering a new
    // layer.
    sub_ver_map.clear();
    // Previous iteration collects all nodes in sub_vers, which are collected
    // in the previous layer. sub_vers is used both as a node collection and a
    // queue.
    for (size_t idx = layer_offsets[layer_id - 1];
         idx < layer_offsets[layer_id]; idx++) {
      dgl_id_t dst_id = sub_vers[idx].first;
      const int cur_node_level = sub_vers[idx].second;

      tmp_sampled_src_list.clear();
      tmp_sampled_edge_list.clear();
      // Degree of dst_id in the chosen CSR.
      dgl_id_t ver_len = *(indptr + dst_id + 1) - *(indptr + dst_id);
      if (probability == nullptr) {  // uniform-sample
        GetUniformSample(
            val_list + *(indptr + dst_id), col_list + *(indptr + dst_id),
            ver_len, num_neighbor, &tmp_sampled_src_list,
            &tmp_sampled_edge_list);
      } else {  // non-uniform-sample
        GetNonUniformSample(
            probability, val_list + *(indptr + dst_id),
            col_list + *(indptr + dst_id), ver_len, num_neighbor,
            &tmp_sampled_src_list, &tmp_sampled_edge_list);
      }
      // If we need to add self loop and it doesn't exist in the sampled
      // neighbor list.
      if (add_self_loop &&
          std::find(
              tmp_sampled_src_list.begin(), tmp_sampled_src_list.end(),
              dst_id) == tmp_sampled_src_list.end()) {
        tmp_sampled_src_list.push_back(dst_id);
        const dgl_id_t *src_list = col_list + *(indptr + dst_id);
        const dgl_id_t *eid_list = val_list + *(indptr + dst_id);
        // TODO(zhengda) this operation has O(N) complexity. It can be pretty
        // slow.
        const dgl_id_t *src = std::find(src_list, src_list + ver_len, dst_id);
        // If there doesn't exist a self loop in the graph.
        // we have to add -1 as the edge id for the self-loop edge.
        if (src == src_list + ver_len)
          tmp_sampled_edge_list.push_back(-1);
        else
          tmp_sampled_edge_list.push_back(eid_list[src - src_list]);
      }
      CHECK_EQ(tmp_sampled_src_list.size(), tmp_sampled_edge_list.size());
      // Record where this vertex's neighbors land in the flat arrays.
      neigh_pos.emplace_back(
          dst_id, neighbor_list.size(), tmp_sampled_src_list.size());
      // Then push the vertices
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        neighbor_list.push_back(tmp_sampled_src_list[i]);
      }
      // Finally we push the edge list
      for (size_t i = 0; i < tmp_sampled_edge_list.size(); ++i) {
        edge_list.push_back(tmp_sampled_edge_list[i]);
      }
      num_edges += tmp_sampled_src_list.size();
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        // We need to add the neighbor in the hashtable here. This ensures
        // that the vertex in the queue is unique. If we see a vertex before,
        // we don't need to add it to the queue again.
        auto ret = sub_ver_map.insert(tmp_sampled_src_list[i]);
        // If the sampled neighbor is inserted to the map successfully.
        if (ret.second) {
          sub_vers.emplace_back(tmp_sampled_src_list[i], cur_node_level + 1);
        }
      }
    }
    layer_offsets[layer_id + 1] = layer_offsets[layer_id] + sub_ver_map.size();
    CHECK_EQ(layer_offsets[layer_id + 1], sub_vers.size());
  }

  return ConstructNodeFlow(
      neighbor_list, edge_list, layer_offsets, &sub_vers, &neigh_pos, edge_type,
      num_edges, num_hops);
}

598
}  // namespace
Da Zheng's avatar
Da Zheng committed
599

600
// C API getters exposing the fields of a NodeFlow object to the frontend.
DGL_REGISTER_GLOBAL("_deprecate.nodeflow._CAPI_NodeFlowGetGraph")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      NodeFlow nflow = args[0];
      *rv = nflow->graph;
    });

DGL_REGISTER_GLOBAL("_deprecate.nodeflow._CAPI_NodeFlowGetNodeMapping")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      NodeFlow nflow = args[0];
      *rv = nflow->node_mapping;
    });

DGL_REGISTER_GLOBAL("_deprecate.nodeflow._CAPI_NodeFlowGetEdgeMapping")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      NodeFlow nflow = args[0];
      *rv = nflow->edge_mapping;
    });

DGL_REGISTER_GLOBAL("_deprecate.nodeflow._CAPI_NodeFlowGetLayerOffsets")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      NodeFlow nflow = args[0];
      *rv = nflow->layer_offsets;
    });

DGL_REGISTER_GLOBAL("_deprecate.nodeflow._CAPI_NodeFlowGetBlockOffsets")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      NodeFlow nflow = args[0];
      *rv = nflow->flow_offsets;
    });
629

630
631
632
633
634
635
636
637
/**
 * Sample a NodeFlow around the given seed nodes by neighbor sampling.
 *
 * @param graph The immutable graph to sample from.
 * @param seeds Seed vertex Ids; they become the NodeFlow's seed layer.
 * @param edge_type "in" to follow incoming edges, otherwise outgoing.
 * @param num_hops Number of sampling hops; the NodeFlow contains
 *        num_hops + 1 layers (the seeds plus one layer per hop), which is
 *        why SampleSubgraph receives num_hops + 1.
 * @param expand_factor Maximum number of neighbors sampled per vertex.
 * @param add_self_loop Whether to add a self loop to each sampled
 *        neighborhood when absent.
 * @param probability Per-edge weights indexed by edge Id, or nullptr for
 *        uniform sampling.
 */
template <typename ValueType>
NodeFlow SamplerOp::NeighborSample(
    const ImmutableGraph *graph, const std::vector<dgl_id_t> &seeds,
    const std::string &edge_type, int num_hops, int expand_factor,
    const bool add_self_loop, const ValueType *probability) {
  return SampleSubgraph(
      graph, seeds, probability, edge_type, num_hops + 1, expand_factor,
      add_self_loop);
}

640
namespace {
641
642
643
644
645
void ConstructLayers(
    const dgl_id_t *indptr, const dgl_id_t *indices,
    const std::vector<dgl_id_t> &seed_array, IdArray layer_sizes,
    std::vector<dgl_id_t> *layer_offsets, std::vector<dgl_id_t> *node_mapping,
    std::vector<int64_t> *actl_layer_sizes, std::vector<float> *probabilities) {
  /**
   * Given a graph and a collection of seed nodes, this function constructs
   * NodeFlow layers via uniform layer-wise sampling, and return the resultant
   * layers and their corresponding probabilities.
   *
   * Layers are built from the seeds backwards (layer_sizes is consumed in
   * reverse) and the accumulated node list is reversed at the end so the
   * output runs from the input layer to the seed layer.
   */
  // The seed layer is copied verbatim with probability 1 for each node.
  std::copy(
      seed_array.begin(), seed_array.end(), std::back_inserter(*node_mapping));
  actl_layer_sizes->push_back(node_mapping->size());
  probabilities->insert(probabilities->end(), node_mapping->size(), 1);
  const int64_t *layer_sizes_data = static_cast<int64_t *>(layer_sizes->data);
  const int64_t num_layers = layer_sizes->shape[0];

  // [curr, next) delimits the layer most recently appended to node_mapping.
  size_t curr = 0;
  size_t next = node_mapping->size();
  for (int64_t i = num_layers - 1; i >= 0; --i) {
    const int64_t layer_size = layer_sizes_data[i];
    // Union of the neighbors of every node in the previous layer.
    std::unordered_set<dgl_id_t> candidate_set;
    for (auto j = curr; j != next; ++j) {
      auto src = (*node_mapping)[j];
      candidate_set.insert(indices + indptr[src], indices + indptr[src + 1]);
    }

    std::vector<dgl_id_t> candidate_vector;
    std::copy(
        candidate_set.begin(), candidate_set.end(),
        std::back_inserter(candidate_vector));

    // Draw layer_size candidates uniformly WITH replacement; count how many
    // times each distinct node is drawn.
    // NOTE(review): if candidate_vector is empty (previous layer has no
    // neighbors), RandInt(0) is invoked here — confirm upstream guarantees
    // non-empty neighborhoods.
    std::unordered_map<dgl_id_t, size_t> n_occurrences;
    auto n_candidates = candidate_vector.size();
    for (int64_t j = 0; j != layer_size; ++j) {
      auto dst =
          candidate_vector[RandomEngine::ThreadLocal()->RandInt(n_candidates)];
      if (!n_occurrences.insert(std::make_pair(dst, 1)).second) {
        ++n_occurrences[dst];
      }
    }

    // Each distinct node enters the layer once, weighted by its draw count
    // scaled by n_candidates / layer_size.
    for (auto const &pair : n_occurrences) {
      node_mapping->push_back(pair.first);
      float p = pair.second * n_candidates / static_cast<float>(layer_size);
      probabilities->push_back(p);
    }

    actl_layer_sizes->push_back(node_mapping->size() - next);
    curr = next;
    next = node_mapping->size();
  }
  // Flip everything so layers are ordered input-to-seed, then build prefix
  // sums of the actual layer sizes as offsets.
  std::reverse(node_mapping->begin(), node_mapping->end());
  std::reverse(actl_layer_sizes->begin(), actl_layer_sizes->end());
  layer_offsets->push_back(0);
  for (const auto &size : *actl_layer_sizes) {
    layer_offsets->push_back(size + layer_offsets->back());
  }
}
700

701
702
703
704
705
706
707
void ConstructFlows(
    const dgl_id_t *indptr, const dgl_id_t *indices, const dgl_id_t *eids,
    const std::vector<dgl_id_t> &node_mapping,
    const std::vector<int64_t> &actl_layer_sizes,
    std::vector<dgl_id_t> *sub_indptr, std::vector<dgl_id_t> *sub_indices,
    std::vector<dgl_id_t> *sub_eids, std::vector<dgl_id_t> *flow_offsets,
    std::vector<dgl_id_t> *edge_mapping) {
708
  /**
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
   * Given a graph and a sequence of NodeFlow layers, this function constructs
   * dense subgraphs (flows) between consecutive layers.
   */
  auto n_flows = actl_layer_sizes.size() - 1;
  for (int64_t i = 0; i < actl_layer_sizes.front() + 1; i++)
    sub_indptr->push_back(0);
  flow_offsets->push_back(0);
  int64_t first = 0;
  for (size_t i = 0; i < n_flows; ++i) {
    auto src_size = actl_layer_sizes[i];
    std::unordered_map<dgl_id_t, dgl_id_t> source_map;
    for (int64_t j = 0; j < src_size; ++j) {
      source_map.insert(std::make_pair(node_mapping[first + j], first + j));
    }
    auto dst_size = actl_layer_sizes[i + 1];
    for (int64_t j = 0; j < dst_size; ++j) {
      auto dst = node_mapping[first + src_size + j];
      typedef std::pair<dgl_id_t, dgl_id_t> id_pair;
      std::vector<id_pair> neighbor_indices;
      for (dgl_id_t k = indptr[dst]; k < indptr[dst + 1]; ++k) {
        // TODO(gaiyu): accelerate hash table lookup
        auto ret = source_map.find(indices[k]);
        if (ret != source_map.end()) {
          neighbor_indices.push_back(std::make_pair(ret->second, eids[k]));
733
734
        }
      }
735
736
737
738
739
740
741
742
743
      auto cmp = [](const id_pair p, const id_pair q) -> bool {
        return p.first < q.first;
      };
      std::sort(neighbor_indices.begin(), neighbor_indices.end(), cmp);
      for (const auto &pair : neighbor_indices) {
        sub_indices->push_back(pair.first);
        edge_mapping->push_back(pair.second);
      }
      sub_indptr->push_back(sub_indices->size());
744
    }
745
746
    flow_offsets->push_back(sub_indices->size());
    first += src_size;
747
  }
748
749
750
  sub_eids->resize(sub_indices->size());
  std::iota(sub_eids->begin(), sub_eids->end(), 0);
}
751
752
}  // namespace

753
754
755
756
757
758
759
760
/**
 * @brief Layer-wise uniform sampling (LADIES/FastGCN-style) from the seeds.
 *
 * @param graph The parent immutable graph.
 * @param seeds Seed node ids forming the last NodeFlow layer.
 * @param neighbor_type "in" to walk in-edges, otherwise out-edges.
 * @param layer_sizes Number of nodes to sample per layer.
 * @return A NodeFlow whose graph, node/edge mappings and offsets describe the
 *         sampled layers and the flows between them.
 */
NodeFlow SamplerOp::LayerUniformSample(
    const ImmutableGraph *graph, const std::vector<dgl_id_t> &seeds,
    const std::string &neighbor_type, IdArray layer_sizes) {
  const auto g_csr =
      neighbor_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t *indptr = static_cast<dgl_id_t *>(g_csr->indptr()->data);
  const dgl_id_t *indices = static_cast<dgl_id_t *>(g_csr->indices()->data);
  const dgl_id_t *eids = static_cast<dgl_id_t *>(g_csr->edge_ids()->data);

  // Sample the layers from the seeds backwards.
  std::vector<dgl_id_t> layer_offsets;
  std::vector<dgl_id_t> node_mapping;
  std::vector<int64_t> actl_layer_sizes;
  std::vector<float> probabilities;
  ConstructLayers(
      indptr, indices, seeds, layer_sizes, &layer_offsets, &node_mapping,
      &actl_layer_sizes, &probabilities);

  // Connect consecutive layers into dense flows.
  std::vector<dgl_id_t> sub_indptr, sub_indices, sub_edge_ids;
  std::vector<dgl_id_t> flow_offsets;
  std::vector<dgl_id_t> edge_mapping;
  ConstructFlows(
      indptr, indices, eids, node_mapping, actl_layer_sizes, &sub_indptr,
      &sub_indices, &sub_edge_ids, &flow_offsets, &edge_mapping);

  // sanity check
  CHECK_GT(sub_indptr.size(), 0);
  CHECK_EQ(sub_indptr[0], 0);
  CHECK_EQ(sub_indptr.back(), sub_indices.size());
  CHECK_EQ(sub_indices.size(), sub_edge_ids.size());

  NodeFlow nf = NodeFlow::Create();
  auto sub_csr = CSRPtr(new CSR(
      aten::VecToIdArray(sub_indptr), aten::VecToIdArray(sub_indices),
      aten::VecToIdArray(sub_edge_ids)));
  // The CSR orientation must match the direction we sampled in.
  if (neighbor_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(sub_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, sub_csr));
  }
  nf->node_mapping = aten::VecToIdArray(node_mapping);
  nf->edge_mapping = aten::VecToIdArray(edge_mapping);
  nf->layer_offsets = aten::VecToIdArray(layer_offsets);
  nf->flow_offsets = aten::VecToIdArray(flow_offsets);
  return nf;
}

Da Zheng's avatar
Da Zheng committed
801
802
803
804
805
806
807
808
809
810
811
812
/**
 * @brief Materialize the requested CSR representation of the graph so that
 * later (possibly parallel) sampling can rely on it being cached.
 *
 * @param g The immutable graph.
 * @param neigh_type "in" builds the in-CSR, "out" the out-CSR; anything else
 *        is a fatal error.
 */
void BuildCsr(const ImmutableGraph &g, const std::string &neigh_type) {
  if (neigh_type == "in") {
    auto csr = g.GetInCSR();
    assert(csr);
  } else if (neigh_type == "out") {
    auto csr = g.GetOutCSR();
    assert(csr);
  } else {
    LOG(FATAL) << "We don't support sample from neighbor type " << neigh_type;
  }
}

813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
template <typename ValueType>
std::vector<NodeFlow> NeighborSamplingImpl(
    const ImmutableGraphPtr gptr, const IdArray seed_nodes,
    const int64_t batch_start_id, const int64_t batch_size,
    const int64_t max_num_workers, const int64_t expand_factor,
    const int64_t num_hops, const std::string neigh_type,
    const bool add_self_loop, const ValueType *probability) {
  // process args
  CHECK(aten::IsValidIdArray(seed_nodes));
  const dgl_id_t *seed_nodes_data = static_cast<dgl_id_t *>(seed_nodes->data);
  const int64_t num_seeds = seed_nodes->shape[0];
  const int64_t num_workers = std::min(
      max_num_workers,
      (num_seeds + batch_size - 1) / batch_size - batch_start_id);
  // We need to make sure we have the right CSR before we enter parallel
  // sampling.
  BuildCsr(*gptr, neigh_type);
  // generate node flows
  std::vector<NodeFlow> nflows(num_workers);
  runtime::parallel_for(0, num_workers, [&](size_t b, size_t e) {
    for (auto i = b; i < e; ++i) {
      // create per-worker seed nodes.
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      // TODO(minjie): the vector allocation/copy is unnecessary
      std::vector<dgl_id_t> worker_seeds(end - start);
      std::copy(
          seed_nodes_data + start, seed_nodes_data + end, worker_seeds.begin());
      nflows[i] = SamplerOp::NeighborSample(
842
843
          gptr.get(), worker_seeds, neigh_type, num_hops, expand_factor,
          add_self_loop, probability);
844
845
846
    }
  });
  return nflows;
847
848
849
}

// C API entry: uniform neighbor sampling (no transition probabilities).
DGL_REGISTER_GLOBAL("sampling._CAPI_UniformSampling")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      // arguments
      const GraphRef g = args[0];
      const IdArray seed_nodes = args[1];
      const int64_t batch_start_id = args[2];
      const int64_t batch_size = args[3];
      const int64_t max_num_workers = args[4];
      const int64_t expand_factor = args[5];
      const int64_t num_hops = args[6];
      const std::string neigh_type = args[7];
      const bool add_self_loop = args[8];

      auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
      CHECK(gptr) << "sampling isn't implemented in mutable graph";

      CHECK(aten::IsValidIdArray(seed_nodes));
      CHECK_EQ(seed_nodes->ctx.device_type, kDGLCPU)
          << "UniformSampler only support CPU sampling";

      // A null probability pointer selects uniform sampling.
      std::vector<NodeFlow> nflows = NeighborSamplingImpl<float>(
          gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
          expand_factor, num_hops, neigh_type, add_self_loop, nullptr);

      *rv = List<NodeFlow>(nflows);
    });
875
876

// C API entry: neighbor sampling weighted by per-edge transition
// probabilities (a float tensor with one entry per edge, or a null array for
// uniform sampling).
DGL_REGISTER_GLOBAL("sampling._CAPI_NeighborSampling")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      // arguments
      const GraphRef g = args[0];
      const IdArray seed_nodes = args[1];
      const int64_t batch_start_id = args[2];
      const int64_t batch_size = args[3];
      const int64_t max_num_workers = args[4];
      const int64_t expand_factor = args[5];
      const int64_t num_hops = args[6];
      const std::string neigh_type = args[7];
      const bool add_self_loop = args[8];
      const NDArray probability = args[9];

      auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
      CHECK(gptr) << "sampling isn't implemented in mutable graph";

      CHECK(aten::IsValidIdArray(seed_nodes));
      CHECK_EQ(seed_nodes->ctx.device_type, kDGLCPU)
          << "NeighborSampler only support CPU sampling";

      std::vector<NodeFlow> nflows;

      CHECK(probability->dtype.code == kDGLFloat)
          << "transition probability must be float";
      CHECK(probability->ndim == 1)
          << "transition probability must be a 1-dimensional vector";
      CHECK_EQ(probability->ctx.device_type, kDGLCPU)
          << "NeighborSampling only support CPU sampling";

      ATEN_FLOAT_TYPE_SWITCH(
          probability->dtype, FloatType, "transition probability", {
            const FloatType *prob;

            if (aten::IsNullArray(probability)) {
              prob = nullptr;
            } else {
              CHECK(
                  probability->shape[0] ==
                  static_cast<int64_t>(gptr->NumEdges()))
                  << "transition probability must have same number of elements "
                     "as edges";
              CHECK(probability.IsContiguous())
                  << "transition probability must be contiguous tensor";
              prob = static_cast<const FloatType *>(probability->data);
            }

            nflows = NeighborSamplingImpl(
                gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
                expand_factor, num_hops, neigh_type, add_self_loop, prob);
          });

      *rv = List<NodeFlow>(nflows);
    });

931
// C API entry: layer-wise uniform sampling; each worker samples the NodeFlow
// of one seed batch.
DGL_REGISTER_GLOBAL("sampling._CAPI_LayerSampling")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      // arguments
      GraphRef g = args[0];
      const IdArray seed_nodes = args[1];
      const int64_t batch_start_id = args[2];
      const int64_t batch_size = args[3];
      const int64_t max_num_workers = args[4];
      const IdArray layer_sizes = args[5];
      const std::string neigh_type = args[6];
      // process args
      auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
      CHECK(gptr) << "sampling isn't implemented in mutable graph";
      CHECK(aten::IsValidIdArray(seed_nodes));
      CHECK_EQ(seed_nodes->ctx.device_type, kDGLCPU)
          << "LayerSampler only support CPU sampling";

      CHECK(aten::IsValidIdArray(layer_sizes));
      CHECK_EQ(layer_sizes->ctx.device_type, kDGLCPU)
          << "LayerSampler only support CPU sampling";

      const dgl_id_t *seed_nodes_data =
          static_cast<dgl_id_t *>(seed_nodes->data);
      const int64_t num_seeds = seed_nodes->shape[0];
      const int64_t num_workers = std::min(
          max_num_workers,
          (num_seeds + batch_size - 1) / batch_size - batch_start_id);
      // We need to make sure we have the right CSR before we enter parallel
      // sampling.
      BuildCsr(*gptr, neigh_type);
      // generate node flows
      std::vector<NodeFlow> nflows(num_workers);
      runtime::parallel_for(0, num_workers, [&](size_t b, size_t e) {
        for (auto i = b; i < e; ++i) {
          // create per-worker seed nodes.
          const int64_t start = (batch_start_id + i) * batch_size;
          const int64_t end = std::min(start + batch_size, num_seeds);
          // TODO(minjie): the vector allocation/copy is unnecessary
          std::vector<dgl_id_t> worker_seeds(end - start);
          std::copy(
              seed_nodes_data + start, seed_nodes_data + end,
              worker_seeds.begin());
          nflows[i] = SamplerOp::LayerUniformSample(
              gptr.get(), worker_seeds, neigh_type, layer_sizes);
        }
      });
      *rv = List<NodeFlow>(nflows);
    });
979

980
981
982
983
984
985
986
namespace {

// Materialize the COO representation of the graph so later (possibly
// parallel) code can rely on it being cached.
void BuildCoo(const ImmutableGraph &g) {
  const auto coo_ptr = g.GetCOO();
  assert(coo_ptr);
}

987
988
/**
 * @brief Translate a global vertex id to a local id, assigning the next free
 * local id (the current map size) if the vertex has not been seen yet.
 *
 * @param global_id Vertex id in the parent graph.
 * @param map [in,out] global id -> local id mapping; grows on a miss.
 * @return The local id of the vertex.
 */
dgl_id_t global2local_map(
    dgl_id_t global_id, std::unordered_map<dgl_id_t, dgl_id_t> *map) {
  auto it = map->find(global_id);
  if (it == map->end()) {
    dgl_id_t local_id = map->size();
    map->insert(std::pair<dgl_id_t, dgl_id_t>(global_id, local_id));
    return local_id;
  } else {
    return it->second;
  }
}

Da Zheng's avatar
Da Zheng committed
999
/**
 * @brief Whether negative sampling corrupts the head (source) side of edges.
 * Any other mode string means the tail side is corrupted.
 */
inline bool IsNegativeHeadMode(const std::string &mode) {
  return mode == "head";
}

1003
/**
 * @brief Map subgraph-local vertex ids back to parent-graph ids.
 *
 * @param induced_nid Array mapping local id -> global id.
 * @param subg_nid Local vertex ids to translate.
 * @return A new array of the corresponding global vertex ids.
 */
IdArray GetGlobalVid(IdArray induced_nid, IdArray subg_nid) {
  IdArray gnid =
      IdArray::Empty({subg_nid->shape[0]}, subg_nid->dtype, subg_nid->ctx);
  const dgl_id_t *induced_nid_data = static_cast<dgl_id_t *>(induced_nid->data);
  const dgl_id_t *subg_nid_data = static_cast<dgl_id_t *>(subg_nid->data);
  dgl_id_t *gnid_data = static_cast<dgl_id_t *>(gnid->data);
  for (int64_t i = 0; i < subg_nid->shape[0]; i++) {
    gnid_data[i] = induced_nid_data[subg_nid_data[i]];
  }
  return gnid;
}

1015
1016
1017
1018
/**
 * @brief Check which sampled negative edges actually exist in the parent
 * graph (false negatives), ignoring relations.
 */
IdArray CheckExistence(
    GraphPtr gptr, IdArray neg_src, IdArray neg_dst, IdArray induced_nid) {
  return gptr->HasEdgesBetween(
      GetGlobalVid(induced_nid, neg_src), GetGlobalVid(induced_nid, neg_dst));
}

1021
1022
1023
/**
 * @brief Check which sampled negative edges are false negatives in a
 * multi-relation graph: an edge only counts as existing if the parent graph
 * has an edge between the endpoints with the same relation as the positive
 * edge the negative sample was derived from.
 *
 * @param gptr The parent graph.
 * @param relations Relation id of every edge in the parent graph.
 * @param neg_src Subgraph-local source ids of the negative edges.
 * @param neg_dst Subgraph-local destination ids of the negative edges.
 * @param induced_nid Subgraph-local id -> global vertex id mapping.
 * @param neg_eid Parent edge id of the positive edge each negative edge
 *        references.
 * @return Boolean array: 1 where the negative edge is a false negative.
 */
IdArray CheckExistence(
    GraphPtr gptr, IdArray relations, IdArray neg_src, IdArray neg_dst,
    IdArray induced_nid, IdArray neg_eid) {
  neg_src = GetGlobalVid(induced_nid, neg_src);
  neg_dst = GetGlobalVid(induced_nid, neg_dst);
  BoolArray exist = gptr->HasEdgesBetween(neg_src, neg_dst);
  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *relation_data = static_cast<dgl_id_t *>(relations->data);
  // TODO(zhengda) is this right?
  dgl_id_t *exist_data = static_cast<dgl_id_t *>(exist->data);
  int64_t num_neg_edges = neg_src->shape[0];
  for (int64_t i = 0; i < num_neg_edges; i++) {
    // If the edge doesn't exist, we don't need to do anything.
    if (!exist_data[i]) continue;
    // If the edge exists, we need to double check if the relations match.
    // If they match, this negative edge isn't really a negative edge.
    dgl_id_t eid1 = neg_eid_data[i];
    dgl_id_t orig_neg_rel1 = relation_data[eid1];
    IdArray eids = gptr->EdgeId(neg_src_data[i], neg_dst_data[i]);
    dgl_id_t *eid_data = static_cast<dgl_id_t *>(eids->data);
    int64_t num_edges_between = eids->shape[0];
    bool same_rel = false;
    for (int64_t j = 0; j < num_edges_between; j++) {
      dgl_id_t neg_rel1 = relation_data[eid_data[j]];
      if (neg_rel1 == orig_neg_rel1) {
        same_rel = true;
        break;
      }
    }
    exist_data[i] = same_rel;
  }
  return exist;
}

1057
1058
1059
std::vector<dgl_id_t> Global2Local(
    const std::vector<size_t> &ids,
    const std::unordered_map<dgl_id_t, dgl_id_t> &map) {
1060
1061
1062
1063
1064
1065
1066
1067
1068
  std::vector<dgl_id_t> local_ids(ids.size());
  for (size_t i = 0; i < ids.size(); i++) {
    auto it = map.find(ids[i]);
    assert(it != map.end());
    local_ids[i] = it->second;
  }
  return local_ids;
}

1069
1070
1071
/**
 * @brief Generate a negative edge subgraph for a positive subgraph by
 * corrupting one endpoint ("head" or "tail", per neg_mode) of every positive
 * edge with neg_sample_size sampled vertices.
 *
 * @param pos_subg The positive edge subgraph.
 * @param neg_mode "head" corrupts sources, otherwise destinations.
 * @param neg_sample_size Number of negative samples per positive edge
 *        (clamped to the number of vertices in the parent graph).
 * @param exclude_positive If true, never sample a true neighbor as the
 *        corrupted endpoint (only possible when neg_sample_size is smaller
 *        than the graph).
 * @param check_false_neg If true, mark negative edges that actually exist in
 *        the parent graph in neg_subg.exist.
 * @return The negative subgraph with induced vertex/edge mappings.
 */
NegSubgraph EdgeSamplerObject::genNegEdgeSubgraph(
    const Subgraph &pos_subg, const std::string &neg_mode,
    int64_t neg_sample_size, bool exclude_positive, bool check_false_neg) {
  int64_t num_tot_nodes = gptr_->NumVertices();
  if (neg_sample_size > num_tot_nodes) neg_sample_size = num_tot_nodes;
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
  // The COO array stores dst ids in the first half and src ids in the second.
  int64_t num_pos_edges = coo->shape[0] / 2;
  int64_t num_neg_edges = num_pos_edges * neg_sample_size;
  IdArray neg_dst = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid =
      IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data =
      static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *induced_neg_eid_data =
      static_cast<dgl_id_t *>(induced_neg_eid->data);

  // "unchanged" is the endpoint kept from the positive edge; "neg_changed"
  // receives the corrupted endpoint.
  const dgl_id_t *unchanged;
  dgl_id_t *neg_unchanged;
  dgl_id_t *neg_changed;
  if (IsNegativeHeadMode(neg_mode)) {
    unchanged = dst_data;
    neg_unchanged = neg_dst_data;
    neg_changed = neg_src_data;
  } else {
    unchanged = src_data;
    neg_unchanged = neg_src_data;
    neg_changed = neg_dst_data;
  }

  // Maps global vertex ids to local ids of the negative subgraph.
  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  std::vector<dgl_id_t> local_pos_vids;
  local_pos_vids.reserve(num_pos_edges);

  std::vector<size_t> neg_vids;
  neg_vids.reserve(neg_sample_size);
  // If we don't exclude positive edges, we are actually sampling more than
  // the total number of nodes in the graph.
  if (!exclude_positive && neg_sample_size >= num_tot_nodes) {
    // We add all nodes as negative nodes.
    for (int64_t i = 0; i < num_tot_nodes; i++) {
      neg_vids.push_back(i);
      neg_map[i] = i;
    }

    // Get all nodes in the positive side.
    for (int64_t i = 0; i < num_pos_edges; i++) {
      dgl_id_t vid = induced_vid_data[unchanged[i]];
      local_pos_vids.push_back(neg_map[vid]);
    }
    // There is no guarantee that the nodes in the vector are unique.
    std::sort(local_pos_vids.begin(), local_pos_vids.end());
    auto it = std::unique(local_pos_vids.begin(), local_pos_vids.end());
    local_pos_vids.resize(it - local_pos_vids.begin());
  } else {
    // Collect nodes in the positive side.
    dgl_id_t local_vid = 0;
    for (int64_t i = 0; i < num_pos_edges; i++) {
      dgl_id_t vid = induced_vid_data[unchanged[i]];
      auto it = neg_map.find(vid);
      if (it == neg_map.end()) {
        local_pos_vids.push_back(local_vid);
        neg_map.insert(std::pair<dgl_id_t, dgl_id_t>(vid, local_vid++));
      }
    }
  }

  int64_t prev_neg_offset = 0;
  for (int64_t i = 0; i < num_pos_edges; i++) {
    size_t neg_idx = i * neg_sample_size;

    // Neighbors of the unchanged endpoint, used to exclude true edges.
    DGLIdIters neigh_it;
    if (IsNegativeHeadMode(neg_mode)) {
      neigh_it = gptr_->PredVec(induced_vid_data[unchanged[i]]);
    } else {
      neigh_it = gptr_->SuccVec(induced_vid_data[unchanged[i]]);
    }

    // If the number of negative nodes is smaller than the number of total
    // nodes in the graph.
    if (exclude_positive && neg_sample_size < num_tot_nodes) {
      std::vector<size_t> exclude;
      for (auto it = neigh_it.begin(); it != neigh_it.end(); it++) {
        dgl_id_t global_vid = *it;
        exclude.push_back(global_vid);
      }
      prev_neg_offset = neg_vids.size();
      randomSample(num_tot_nodes, neg_sample_size, exclude, &neg_vids);
      assert(
          static_cast<size_t>(prev_neg_offset + neg_sample_size) ==
          neg_vids.size());
    } else if (neg_sample_size < num_tot_nodes) {
      prev_neg_offset = neg_vids.size();
      randomSample(num_tot_nodes, neg_sample_size, &neg_vids);
      assert(
          static_cast<size_t>(prev_neg_offset + neg_sample_size) ==
          neg_vids.size());
    } else if (exclude_positive) {
      LOG(FATAL) << "We can't exclude positive edges"
                    "when sampling negative edges with all nodes.";
    } else {
      // We don't need to do anything here.
      // In this case, every edge has the same negative edges. That is,
      // neg_vids contains all nodes of the graph. They have been generated
      // before the for loop.
    }

    dgl_id_t global_unchanged = induced_vid_data[unchanged[i]];
    dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);

    for (int64_t j = 0; j < neg_sample_size; j++) {
      neg_unchanged[neg_idx + j] = local_unchanged;
      dgl_id_t local_changed =
          global2local_map(neg_vids[j + prev_neg_offset], &neg_map);
      neg_changed[neg_idx + j] = local_changed;
      // induced negative eid references to the positive one.
      induced_neg_eid_data[neg_idx + j] = induced_eid_data[i];
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid =
      IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data =
      static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  NegSubgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;

  if (IsNegativeHeadMode(neg_mode)) {
    neg_subg.head_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
    neg_subg.tail_nid = aten::VecToIdArray(local_pos_vids);
  } else {
    neg_subg.head_nid = aten::VecToIdArray(local_pos_vids);
    neg_subg.tail_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
  }
  // TODO(zhengda) we should provide an array of 1s if exclude_positive
  if (check_false_neg) {
    if (aten::IsNullArray(relations_)) {
      neg_subg.exist = CheckExistence(gptr_, neg_src, neg_dst, induced_neg_vid);
    } else {
      neg_subg.exist = CheckExistence(
          gptr_, relations_, neg_src, neg_dst, induced_neg_vid,
          induced_neg_eid);
    }
  }
  return neg_subg;
}

1242
1243
1244
/**
 * @brief Generate a negative edge subgraph where positive edges are split
 * into chunks of chunk_size_ and every edge in a chunk shares the same set of
 * neg_sample_size corrupted endpoints (the "chunked" KG-style negative
 * sampling, much cheaper than per-edge sampling).
 *
 * @param pos_subg The positive edge subgraph.
 * @param neg_mode "head" corrupts sources, otherwise destinations.
 * @param neg_sample_size Negative samples per chunk (clamped to the number of
 *        vertices in the parent graph).
 * @param exclude_positive Unused here; sampling is with a shared pool.
 * @param check_false_neg If true, mark negative edges that actually exist in
 *        the parent graph in neg_subg.exist.
 * @return The negative subgraph with induced vertex/edge mappings.
 */
NegSubgraph EdgeSamplerObject::genChunkedNegEdgeSubgraph(
    const Subgraph &pos_subg, const std::string &neg_mode,
    int64_t neg_sample_size, bool exclude_positive, bool check_false_neg) {
  int64_t num_tot_nodes = gptr_->NumVertices();
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
  int64_t num_pos_edges = coo->shape[0] / 2;
  if (neg_sample_size > num_tot_nodes) neg_sample_size = num_tot_nodes;

  int64_t chunk_size = chunk_size_;
  CHECK_GT(chunk_size, 0) << "chunk size has to be positive";
  // If num_pos_edges isn't divisible by chunk_size, the actual number of
  // chunks is num_chunks + 1 and the last chunk size is last_chunk_size.
  // Otherwise, the actual number of chunks is num_chunks, the last chunk size
  // is 0.
  int64_t num_chunks = num_pos_edges / chunk_size;
  int64_t last_chunk_size = num_pos_edges - num_chunks * chunk_size;

  // The number of negative edges.
  int64_t num_neg_edges = neg_sample_size * chunk_size * num_chunks;
  int64_t num_neg_edges_last_chunk = neg_sample_size * last_chunk_size;
  int64_t num_all_neg_edges = num_neg_edges + num_neg_edges_last_chunk;

  // We should include the last chunk.
  if (last_chunk_size > 0) num_chunks++;

  IdArray neg_dst = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid =
      IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data =
      static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *induced_neg_eid_data =
      static_cast<dgl_id_t *>(induced_neg_eid->data);

  // "unchanged" is the endpoint kept from the positive edge; "neg_changed"
  // receives the corrupted endpoint.
  const dgl_id_t *unchanged;
  dgl_id_t *neg_unchanged;
  dgl_id_t *neg_changed;
  if (IsNegativeHeadMode(neg_mode)) {
    unchanged = dst_data;
    neg_unchanged = neg_dst_data;
    neg_changed = neg_src_data;
  } else {
    unchanged = src_data;
    neg_unchanged = neg_src_data;
    neg_changed = neg_dst_data;
  }

  // We first sample all negative edges.
  std::vector<size_t> global_neg_vids;
  std::vector<size_t> local_neg_vids;
  randomSample(num_tot_nodes, num_chunks * neg_sample_size, &global_neg_vids);
  CHECK_EQ(num_chunks * neg_sample_size, global_neg_vids.size());

  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  dgl_id_t local_vid = 0;

  // Collect nodes in the positive side.
  std::vector<dgl_id_t> local_pos_vids;
  local_pos_vids.reserve(num_pos_edges);
  for (int64_t i = 0; i < num_pos_edges; i++) {
    dgl_id_t vid = induced_vid_data[unchanged[i]];
    auto it = neg_map.find(vid);
    if (it == neg_map.end()) {
      local_pos_vids.push_back(local_vid);
      neg_map.insert(std::pair<dgl_id_t, dgl_id_t>(vid, local_vid++));
    }
  }

  // We should map the global negative nodes to local Ids in advance
  // to reduce computation overhead.
  local_neg_vids.resize(global_neg_vids.size());
  for (size_t i = 0; i < global_neg_vids.size(); i++) {
    local_neg_vids[i] = global2local_map(global_neg_vids[i], &neg_map);
  }

  for (int64_t i_chunk = 0; i_chunk < num_chunks; i_chunk++) {
    // for each chunk.
    int64_t neg_idx = neg_sample_size * chunk_size * i_chunk;
    int64_t pos_edge_idx = chunk_size * i_chunk;
    int64_t neg_node_idx = neg_sample_size * i_chunk;
    // The actual chunk size. It'll be different for the last chunk.
    int64_t chunk_size1;
    if (i_chunk == num_chunks - 1 && last_chunk_size > 0)
      chunk_size1 = last_chunk_size;
    else
      chunk_size1 = chunk_size;

    for (int64_t in_chunk = 0; in_chunk != chunk_size1; ++in_chunk) {
      // For each positive node in a chunk.
      dgl_id_t global_unchanged =
          induced_vid_data[unchanged[pos_edge_idx + in_chunk]];
      dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);
      for (int64_t j = 0; j < neg_sample_size; ++j) {
        neg_unchanged[neg_idx] = local_unchanged;
        neg_changed[neg_idx] = local_neg_vids[neg_node_idx + j];
        induced_neg_eid_data[neg_idx] =
            induced_eid_data[pos_edge_idx + in_chunk];
        neg_idx++;
      }
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid =
      IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data =
      static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  NegSubgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;
  if (IsNegativeHeadMode(neg_mode)) {
    neg_subg.head_nid =
        aten::VecToIdArray(Global2Local(global_neg_vids, neg_map));
    neg_subg.tail_nid = aten::VecToIdArray(local_pos_vids);
  } else {
    neg_subg.head_nid = aten::VecToIdArray(local_pos_vids);
    neg_subg.tail_nid =
        aten::VecToIdArray(Global2Local(global_neg_vids, neg_map));
  }
  if (check_false_neg) {
    if (aten::IsNullArray(relations_)) {
      neg_subg.exist = CheckExistence(gptr_, neg_src, neg_dst, induced_neg_vid);
    } else {
      neg_subg.exist = CheckExistence(
          gptr_, relations_, neg_src, neg_dst, induced_neg_vid,
          induced_neg_eid);
    }
  }
  return neg_subg;
}

1396
// Wrap a Subgraph value into a reference object for the C API.
inline SubgraphRef ConvertRef(const Subgraph &subg) {
  // make_shared: single allocation instead of separate object + control block.
  return SubgraphRef(std::make_shared<Subgraph>(subg));
}

1400
// Wrap a NegSubgraph value into a (base-typed) reference object for the C API.
inline SubgraphRef ConvertRef(const NegSubgraph &subg) {
  // make_shared: single allocation; the shared_ptr<NegSubgraph> converts to
  // shared_ptr<Subgraph> implicitly.
  return SubgraphRef(std::make_shared<NegSubgraph>(subg));
}

1404
1405
}  // namespace

1406
// C API entry: return the false-negative indicator array of a negative
// subgraph.
DGL_REGISTER_GLOBAL("sampling._CAPI_GetNegEdgeExistence")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      SubgraphRef g = args[0];
      auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
      *rv = gptr->exist;
    });
1412

1413
// C API entry: return the head node ids of a negative subgraph.
DGL_REGISTER_GLOBAL("sampling._CAPI_GetEdgeSubgraphHead")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      SubgraphRef g = args[0];
      auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
      *rv = gptr->head_nid;
    });
1419
1420

// C API entry: return the tail node ids of a negative subgraph.
DGL_REGISTER_GLOBAL("sampling._CAPI_GetEdgeSubgraphTail")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      SubgraphRef g = args[0];
      auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
      *rv = gptr->tail_nid;
    });

class UniformEdgeSamplerObject : public EdgeSamplerObject {
 public:
  explicit UniformEdgeSamplerObject(
      const GraphPtr gptr, IdArray seed_edges, const int64_t batch_size,
      const int64_t num_workers, const bool replacement, const bool reset,
      const std::string neg_mode, const int64_t neg_sample_size,
      const int64_t chunk_size, const bool exclude_positive,
      const bool check_false_neg, IdArray relations)
      : EdgeSamplerObject(
            gptr, seed_edges, batch_size, num_workers, replacement, reset,
            neg_mode, neg_sample_size, chunk_size, exclude_positive,
            check_false_neg, relations) {
1439
1440
1441
    batch_curr_id_ = 0;
    num_seeds_ = seed_edges->shape[0];
    max_batch_id_ = (num_seeds_ + batch_size - 1) / batch_size;
1442

1443
1444
1445
1446
1447
    // TODO(song): Tricky thing here to make sure gptr_ has coo cache
    gptr_->FindEdge(0);
  }
  ~UniformEdgeSamplerObject() {}

1448
1449
1450
  void Fetch(DGLRetValue *rv) {
    const int64_t num_workers =
        std::min(num_workers_, max_batch_id_ - batch_curr_id_);
1451
1452
1453
    // generate subgraphs.
    std::vector<SubgraphRef> positive_subgs(num_workers);
    std::vector<SubgraphRef> negative_subgs(num_workers);
1454

1455
1456
1457
1458
1459
1460
    runtime::parallel_for(0, num_workers, [&](size_t b, size_t e) {
      for (auto i = b; i < e; ++i) {
        const int64_t start = (batch_curr_id_ + i) * batch_size_;
        const int64_t end = std::min(start + batch_size_, num_seeds_);
        const int64_t num_edges = end - start;
        IdArray worker_seeds;
1461

1462
        if (replacement_ == false) {
1463
1464
1465
          worker_seeds = seed_edges_.CreateView(
              {num_edges}, DGLDataType{kDGLInt, 64, 1},
              sizeof(dgl_id_t) * start);
1466
1467
        } else {
          std::vector<dgl_id_t> seeds;
1468
1469
          const dgl_id_t *seed_edge_ids =
              static_cast<const dgl_id_t *>(seed_edges_->data);
1470
1471
1472
1473
1474
1475
1476
1477
1478
          // sampling of each edge is a standalone event
          for (int64_t i = 0; i < num_edges; ++i) {
            int64_t seed = static_cast<const int64_t>(
                RandomEngine::ThreadLocal()->RandInt(num_seeds_));
            seeds.push_back(seed_edge_ids[seed]);
          }

          worker_seeds = aten::VecToIdArray(seeds, seed_edges_->dtype.bits);
        }
1479

1480
1481
1482
1483
1484
        EdgeArray arr = gptr_->FindEdges(worker_seeds);
        const dgl_id_t *src_ids = static_cast<const dgl_id_t *>(arr.src->data);
        const dgl_id_t *dst_ids = static_cast<const dgl_id_t *>(arr.dst->data);
        std::vector<dgl_id_t> src_vec(src_ids, src_ids + num_edges);
        std::vector<dgl_id_t> dst_vec(dst_ids, dst_ids + num_edges);
1485
1486
        // TODO(zhengda) what if there are duplicates in the src and dst
        // vectors.
1487
1488
1489

        Subgraph subg = gptr_->EdgeSubgraph(worker_seeds, false);
        positive_subgs[i] = ConvertRef(subg);
1490
1491
        // For chunked negative sampling, we accept "chunk-head" for corrupting
        // head nodes and "chunk-tail" for corrupting tail nodes.
1492
        if (neg_mode_.substr(0, 5) == "chunk") {
1493
1494
1495
          NegSubgraph neg_subg = genChunkedNegEdgeSubgraph(
              subg, neg_mode_.substr(6), neg_sample_size_, exclude_positive_,
              check_false_neg_);
1496
1497
          negative_subgs[i] = ConvertRef(neg_subg);
        } else if (neg_mode_ == "head" || neg_mode_ == "tail") {
1498
1499
1500
          NegSubgraph neg_subg = genNegEdgeSubgraph(
              subg, neg_mode_, neg_sample_size_, exclude_positive_,
              check_false_neg_);
1501
1502
          negative_subgs[i] = ConvertRef(neg_subg);
        }
1503
      }
1504
    });
1505
    if (neg_mode_.size() > 0) {
1506
1507
      positive_subgs.insert(
          positive_subgs.end(), negative_subgs.begin(), negative_subgs.end());
1508
    }
1509
1510
    batch_curr_id_ += num_workers;

1511
1512
1513
1514
    if (batch_curr_id_ >= max_batch_id_ && reset_ == true) {
      Reset();
    }

1515
    *rv = List<SubgraphRef>(positive_subgs);
1516
  }
1517

1518
1519
1520
1521
1522
  void Reset() {
    batch_curr_id_ = 0;
    if (replacement_ == false) {
      // Now we should shuffle the data and reset the sampler.
      dgl_id_t *seed_ids = static_cast<dgl_id_t *>(seed_edges_->data);
1523
1524
1525
      std::shuffle(
          seed_ids, seed_ids + seed_edges_->shape[0],
          std::default_random_engine());
1526
1527
1528
    }
  }

1529
  DGL_DECLARE_OBJECT_TYPE_INFO(UniformEdgeSamplerObject, Object);
1530

1531
1532
 private:
  void randomSample(size_t set_size, size_t num, std::vector<size_t> *out) {
1533
1534
1535
    RandomSample(set_size, num, out);
  }

1536
1537
1538
  void randomSample(
      size_t set_size, size_t num, const std::vector<size_t> &exclude,
      std::vector<size_t> *out) {
1539
1540
1541
1542
1543
1544
1545
1546
    RandomSample(set_size, num, exclude, out);
  }

  int64_t batch_curr_id_;
  int64_t max_batch_id_;
  int64_t num_seeds_;
};

1547
class UniformEdgeSampler : public ObjectRef {
1548
1549
 public:
  UniformEdgeSampler() {}
1550
1551
  explicit UniformEdgeSampler(std::shared_ptr<runtime::Object> obj)
      : ObjectRef(obj) {}
1552

1553
1554
  UniformEdgeSamplerObject *operator->() const {
    return static_cast<UniformEdgeSamplerObject *>(obj_.get());
1555
1556
1557
  }

  std::shared_ptr<UniformEdgeSamplerObject> sptr() const {
1558
1559
    return CHECK_NOTNULL(
        std::dynamic_pointer_cast<UniformEdgeSamplerObject>(obj_));
1560
1561
1562
1563
1564
1565
1566
  }

  operator bool() const { return this->defined(); }
  using ContainerType = UniformEdgeSamplerObject;
};

// Creates a uniform edge sampler over an immutable, CPU-resident graph.
DGL_REGISTER_GLOBAL("sampling._CAPI_CreateUniformEdgeSampler")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      // arguments
      GraphRef g = args[0];
      IdArray seed_edges = args[1];
      const int64_t batch_size = args[2];
      const int64_t max_num_workers = args[3];
      const bool replacement = args[4];
      const bool reset = args[5];
      const std::string neg_mode = args[6];
      const int neg_sample_size = args[7];
      const bool exclude_positive = args[8];
      const bool check_false_neg = args[9];
      IdArray relations = args[10];
      const int64_t chunk_size = args[11];
      // process args
      auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
      CHECK(gptr) << "sampling isn't implemented in mutable graph";
      CHECK(aten::IsValidIdArray(seed_edges));
      CHECK_EQ(seed_edges->ctx.device_type, kDGLCPU)
          << "UniformEdgeSampler only support CPU sampling";

      if (relations->shape[0] > 0) {
        CHECK(aten::IsValidIdArray(relations));
        // BUGFIX: message previously said "WeightedEdgeSampler" (copy-paste).
        CHECK_EQ(relations->ctx.device_type, kDGLCPU)
            << "UniformEdgeSampler only support CPU sampling";
      }
      // Sampling requires the COO representation to be materialized.
      BuildCoo(*gptr);

      auto o = std::make_shared<UniformEdgeSamplerObject>(
          gptr, seed_edges, batch_size, max_num_workers, replacement, reset,
          neg_mode, neg_sample_size, chunk_size, exclude_positive,
          check_false_neg, relations);
      *rv = o;
    });
1601

1602
// Fetches the next group of batches from a uniform edge sampler.
DGL_REGISTER_GLOBAL("sampling._CAPI_FetchUniformEdgeSample")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      UniformEdgeSampler handle = args[0];
      handle->Fetch(rv);
    });
1607

1608
// Rewinds a uniform edge sampler to its first batch.
DGL_REGISTER_GLOBAL("sampling._CAPI_ResetUniformEdgeSample")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      UniformEdgeSampler handle = args[0];
      handle->Reset();
    });
1613

1614
1615
template <typename ValueType>
class WeightedEdgeSamplerObject : public EdgeSamplerObject {
1616
 public:
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
  explicit WeightedEdgeSamplerObject(
      const GraphPtr gptr, IdArray seed_edges, NDArray edge_weight,
      NDArray node_weight, const int64_t batch_size, const int64_t num_workers,
      const bool replacement, const bool reset, const std::string neg_mode,
      const int64_t neg_sample_size, const int64_t chunk_size,
      const bool exclude_positive, const bool check_false_neg,
      IdArray relations)
      : EdgeSamplerObject(
            gptr, seed_edges, batch_size, num_workers, replacement, reset,
            neg_mode, neg_sample_size, chunk_size, exclude_positive,
            check_false_neg, relations) {
1628
    const int64_t num_edges = edge_weight->shape[0];
1629
1630
    const ValueType *edge_prob =
        static_cast<const ValueType *>(edge_weight->data);
1631
    std::vector<ValueType> eprob(num_edges);
1632
    for (int64_t i = 0; i < num_edges; ++i) {
1633
1634
1635
      eprob[i] = edge_prob[i];
    }
    edge_selector_ = std::make_shared<ArrayHeap<ValueType>>(eprob);
1636
    edge_weight_ = edge_weight;
1637
1638
1639
1640
1641

    const size_t num_nodes = node_weight->shape[0];
    if (num_nodes == 0) {
      node_selector_ = nullptr;
    } else {
1642
1643
      const ValueType *node_prob =
          static_cast<const ValueType *>(node_weight->data);
1644
1645
1646
1647
1648
1649
1650
      std::vector<ValueType> nprob(num_nodes);
      for (size_t i = 0; i < num_nodes; ++i) {
        nprob[i] = node_prob[i];
      }
      node_selector_ = std::make_shared<ArrayHeap<ValueType>>(nprob);
    }

1651
1652
1653
    curr_batch_id_ = 0;
    // handle int64 overflow here
    max_batch_id_ = (num_edges + batch_size - 1) / batch_size;
1654
1655
1656
1657
    // TODO(song): Tricky thing here to make sure gptr_ has coo cache
    gptr_->FindEdge(0);
  }

1658
  ~WeightedEdgeSamplerObject() {}
1659

1660
1661
1662
  void Fetch(DGLRetValue *rv) {
    const int64_t num_workers =
        std::min(num_workers_, max_batch_id_ - curr_batch_id_);
1663
    // generate subgraphs.
1664
1665
1666
    std::vector<SubgraphRef> positive_subgs(num_workers);
    std::vector<SubgraphRef> negative_subgs(num_workers);

1667
#pragma omp parallel for
1668
    for (int i = 0; i < num_workers; i++) {
1669
1670
      const dgl_id_t *seed_edge_ids =
          static_cast<const dgl_id_t *>(seed_edges_->data);
1671
1672
1673
1674
1675
1676
      std::vector<size_t> edge_ids(batch_size_);

      if (replacement_ == false) {
        size_t n = batch_size_;
        size_t num_ids = 0;
#pragma omp critical
1677
        { num_ids = edge_selector_->SampleWithoutReplacement(n, &edge_ids); }
1678
1679
1680
        edge_ids.resize(num_ids);
        for (size_t i = 0; i < num_ids; ++i) {
          edge_ids[i] = seed_edge_ids[edge_ids[i]];
1681
1682
1683
1684
1685
1686
1687
        }
      } else {
        // sampling of each edge is a standalone event
        for (int i = 0; i < batch_size_; ++i) {
          size_t edge_id = edge_selector_->Sample();
          edge_ids[i] = seed_edge_ids[edge_id];
        }
1688
      }
1689

1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
      auto worker_seeds = aten::VecToIdArray(edge_ids, seed_edges_->dtype.bits);

      EdgeArray arr = gptr_->FindEdges(worker_seeds);
      const dgl_id_t *src_ids = static_cast<const dgl_id_t *>(arr.src->data);
      const dgl_id_t *dst_ids = static_cast<const dgl_id_t *>(arr.dst->data);
      std::vector<dgl_id_t> src_vec(src_ids, src_ids + batch_size_);
      std::vector<dgl_id_t> dst_vec(dst_ids, dst_ids + batch_size_);
      // TODO(zhengda) what if there are duplicates in the src and dst vectors.
      Subgraph subg = gptr_->EdgeSubgraph(worker_seeds, false);
      positive_subgs[i] = ConvertRef(subg);
1700
1701
      // For chunked negative sampling, we accept "chunk-head" for corrupting
      // head nodes and "chunk-tail" for corrupting tail nodes.
1702
      if (neg_mode_.substr(0, 5) == "chunk") {
1703
1704
1705
        NegSubgraph neg_subg = genChunkedNegEdgeSubgraph(
            subg, neg_mode_.substr(6), neg_sample_size_, exclude_positive_,
            check_false_neg_);
1706
        negative_subgs[i] = ConvertRef(neg_subg);
1707
      } else if (neg_mode_ == "head" || neg_mode_ == "tail") {
1708
1709
1710
        NegSubgraph neg_subg = genNegEdgeSubgraph(
            subg, neg_mode_, neg_sample_size_, exclude_positive_,
            check_false_neg_);
1711
1712
1713
        negative_subgs[i] = ConvertRef(neg_subg);
      }
    }
1714
1715
1716
1717
1718
    curr_batch_id_ += num_workers;

    if (curr_batch_id_ >= max_batch_id_ && reset_ == true) {
      Reset();
    }
1719
1720

    if (neg_mode_.size() > 0) {
1721
1722
      positive_subgs.insert(
          positive_subgs.end(), negative_subgs.begin(), negative_subgs.end());
1723
1724
1725
1726
    }
    *rv = List<SubgraphRef>(positive_subgs);
  }

1727
1728
1729
1730
  void Reset() {
    curr_batch_id_ = 0;
    if (replacement_ == false) {
      const int64_t num_edges = edge_weight_->shape[0];
1731
1732
      const ValueType *edge_prob =
          static_cast<const ValueType *>(edge_weight_->data);
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
      std::vector<ValueType> eprob(num_edges);
      for (int64_t i = 0; i < num_edges; ++i) {
        eprob[i] = edge_prob[i];
      }

      // rebuild the edge_selector_
      edge_selector_ = std::make_shared<ArrayHeap<ValueType>>(eprob);
    }
  }

1743
1744
  DGL_DECLARE_OBJECT_TYPE_INFO(WeightedEdgeSamplerObject<ValueType>, Object);

1745
1746
 private:
  void randomSample(size_t set_size, size_t num, std::vector<size_t> *out) {
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
    if (num < set_size) {
      std::unordered_set<size_t> sampled_idxs;
      while (sampled_idxs.size() < num) {
        if (node_selector_ == nullptr) {
          sampled_idxs.insert(RandomEngine::ThreadLocal()->RandInt(set_size));
        } else {
          size_t id = node_selector_->Sample();
          sampled_idxs.insert(id);
        }
      }

      out->insert(out->end(), sampled_idxs.begin(), sampled_idxs.end());
    } else {
      // If we need to sample all elements in the set, we don't need to
      // generate random numbers.
1762
      for (size_t i = 0; i < set_size; i++) out->push_back(i);
1763
1764
1765
    }
  }

1766
1767
1768
  void randomSample(
      size_t set_size, size_t num, const std::vector<size_t> &exclude,
      std::vector<size_t> *out) {
1769
1770
1771
1772
1773
1774
1775
1776
    std::unordered_map<size_t, int> sampled_idxs;
    for (auto v : exclude) {
      sampled_idxs.insert(std::pair<size_t, int>(v, 0));
    }
    if (num + exclude.size() < set_size) {
      while (sampled_idxs.size() < num + exclude.size()) {
        size_t rand;
        if (node_selector_ == nullptr) {
1777
          rand = RandomEngine::ThreadLocal()->RandInt(set_size);
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
        } else {
          rand = node_selector_->Sample();
        }
        sampled_idxs.insert(std::pair<size_t, int>(rand, 1));
      }
      for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) {
        if (it->second) {
          out->push_back(it->first);
        }
      }
    } else {
      // If we need to sample all elements in the set, we don't need to
      // generate random numbers.
      for (size_t i = 0; i < set_size; i++) {
        // If the element doesn't exist in exclude.
        if (sampled_idxs.find(i) == sampled_idxs.end()) {
          out->push_back(i);
        }
      }
    }
  }

1800
 private:
1801
1802
  std::shared_ptr<ArrayHeap<ValueType>> edge_selector_;
  std::shared_ptr<ArrayHeap<ValueType>> node_selector_;
1803
1804
1805
1806

  NDArray edge_weight_;
  int64_t curr_batch_id_;
  int64_t max_batch_id_;
1807
1808
1809
1810
};

// Explicit instantiation: the C API below only exposes the float variant.
template class WeightedEdgeSamplerObject<float>;

1811
class FloatWeightedEdgeSampler : public ObjectRef {
1812
1813
 public:
  FloatWeightedEdgeSampler() {}
1814
1815
  explicit FloatWeightedEdgeSampler(std::shared_ptr<runtime::Object> obj)
      : ObjectRef(obj) {}
1816

1817
1818
  WeightedEdgeSamplerObject<float> *operator->() const {
    return static_cast<WeightedEdgeSamplerObject<float> *>(obj_.get());
1819
1820
1821
  }

  std::shared_ptr<WeightedEdgeSamplerObject<float>> sptr() const {
1822
1823
    return CHECK_NOTNULL(
        std::dynamic_pointer_cast<WeightedEdgeSamplerObject<float>>(obj_));
1824
1825
1826
1827
1828
1829
1830
  }

  operator bool() const { return this->defined(); }
  using ContainerType = WeightedEdgeSamplerObject<float>;
};

// Creates a weighted edge sampler over an immutable, CPU-resident graph.
// Edge and (optional) node weights must be 32-bit floats on CPU.
DGL_REGISTER_GLOBAL("sampling._CAPI_CreateWeightedEdgeSampler")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      // arguments
      GraphRef g = args[0];
      IdArray seed_edges = args[1];
      NDArray edge_weight = args[2];
      NDArray node_weight = args[3];
      const int64_t batch_size = args[4];
      const int64_t max_num_workers = args[5];
      const bool replacement = args[6];
      const bool reset = args[7];
      const std::string neg_mode = args[8];
      const int64_t neg_sample_size = args[9];
      const bool exclude_positive = args[10];
      const bool check_false_neg = args[11];
      IdArray relations = args[12];
      const int64_t chunk_size = args[13];

      auto igptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
      CHECK(igptr) << "sampling isn't implemented in mutable graph";
      CHECK(aten::IsValidIdArray(seed_edges));
      CHECK_EQ(seed_edges->ctx.device_type, kDGLCPU)
          << "WeightedEdgeSampler only support CPU sampling";
      CHECK(edge_weight->dtype.code == kDGLFloat)
          << "edge_weight should be FloatType";
      CHECK(edge_weight->dtype.bits == 32)
          << "WeightedEdgeSampler only support float weight";
      CHECK_EQ(edge_weight->ctx.device_type, kDGLCPU)
          << "WeightedEdgeSampler only support CPU sampling";
      if (node_weight->shape[0] > 0) {
        CHECK(node_weight->dtype.code == kDGLFloat)
            << "node_weight should be FloatType";
        CHECK(node_weight->dtype.bits == 32)
            << "WeightedEdgeSampler only support float weight";
        CHECK_EQ(node_weight->ctx.device_type, kDGLCPU)
            << "WeightedEdgeSampler only support CPU sampling";
      }
      if (relations->shape[0] > 0) {
        CHECK(aten::IsValidIdArray(relations));
        CHECK_EQ(relations->ctx.device_type, kDGLCPU)
            << "WeightedEdgeSampler only support CPU sampling";
      }
      // Sampling requires the COO representation to be materialized.
      BuildCoo(*igptr);

      // Cap the worker count by the number of batches actually available.
      const int64_t total_seeds = seed_edges->shape[0];
      const int64_t batches = (total_seeds + batch_size - 1) / batch_size;
      const int64_t worker_count = std::min(max_num_workers, batches);

      auto sampler = std::make_shared<WeightedEdgeSamplerObject<float>>(
          igptr, seed_edges, edge_weight, node_weight, batch_size,
          worker_count, replacement, reset, neg_mode, neg_sample_size,
          chunk_size, exclude_positive, check_false_neg, relations);
      *rv = sampler;
    });
1884
1885

// Fetches the next group of batches from a weighted edge sampler.
DGL_REGISTER_GLOBAL("sampling._CAPI_FetchWeightedEdgeSample")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      FloatWeightedEdgeSampler handle = args[0];
      handle->Fetch(rv);
    });
1890

1891
// Rewinds a weighted edge sampler to its first batch.
DGL_REGISTER_GLOBAL("sampling._CAPI_ResetWeightedEdgeSample")
    .set_body([](DGLArgs args, DGLRetValue *rv) {
      FloatWeightedEdgeSampler handle = args[0];
      handle->Reset();
    });
1896

Da Zheng's avatar
Da Zheng committed
1897
}  // namespace dgl