"docs/vscode:/vscode.git/clone" did not exist on "bbb4a9a11de9aa3d28773405b11d1af8690be122"
sampler.cc 73.7 KB
Newer Older
Da Zheng's avatar
Da Zheng committed
1
2
3
4
5
6
7
/*!
 *  Copyright (c) 2018 by Contributors
 * \file graph/sampler.cc
 * \brief DGL sampler implementation
 */
#include <dgl/sampler.h>
#include <dgl/immutable_graph.h>
#include <dgl/runtime/container.h>
#include <dgl/packed_func_ext.h>
#include <dgl/random.h>
#include <dmlc/omp.h>
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <numeric>
#include "../c_api_common.h"
#include "../array/common.h"  // for ATEN_FLOAT_TYPE_SWITCH

using namespace dgl::runtime;

namespace dgl {

namespace {
/*
 * ArrayHeap is used to sample elements from a vector.
 */
template<typename ValueType>
class ArrayHeap {
 public:
  explicit ArrayHeap(const std::vector<ValueType>& prob) {
    vec_size_ = prob.size();
    bit_len_ = ceil(log2(vec_size_));
    limit_ = 1UL << bit_len_;
    // allocate twice the size
    heap_.resize(limit_ << 1, 0);
    // allocate the leaves
    for (size_t i = limit_; i < vec_size_+limit_; ++i) {
      heap_[i] = prob[i-limit_];
    }
    // iterate up the tree (this is O(m))
    for (int i = bit_len_-1; i >= 0; --i) {
      for (size_t j = (1UL << i); j < (1UL << (i + 1)); ++j) {
        heap_[j] = heap_[j << 1] + heap_[(j << 1) + 1];
      }
    }
  }
  ~ArrayHeap() {}

  /*
   * Remove term from index (this costs O(log m) steps)
   */
  void Delete(size_t index) {
    size_t i = index + limit_;
    ValueType w = heap_[i];
    for (int j = bit_len_; j >= 0; --j) {
      heap_[i] -= w;
      i = i >> 1;
    }
  }

  /*
   * Add value w to index (this costs O(log m) steps)
   */
  void Add(size_t index, ValueType w) {
    size_t i = index + limit_;
    for (int j = bit_len_; j >= 0; --j) {
      heap_[i] += w;
      i = i >> 1;
    }
  }

  /*
   * Sample from arrayHeap
   */
  size_t Sample() {
    // heap_ is empty
    ValueType xi = heap_[1] * RandomEngine::ThreadLocal()->Uniform<float>();
    size_t i = 1;
    while (i < limit_) {
      i = i << 1;
      if (xi >= heap_[i]) {
        xi -= heap_[i];
        i += 1;
      }
    }
    return i - limit_;
  }

  /*
   * Sample a vector of given size n
   */
  size_t SampleWithoutReplacement(size_t n, std::vector<size_t>* samples) {
    // sample n elements
    size_t i = 0;
    for (; i < n; ++i) {
      // heap is empty
      if (heap_[1] == 0) {
        break;
      }
      samples->at(i) = this->Sample();
      this->Delete(samples->at(i));
    }

    return i;
  }

 private:
  size_t vec_size_;  // sample size
  int bit_len_;   // bit size
  size_t limit_;
  std::vector<ValueType> heap_;
};
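// A minimal usage sketch for ArrayHeap (illustrative only, not part of the
// library API): the constructor stores the per-element weights in the leaves
// of a complete binary tree, Sample() walks down from the root in O(log m),
// and SampleWithoutReplacement() removes each drawn leaf's weight via Delete().
//
//   std::vector<float> prob = {0.1f, 0.4f, 0.2f, 0.3f};
//   ArrayHeap<float> heap(prob);
//   std::vector<size_t> picked(2);
//   size_t n = heap.SampleWithoutReplacement(2, &picked);
//   // picked[0..n) now holds two distinct indices, drawn with probability
//   // proportional to their weights.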

///////////////////////// Samplers //////////////////////////
class EdgeSamplerObject: public Object {
 public:
  EdgeSamplerObject(const GraphPtr gptr,
                    IdArray seed_edges,
                    const int64_t batch_size,
                    const int64_t num_workers,
                    const bool replacement,
                    const bool reset,
                    const std::string neg_mode,
                    const int64_t neg_sample_size,
                    const bool exclude_positive,
                    const bool check_false_neg,
                    IdArray relations) {
    gptr_ = gptr;
    seed_edges_ = seed_edges;
    relations_ = relations;

    batch_size_ = batch_size;
    num_workers_ = num_workers;
    replacement_ = replacement;
    reset_ = reset;
    neg_mode_ = neg_mode;
    neg_sample_size_ = neg_sample_size;
    exclude_positive_ = exclude_positive;
    check_false_neg_ = check_false_neg;
  }

  ~EdgeSamplerObject() {}

  virtual void Fetch(DGLRetValue* rv) = 0;
  virtual void Reset() = 0;

 protected:
  virtual void randomSample(size_t set_size, size_t num, std::vector<size_t>* out) = 0;
  virtual void randomSample(size_t set_size, size_t num, const std::vector<size_t> &exclude,
                    std::vector<size_t>* out) = 0;

  NegSubgraph genNegEdgeSubgraph(const Subgraph &pos_subg,
                                 const std::string &neg_mode,
                                 int64_t neg_sample_size,
                                 bool exclude_positive,
                                 bool check_false_neg);
  NegSubgraph genPBGNegEdgeSubgraph(const Subgraph &pos_subg,
                                    const std::string &neg_mode,
                                    int64_t neg_sample_size,
                                    bool exclude_positive,
                                    bool check_false_neg);

  GraphPtr gptr_;
  IdArray seed_edges_;
  IdArray relations_;

  int64_t batch_size_;
  int64_t num_workers_;
  bool replacement_;
  int64_t reset_;
  std::string neg_mode_;
  int64_t neg_sample_size_;
  bool exclude_positive_;
  bool check_false_neg_;
};

/*
 * Uniformly sample integers from [0, set_size) without replacement.
 */
void RandomSample(size_t set_size, size_t num, std::vector<size_t>* out) {
  if (num < set_size) {
    std::unordered_set<size_t> sampled_idxs;
    while (sampled_idxs.size() < num) {
      sampled_idxs.insert(RandomEngine::ThreadLocal()->RandInt(set_size));
    }
    out->insert(out->end(), sampled_idxs.begin(), sampled_idxs.end());
  } else {
    // If we need to sample all elements in the set, we don't need to
    // generate random numbers.
    for (size_t i = 0; i < set_size; i++)
      out->push_back(i);
  }
}

void RandomSample(size_t set_size, size_t num, const std::vector<size_t> &exclude,
                  std::vector<size_t>* out) {
  std::unordered_map<size_t, int> sampled_idxs;
  for (auto v : exclude) {
    sampled_idxs.insert(std::pair<size_t, int>(v, 0));
  }
  if (num + exclude.size() < set_size) {
    while (sampled_idxs.size() < num + exclude.size()) {
      size_t rand = RandomEngine::ThreadLocal()->RandInt(set_size);
      sampled_idxs.insert(std::pair<size_t, int>(rand, 1));
    }
    for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) {
      if (it->second) {
        out->push_back(it->first);
      }
    }
  } else {
    // If we need to sample all elements in the set, we don't need to
    // generate random numbers.
    for (size_t i = 0; i < set_size; i++) {
      // If the element doesn't exist in exclude.
      if (sampled_idxs.find(i) == sampled_idxs.end()) {
        out->push_back(i);
      }
    }
  }
}
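// Illustrative call (not part of the library): draw 3 distinct integers from
// [0, 10) while never returning 2 or 5.
//
//   std::vector<size_t> out;
//   RandomSample(10, 3, {2, 5}, &out);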

/*
 * For a sparse array whose non-zeros are represented by nz_idxs,
 * negate the sparse array and output the non-zeros in the negated array.
 */
void NegateArray(const std::vector<size_t> &nz_idxs,
                 size_t arr_size,
                 std::vector<size_t>* out) {
  // nz_idxs must have been sorted.
  auto it = nz_idxs.begin();
  size_t i = 0;
  CHECK_GT(arr_size, nz_idxs.back());
  for (; i < arr_size && it != nz_idxs.end(); i++) {
    if (*it == i) {
      it++;
      continue;
    }
    out->push_back(i);
  }
  for (; i < arr_size; i++) {
    out->push_back(i);
  }
}
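// Example (illustrative): with nz_idxs = {1, 3} and arr_size = 5, NegateArray
// appends {0, 2, 4} to *out.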

/*
 * Uniformly sample vertices from a list of vertices.
 */
void GetUniformSample(const dgl_id_t* edge_id_list,
                      const dgl_id_t* vid_list,
                      const size_t ver_len,
                      const size_t max_num_neighbor,
                      std::vector<dgl_id_t>* out_ver,
                      std::vector<dgl_id_t>* out_edge) {
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // If we just sample a small number of elements from a large neighbor list.
  std::vector<size_t> sorted_idxs;
  if (ver_len > max_num_neighbor * 2) {
    sorted_idxs.reserve(max_num_neighbor);
    RandomSample(ver_len, max_num_neighbor, &sorted_idxs);
    std::sort(sorted_idxs.begin(), sorted_idxs.end());
  } else {
    std::vector<size_t> negate;
    negate.reserve(ver_len - max_num_neighbor);
    RandomSample(ver_len, ver_len - max_num_neighbor, &negate);
    std::sort(negate.begin(), negate.end());
    NegateArray(negate, ver_len, &sorted_idxs);
  }
  // verify the result.
  CHECK_EQ(sorted_idxs.size(), max_num_neighbor);
  for (size_t i = 1; i < sorted_idxs.size(); i++) {
    CHECK_GT(sorted_idxs[i], sorted_idxs[i - 1]);
  }
  for (auto idx : sorted_idxs) {
    out_ver->push_back(vid_list[idx]);
    out_edge->push_back(edge_id_list[idx]);
  }
}

/*
 * Non-uniform sample via ArrayHeap
 *
 * \param probability Transition probability on the entire graph, indexed by edge ID
 */
template<typename ValueType>
void GetNonUniformSample(const ValueType* probability,
                         const dgl_id_t* edge_id_list,
                         const dgl_id_t* vid_list,
                         const size_t ver_len,
                         const size_t max_num_neighbor,
                         std::vector<dgl_id_t>* out_ver,
                         std::vector<dgl_id_t>* out_edge) {
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // Make sample
  std::vector<size_t> sp_index(max_num_neighbor);
  std::vector<ValueType> sp_prob(ver_len);
  for (size_t i = 0; i < ver_len; ++i) {
    sp_prob[i] = probability[edge_id_list[i]];
  }
  ArrayHeap<ValueType> arrayHeap(sp_prob);
  arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index);
  out_ver->resize(max_num_neighbor);
  out_edge->resize(max_num_neighbor);
  for (size_t i = 0; i < max_num_neighbor; ++i) {
    size_t idx = sp_index[i];
    out_ver->at(i) = vid_list[idx];
    out_edge->at(i) = edge_id_list[idx];
  }
  sort(out_ver->begin(), out_ver->end());
  sort(out_edge->begin(), out_edge->end());
}

/*
 * Used for subgraph sampling
 */
struct neigh_list {
  std::vector<dgl_id_t> neighs;
  std::vector<dgl_id_t> edges;
  neigh_list(const std::vector<dgl_id_t> &_neighs,
             const std::vector<dgl_id_t> &_edges)
    : neighs(_neighs), edges(_edges) {}
};

struct neighbor_info {
  dgl_id_t id;
  size_t pos;
  size_t num_edges;

  neighbor_info(dgl_id_t id, size_t pos, size_t num_edges) {
    this->id = id;
    this->pos = pos;
    this->num_edges = num_edges;
  }
};

NodeFlow ConstructNodeFlow(std::vector<dgl_id_t> neighbor_list,
                           std::vector<dgl_id_t> edge_list,
                           std::vector<size_t> layer_offsets,
                           std::vector<std::pair<dgl_id_t, int> > *sub_vers,
                           std::vector<neighbor_info> *neigh_pos,
                           const std::string &edge_type,
                           int64_t num_edges, int num_hops, bool is_multigraph) {
  NodeFlow nf = NodeFlow::Create();
  uint64_t num_vertices = sub_vers->size();
  nf->node_mapping = aten::NewIdArray(num_vertices);
  nf->edge_mapping = aten::NewIdArray(num_edges);
  nf->layer_offsets = aten::NewIdArray(num_hops + 1);
  nf->flow_offsets = aten::NewIdArray(num_hops);

  dgl_id_t *node_map_data = static_cast<dgl_id_t *>(nf->node_mapping->data);
  dgl_id_t *layer_off_data = static_cast<dgl_id_t *>(nf->layer_offsets->data);
  dgl_id_t *flow_off_data = static_cast<dgl_id_t *>(nf->flow_offsets->data);
  dgl_id_t *edge_map_data = static_cast<dgl_id_t *>(nf->edge_mapping->data);

  // Construct sub_csr_graph
  // TODO(minjie): is nodeflow a multigraph?
  auto subg_csr = CSRPtr(new CSR(num_vertices, num_edges, is_multigraph));
  dgl_id_t* indptr_out = static_cast<dgl_id_t*>(subg_csr->indptr()->data);
  dgl_id_t* col_list_out = static_cast<dgl_id_t*>(subg_csr->indices()->data);
  dgl_id_t* eid_out = static_cast<dgl_id_t*>(subg_csr->edge_ids()->data);
  size_t collected_nedges = 0;

  // The data from the previous steps:
  // * node data: sub_vers (vid, layer), neigh_pos,
  // * edge data: neighbor_list, edge_list, probability.
  // * layer_offsets: the offset in sub_vers.
  dgl_id_t ver_id = 0;
  std::vector<std::unordered_map<dgl_id_t, dgl_id_t>> layer_ver_maps;
  layer_ver_maps.resize(num_hops);
  size_t out_node_idx = 0;
  for (int layer_id = num_hops - 1; layer_id >= 0; layer_id--) {
    // We sort the vertices in a layer so that we don't need to sort the neighbor Ids
    // after remapping to a subgraph. However, we don't need to sort the first layer
    // because we want the order of the nodes in the first layer to be the same as
    // the order of the input seed nodes.
    if (layer_id > 0) {
      std::sort(sub_vers->begin() + layer_offsets[layer_id],
                sub_vers->begin() + layer_offsets[layer_id + 1],
                [](const std::pair<dgl_id_t, dgl_id_t> &a1,
                   const std::pair<dgl_id_t, dgl_id_t> &a2) {
        return a1.first < a2.first;
      });
    }

    // Save the sampled vertices and their layer Ids.
    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1]; i++) {
      node_map_data[out_node_idx++] = sub_vers->at(i).first;
      layer_ver_maps[layer_id].insert(std::pair<dgl_id_t, dgl_id_t>(sub_vers->at(i).first,
                                                                    ver_id++));
      CHECK_EQ(sub_vers->at(i).second, layer_id);
    }
  }
  CHECK(out_node_idx == num_vertices);

  // sampling algorithms have to start from the seed nodes, so the seed nodes are
  // in the first layer and the input nodes are in the last layer.
  // When we expose the sampled graph to a Python user, we say the input nodes
  // are in the first layer and the seed nodes are in the last layer.
  // Thus, when we copy sampled results to a CSR, we need to reverse the order of layers.
  std::fill(indptr_out, indptr_out + num_vertices + 1, 0);
  size_t row_idx = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  layer_off_data[0] = 0;
  layer_off_data[1] = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  int out_layer_idx = 1;
  for (int layer_id = num_hops - 2; layer_id >= 0; layer_id--) {
    // Because we don't sort the vertices in the first layer above, we can't sort
    // the neighbor positions of the vertices in the first layer either.
    if (layer_id > 0) {
      std::sort(neigh_pos->begin() + layer_offsets[layer_id],
                neigh_pos->begin() + layer_offsets[layer_id + 1],
                [](const neighbor_info &a1, const neighbor_info &a2) {
                  return a1.id < a2.id;
                });
    }

    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1]; i++) {
      dgl_id_t dst_id = sub_vers->at(i).first;
      CHECK_EQ(dst_id, neigh_pos->at(i).id);
      size_t pos = neigh_pos->at(i).pos;
      CHECK_LE(pos, neighbor_list.size());
      const size_t nedges = neigh_pos->at(i).num_edges;
      if (neighbor_list.empty()) CHECK_EQ(nedges, 0);

      // We need to map the Ids of the neighbors to the subgraph.
      auto neigh_it = neighbor_list.begin() + pos;
      for (size_t i = 0; i < nedges; i++) {
        dgl_id_t neigh = *(neigh_it + i);
        CHECK(layer_ver_maps[layer_id + 1].find(neigh) != layer_ver_maps[layer_id + 1].end());
        col_list_out[collected_nedges + i] = layer_ver_maps[layer_id + 1][neigh];
      }
      // We can simply copy the edge Ids.
      std::copy_n(edge_list.begin() + pos,
                  nedges, edge_map_data + collected_nedges);
      collected_nedges += nedges;
      indptr_out[row_idx+1] = indptr_out[row_idx] + nedges;
      row_idx++;
    }
    layer_off_data[out_layer_idx + 1] = layer_off_data[out_layer_idx]
        + layer_offsets[layer_id + 1] - layer_offsets[layer_id];
    out_layer_idx++;
  }
  CHECK_EQ(row_idx, num_vertices);
  CHECK_EQ(indptr_out[row_idx], num_edges);
  CHECK_EQ(out_layer_idx, num_hops);
  CHECK_EQ(layer_off_data[out_layer_idx], num_vertices);

  // Copy flow offsets.
  flow_off_data[0] = 0;
  int out_flow_idx = 0;
  for (size_t i = 0; i < layer_offsets.size() - 2; i++) {
    size_t num_edges = indptr_out[layer_off_data[i + 2]] - indptr_out[layer_off_data[i + 1]];
    flow_off_data[out_flow_idx + 1] = flow_off_data[out_flow_idx] + num_edges;
    out_flow_idx++;
  }
  CHECK(out_flow_idx == num_hops - 1);
  CHECK(flow_off_data[num_hops - 1] == static_cast<uint64_t>(num_edges));

  std::iota(eid_out, eid_out + num_edges, 0);

  if (edge_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(subg_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, subg_csr));
  }

  return nf;
}

template<typename ValueType>
NodeFlow SampleSubgraph(const ImmutableGraph *graph,
                        const std::vector<dgl_id_t>& seeds,
                        const ValueType* probability,
                        const std::string &edge_type,
                        int num_hops,
                        size_t num_neighbor,
                        const bool add_self_loop) {
  CHECK_EQ(graph->NumBits(), 64) << "32 bit graph is not supported yet";
  const size_t num_seeds = seeds.size();
  auto orig_csr = edge_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t* val_list = static_cast<dgl_id_t*>(orig_csr->edge_ids()->data);
  const dgl_id_t* col_list = static_cast<dgl_id_t*>(orig_csr->indices()->data);
  const dgl_id_t* indptr = static_cast<dgl_id_t*>(orig_csr->indptr()->data);

  std::unordered_set<dgl_id_t> sub_ver_map;  // The vertex Ids in a layer.
  std::vector<std::pair<dgl_id_t, int> > sub_vers;
  sub_vers.reserve(num_seeds * 10);
  // add seed vertices
  for (size_t i = 0; i < num_seeds; ++i) {
    auto ret = sub_ver_map.insert(seeds[i]);
    // If the vertex is inserted successfully.
    if (ret.second) {
      sub_vers.emplace_back(seeds[i], 0);
    }
  }
  std::vector<dgl_id_t> tmp_sampled_src_list;
  std::vector<dgl_id_t> tmp_sampled_edge_list;
  // ver_id, position
  std::vector<neighbor_info> neigh_pos;
  neigh_pos.reserve(num_seeds);
  std::vector<dgl_id_t> neighbor_list;
  std::vector<dgl_id_t> edge_list;
  std::vector<size_t> layer_offsets(num_hops + 1);
  int64_t num_edges = 0;

  layer_offsets[0] = 0;
  layer_offsets[1] = sub_vers.size();
  for (int layer_id = 1; layer_id < num_hops; layer_id++) {
    // We need to avoid resampling the same node in a layer, but we allow a node
    // to be resampled in multiple layers. We use `sub_ver_map` to keep track of
    // sampled nodes in a layer, and clear it when entering a new layer.
    sub_ver_map.clear();
    // The previous iteration collected all the nodes of the previous layer in sub_vers.
    // sub_vers is used both as a node collection and a queue.
    for (size_t idx = layer_offsets[layer_id - 1]; idx < layer_offsets[layer_id]; idx++) {
      dgl_id_t dst_id = sub_vers[idx].first;
      const int cur_node_level = sub_vers[idx].second;

      tmp_sampled_src_list.clear();
      tmp_sampled_edge_list.clear();
      dgl_id_t ver_len = *(indptr+dst_id+1) - *(indptr+dst_id);
      if (probability == nullptr) {  // uniform-sample
        GetUniformSample(val_list + *(indptr + dst_id),
                         col_list + *(indptr + dst_id),
                         ver_len,
                         num_neighbor,
                         &tmp_sampled_src_list,
                         &tmp_sampled_edge_list);
      } else {  // non-uniform-sample
        GetNonUniformSample(probability,
                            val_list + *(indptr + dst_id),
                            col_list + *(indptr + dst_id),
                            ver_len,
                            num_neighbor,
                            &tmp_sampled_src_list,
                            &tmp_sampled_edge_list);
      }
      // If we need to add self loop and it doesn't exist in the sampled neighbor list.
      if (add_self_loop && std::find(tmp_sampled_src_list.begin(), tmp_sampled_src_list.end(),
                                     dst_id) == tmp_sampled_src_list.end()) {
        tmp_sampled_src_list.push_back(dst_id);
        const dgl_id_t *src_list = col_list + *(indptr + dst_id);
        const dgl_id_t *eid_list = val_list + *(indptr + dst_id);
        // TODO(zhengda) this operation has O(N) complexity. It can be pretty slow.
        const dgl_id_t *src = std::find(src_list, src_list + ver_len, dst_id);
        // If there doesn't exist a self loop in the graph.
        // we have to add -1 as the edge id for the self-loop edge.
        if (src == src_list + ver_len)
          tmp_sampled_edge_list.push_back(-1);
        else
          tmp_sampled_edge_list.push_back(eid_list[src - src_list]);
      }
      CHECK_EQ(tmp_sampled_src_list.size(), tmp_sampled_edge_list.size());
      neigh_pos.emplace_back(dst_id, neighbor_list.size(), tmp_sampled_src_list.size());
      // Then push the vertices
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        neighbor_list.push_back(tmp_sampled_src_list[i]);
      }
      // Finally we push the edge list
      for (size_t i = 0; i < tmp_sampled_edge_list.size(); ++i) {
        edge_list.push_back(tmp_sampled_edge_list[i]);
      }
      num_edges += tmp_sampled_src_list.size();
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        // We need to add the neighbor in the hashtable here. This ensures that
        // the vertex in the queue is unique. If we see a vertex before, we don't
        // need to add it to the queue again.
        auto ret = sub_ver_map.insert(tmp_sampled_src_list[i]);
        // If the sampled neighbor is inserted to the map successfully.
        if (ret.second) {
          sub_vers.emplace_back(tmp_sampled_src_list[i], cur_node_level + 1);
        }
      }
    }
    layer_offsets[layer_id + 1] = layer_offsets[layer_id] + sub_ver_map.size();
    CHECK_EQ(layer_offsets[layer_id + 1], sub_vers.size());
  }

  return ConstructNodeFlow(neighbor_list, edge_list, layer_offsets, &sub_vers, &neigh_pos,
                           edge_type, num_edges, num_hops, graph->IsMultigraph());
}

}  // namespace

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetGraph")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->graph;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetNodeMapping")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->node_mapping;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetEdgeMapping")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->edge_mapping;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetLayerOffsets")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->layer_offsets;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetBlockOffsets")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->flow_offsets;
  });

template<typename ValueType>
NodeFlow SamplerOp::NeighborSample(const ImmutableGraph *graph,
                                   const std::vector<dgl_id_t>& seeds,
                                   const std::string &edge_type,
                                   int num_hops, int expand_factor,
                                   const bool add_self_loop,
                                   const ValueType *probability) {
  return SampleSubgraph(graph,
                        seeds,
                        probability,
                        edge_type,
                        num_hops + 1,
                        expand_factor,
                        add_self_loop);
}

namespace {
  void ConstructLayers(const dgl_id_t *indptr,
                       const dgl_id_t *indices,
                       const std::vector<dgl_id_t>& seed_array,
                       IdArray layer_sizes,
                       std::vector<dgl_id_t> *layer_offsets,
                       std::vector<dgl_id_t> *node_mapping,
                       std::vector<int64_t> *actl_layer_sizes,
                       std::vector<float> *probabilities) {
    /*
     * Given a graph and a collection of seed nodes, this function constructs NodeFlow
     * layers via uniform layer-wise sampling, and returns the resultant layers and their
     * corresponding probabilities.
     */
    std::copy(seed_array.begin(), seed_array.end(), std::back_inserter(*node_mapping));
    actl_layer_sizes->push_back(node_mapping->size());
    probabilities->insert(probabilities->end(), node_mapping->size(), 1);
    const int64_t* layer_sizes_data = static_cast<int64_t*>(layer_sizes->data);
    const int64_t num_layers = layer_sizes->shape[0];

    size_t curr = 0;
    size_t next = node_mapping->size();
    for (int64_t i = num_layers - 1; i >= 0; --i) {
      const int64_t layer_size = layer_sizes_data[i];
      std::unordered_set<dgl_id_t> candidate_set;
      for (auto j = curr; j != next; ++j) {
        auto src = (*node_mapping)[j];
        candidate_set.insert(indices + indptr[src], indices + indptr[src + 1]);
      }

      std::vector<dgl_id_t> candidate_vector;
      std::copy(candidate_set.begin(), candidate_set.end(),
                std::back_inserter(candidate_vector));

      std::unordered_map<dgl_id_t, size_t> n_occurrences;
      auto n_candidates = candidate_vector.size();
      for (int64_t j = 0; j != layer_size; ++j) {
        auto dst = candidate_vector[
          RandomEngine::ThreadLocal()->RandInt(n_candidates)];
        if (!n_occurrences.insert(std::make_pair(dst, 1)).second) {
          ++n_occurrences[dst];
        }
      }

      for (auto const &pair : n_occurrences) {
        node_mapping->push_back(pair.first);
        float p = pair.second * n_candidates / static_cast<float>(layer_size);
        probabilities->push_back(p);
      }

      actl_layer_sizes->push_back(node_mapping->size() - next);
      curr = next;
      next = node_mapping->size();
    }
    std::reverse(node_mapping->begin(), node_mapping->end());
    std::reverse(actl_layer_sizes->begin(), actl_layer_sizes->end());
    layer_offsets->push_back(0);
    for (const auto &size : *actl_layer_sizes) {
      layer_offsets->push_back(size + layer_offsets->back());
    }
  }
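  // Numerical illustration (not part of the library): if a layer draws
  // layer_size = 4 node picks from n_candidates = 8 unique neighbors and a
  // particular node is hit twice, the value pushed to `probabilities` is
  // 2 * 8 / 4.0 = 4.0, i.e. the observed count divided by the expected count
  // layer_size / n_candidates.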

  void ConstructFlows(const dgl_id_t *indptr,
                      const dgl_id_t *indices,
                      const dgl_id_t *eids,
                      const std::vector<dgl_id_t> &node_mapping,
                      const std::vector<int64_t> &actl_layer_sizes,
                      std::vector<dgl_id_t> *sub_indptr,
                      std::vector<dgl_id_t> *sub_indices,
                      std::vector<dgl_id_t> *sub_eids,
                      std::vector<dgl_id_t> *flow_offsets,
                      std::vector<dgl_id_t> *edge_mapping) {
    /*
     * Given a graph and a sequence of NodeFlow layers, this function constructs dense
     * subgraphs (flows) between consecutive layers.
     */
    auto n_flows = actl_layer_sizes.size() - 1;
    for (int64_t i = 0; i < actl_layer_sizes.front() + 1; i++)
      sub_indptr->push_back(0);
    flow_offsets->push_back(0);
    int64_t first = 0;
    for (size_t i = 0; i < n_flows; ++i) {
      auto src_size = actl_layer_sizes[i];
      std::unordered_map<dgl_id_t, dgl_id_t> source_map;
      for (int64_t j = 0; j < src_size; ++j) {
        source_map.insert(std::make_pair(node_mapping[first + j], first + j));
      }
      auto dst_size = actl_layer_sizes[i + 1];
      for (int64_t j = 0; j < dst_size; ++j) {
        auto dst = node_mapping[first + src_size + j];
        typedef std::pair<dgl_id_t, dgl_id_t> id_pair;
        std::vector<id_pair> neighbor_indices;
        for (dgl_id_t k = indptr[dst]; k < indptr[dst + 1]; ++k) {
          // TODO(gaiyu): accelerate hash table lookup
          auto ret = source_map.find(indices[k]);
          if (ret != source_map.end()) {
            neighbor_indices.push_back(std::make_pair(ret->second, eids[k]));
          }
        }
        auto cmp = [](const id_pair p, const id_pair q)->bool { return p.first < q.first; };
        std::sort(neighbor_indices.begin(), neighbor_indices.end(), cmp);
        for (const auto &pair : neighbor_indices) {
          sub_indices->push_back(pair.first);
          edge_mapping->push_back(pair.second);
        }
        sub_indptr->push_back(sub_indices->size());
      }
      flow_offsets->push_back(sub_indices->size());
      first += src_size;
    }
    sub_eids->resize(sub_indices->size());
    std::iota(sub_eids->begin(), sub_eids->end(), 0);
  }
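  // Output layout, for orientation (a reading aid, not a spec): sub_indptr /
  // sub_indices / sub_eids form a single CSR covering every layer's nodes
  // (source-layer rows are left empty), flow_offsets[i] gives the first
  // sub-edge of flow i, and edge_mapping maps each sub-edge back to its edge
  // Id in the parent graph.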
}  // namespace

NodeFlow SamplerOp::LayerUniformSample(const ImmutableGraph *graph,
                                       const std::vector<dgl_id_t>& seeds,
                                       const std::string &neighbor_type,
                                       IdArray layer_sizes) {
  const auto g_csr = neighbor_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t *indptr = static_cast<dgl_id_t*>(g_csr->indptr()->data);
  const dgl_id_t *indices = static_cast<dgl_id_t*>(g_csr->indices()->data);
  const dgl_id_t *eids = static_cast<dgl_id_t*>(g_csr->edge_ids()->data);

  std::vector<dgl_id_t> layer_offsets;
  std::vector<dgl_id_t> node_mapping;
  std::vector<int64_t> actl_layer_sizes;
  std::vector<float> probabilities;
  ConstructLayers(indptr,
                  indices,
                  seeds,
                  layer_sizes,
                  &layer_offsets,
                  &node_mapping,
                  &actl_layer_sizes,
                  &probabilities);

  std::vector<dgl_id_t> sub_indptr, sub_indices, sub_edge_ids;
  std::vector<dgl_id_t> flow_offsets;
  std::vector<dgl_id_t> edge_mapping;
  ConstructFlows(indptr,
                 indices,
                 eids,
                 node_mapping,
                 actl_layer_sizes,
                 &sub_indptr,
                 &sub_indices,
                 &sub_edge_ids,
                 &flow_offsets,
                 &edge_mapping);
  // sanity check
  CHECK_GT(sub_indptr.size(), 0);
  CHECK_EQ(sub_indptr[0], 0);
  CHECK_EQ(sub_indptr.back(), sub_indices.size());
  CHECK_EQ(sub_indices.size(), sub_edge_ids.size());

  NodeFlow nf = NodeFlow::Create();
  auto sub_csr = CSRPtr(new CSR(aten::VecToIdArray(sub_indptr),
                                aten::VecToIdArray(sub_indices),
                                aten::VecToIdArray(sub_edge_ids)));

  if (neighbor_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(sub_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, sub_csr));
  }

  nf->node_mapping = aten::VecToIdArray(node_mapping);
  nf->edge_mapping = aten::VecToIdArray(edge_mapping);
  nf->layer_offsets = aten::VecToIdArray(layer_offsets);
  nf->flow_offsets = aten::VecToIdArray(flow_offsets);

  return nf;
}

void BuildCsr(const ImmutableGraph &g, const std::string neigh_type) {
  if (neigh_type == "in") {
    auto csr = g.GetInCSR();
    assert(csr);
  } else if (neigh_type == "out") {
    auto csr = g.GetOutCSR();
    assert(csr);
  } else {
    LOG(FATAL) << "We don't support sample from neighbor type " << neigh_type;
  }
}

template<typename ValueType>
std::vector<NodeFlow> NeighborSamplingImpl(const ImmutableGraphPtr gptr,
                                           const IdArray seed_nodes,
                                           const int64_t batch_start_id,
                                           const int64_t batch_size,
                                           const int64_t max_num_workers,
                                           const int64_t expand_factor,
                                           const int64_t num_hops,
                                           const std::string neigh_type,
                                           const bool add_self_loop,
                                           const ValueType *probability) {
    // process args
    CHECK(aten::IsValidIdArray(seed_nodes));
    const dgl_id_t* seed_nodes_data = static_cast<dgl_id_t*>(seed_nodes->data);
    const int64_t num_seeds = seed_nodes->shape[0];
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size - batch_start_id);
    // We need to make sure we have the right CSR before we enter parallel sampling.
    BuildCsr(*gptr, neigh_type);
    // generate node flows
    std::vector<NodeFlow> nflows(num_workers);
#pragma omp parallel for
    for (int i = 0; i < num_workers; i++) {
      // create per-worker seed nodes.
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      // TODO(minjie): the vector allocation/copy is unnecessary
      std::vector<dgl_id_t> worker_seeds(end - start);
      std::copy(seed_nodes_data + start, seed_nodes_data + end,
                worker_seeds.begin());
      nflows[i] = SamplerOp::NeighborSample(
          gptr.get(), worker_seeds, neigh_type, num_hops, expand_factor,
          add_self_loop, probability);
    }
    return nflows;
}
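// Batching arithmetic, illustrated (not part of the library): with
// num_seeds = 1000, batch_size = 128, batch_start_id = 0 and
// max_num_workers = 4, there are ceil(1000 / 128) = 8 batches in total, so
// num_workers = min(4, 8 - 0) = 4 and worker i samples the seeds in
// [i * 128, (i + 1) * 128) in parallel under OpenMP.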

DGL_REGISTER_GLOBAL("sampling._CAPI_UniformSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    const GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const int64_t expand_factor = args[5];
    const int64_t num_hops = args[6];
    const std::string neigh_type = args[7];
    const bool add_self_loop = args[8];

    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";

    CHECK(aten::IsValidIdArray(seed_nodes));
    CHECK_EQ(seed_nodes->ctx.device_type, kDLCPU)
      << "UniformSampler only support CPU sampling";

    std::vector<NodeFlow> nflows = NeighborSamplingImpl<float>(
        gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
        expand_factor, num_hops, neigh_type, add_self_loop, nullptr);

    *rv = List<NodeFlow>(nflows);
  });

DGL_REGISTER_GLOBAL("sampling._CAPI_NeighborSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    const GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const int64_t expand_factor = args[5];
    const int64_t num_hops = args[6];
    const std::string neigh_type = args[7];
    const bool add_self_loop = args[8];
    const NDArray probability = args[9];

    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";

    CHECK(aten::IsValidIdArray(seed_nodes));
    CHECK_EQ(seed_nodes->ctx.device_type, kDLCPU)
      << "NeighborSampler only support CPU sampling";

    std::vector<NodeFlow> nflows;

    CHECK(probability->dtype.code == kDLFloat)
      << "transition probability must be float";
    CHECK(probability->ndim == 1)
      << "transition probability must be a 1-dimensional vector";
    CHECK_EQ(probability->ctx.device_type, kDLCPU)
      << "NeighborSampling only support CPU sampling";

    ATEN_FLOAT_TYPE_SWITCH(
      probability->dtype,
      FloatType,
      "transition probability",
      {
        const FloatType *prob;

        if (probability->ndim == 1 && probability->shape[0] == 0) {
          prob = nullptr;
        } else {
          CHECK(probability->shape[0] == gptr->NumEdges())
            << "transition probability must have same number of elements as edges";
          CHECK(probability.IsContiguous())
            << "transition probability must be contiguous tensor";
          prob = static_cast<const FloatType *>(probability->data);
        }

        nflows = NeighborSamplingImpl(
            gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
            expand_factor, num_hops, neigh_type, add_self_loop, prob);
    });

    *rv = List<NodeFlow>(nflows);
  });

DGL_REGISTER_GLOBAL("sampling._CAPI_LayerSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const IdArray layer_sizes = args[5];
    const std::string neigh_type = args[6];
    // process args
    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";
    CHECK(aten::IsValidIdArray(seed_nodes));
    CHECK_EQ(seed_nodes->ctx.device_type, kDLCPU)
      << "LayerSampler only support CPU sampling";

    CHECK(aten::IsValidIdArray(layer_sizes));
    CHECK_EQ(layer_sizes->ctx.device_type, kDLCPU)
      << "LayerSampler only support CPU sampling";

    const dgl_id_t* seed_nodes_data = static_cast<dgl_id_t*>(seed_nodes->data);
    const int64_t num_seeds = seed_nodes->shape[0];
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size - batch_start_id);
    // We need to make sure we have the right CSR before we enter parallel sampling.
    BuildCsr(*gptr, neigh_type);
    // generate node flows
    std::vector<NodeFlow> nflows(num_workers);
#pragma omp parallel for
    for (int i = 0; i < num_workers; i++) {
      // create per-worker seed nodes.
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      // TODO(minjie): the vector allocation/copy is unnecessary
      std::vector<dgl_id_t> worker_seeds(end - start);
      std::copy(seed_nodes_data + start, seed_nodes_data + end,
                worker_seeds.begin());
      nflows[i] = SamplerOp::LayerUniformSample(
          gptr.get(), worker_seeds, neigh_type, layer_sizes);
    }
    *rv = List<NodeFlow>(nflows);
  });

namespace {

void BuildCoo(const ImmutableGraph &g) {
  auto coo = g.GetCOO();
  assert(coo);
}


dgl_id_t global2local_map(dgl_id_t global_id,
                          std::unordered_map<dgl_id_t, dgl_id_t> *map) {
  auto it = map->find(global_id);
  if (it == map->end()) {
    dgl_id_t local_id = map->size();
    map->insert(std::pair<dgl_id_t, dgl_id_t>(global_id, local_id));
    return local_id;
  } else {
    return it->second;
  }
}
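// Illustrative behavior (not part of the library API): starting from an empty
// map, global2local_map(42, &m) returns 0 and records 42 -> 0; calling it
// again with 42 returns 0, while a new global Id such as 7 gets local Id 1.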

inline bool IsNegativeHeadMode(const std::string &mode) {
  return mode == "head";
}

IdArray GetGlobalVid(IdArray induced_nid, IdArray subg_nid) {
  IdArray gnid = IdArray::Empty({subg_nid->shape[0]}, subg_nid->dtype, subg_nid->ctx);
  const dgl_id_t *induced_nid_data = static_cast<dgl_id_t *>(induced_nid->data);
  const dgl_id_t *subg_nid_data = static_cast<dgl_id_t *>(subg_nid->data);
  dgl_id_t *gnid_data = static_cast<dgl_id_t *>(gnid->data);
  for (int64_t i = 0; i < subg_nid->shape[0]; i++) {
    gnid_data[i] = induced_nid_data[subg_nid_data[i]];
  }
  return gnid;
}

IdArray CheckExistence(GraphPtr gptr, IdArray neg_src, IdArray neg_dst,
                       IdArray induced_nid) {
  return gptr->HasEdgesBetween(GetGlobalVid(induced_nid, neg_src),
                               GetGlobalVid(induced_nid, neg_dst));
}

IdArray CheckExistence(GraphPtr gptr, IdArray relations,
                       IdArray neg_src, IdArray neg_dst,
                       IdArray induced_nid, IdArray neg_eid) {
  neg_src = GetGlobalVid(induced_nid, neg_src);
  neg_dst = GetGlobalVid(induced_nid, neg_dst);
  BoolArray exist = gptr->HasEdgesBetween(neg_src, neg_dst);
  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *relation_data = static_cast<dgl_id_t *>(relations->data);
  // TODO(zhengda) is this right?
  dgl_id_t *exist_data = static_cast<dgl_id_t *>(exist->data);
  int64_t num_neg_edges = neg_src->shape[0];
  for (int64_t i = 0; i < num_neg_edges; i++) {
    // If the edge doesn't exist, we don't need to do anything.
    if (!exist_data[i])
      continue;
    // If the edge exists, we need to double check if the relations match.
    // If they match, this negative edge isn't really a negative edge.
    dgl_id_t eid1 = neg_eid_data[i];
    dgl_id_t orig_neg_rel1 = relation_data[eid1];
    IdArray eids = gptr->EdgeId(neg_src_data[i], neg_dst_data[i]);
    dgl_id_t *eid_data = static_cast<dgl_id_t *>(eids->data);
    int64_t num_edges_between = eids->shape[0];
    bool same_rel = false;
    for (int64_t j = 0; j < num_edges_between; j++) {
      dgl_id_t neg_rel1 = relation_data[eid_data[j]];
      if (neg_rel1 == orig_neg_rel1) {
        same_rel = true;
        break;
      }
    }
    exist_data[i] = same_rel;
  }
  return exist;
}

std::vector<dgl_id_t> Global2Local(const std::vector<size_t> &ids,
                                   const std::unordered_map<dgl_id_t, dgl_id_t> &map) {
  std::vector<dgl_id_t> local_ids(ids.size());
  for (size_t i = 0; i < ids.size(); i++) {
    auto it = map.find(ids[i]);
    assert(it != map.end());
    local_ids[i] = it->second;
  }
  return local_ids;
}

NegSubgraph EdgeSamplerObject::genNegEdgeSubgraph(const Subgraph &pos_subg,
                                                  const std::string &neg_mode,
                                                  int64_t neg_sample_size,
                                                  bool exclude_positive,
                                                  bool check_false_neg) {
  int64_t num_tot_nodes = gptr_->NumVertices();
  if (neg_sample_size > num_tot_nodes)
    neg_sample_size = num_tot_nodes;
  bool is_multigraph = gptr_->IsMultigraph();
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
  int64_t num_pos_edges = coo->shape[0] / 2;
  int64_t num_neg_edges = num_pos_edges * neg_sample_size;
  IdArray neg_dst = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_eid = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data = static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);
  size_t num_pos_nodes = pos_subg.graph->NumVertices();
  std::vector<size_t> pos_nodes(induced_vid_data, induced_vid_data + num_pos_nodes);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *induced_neg_eid_data = static_cast<dgl_id_t *>(induced_neg_eid->data);

  const dgl_id_t *unchanged;
  dgl_id_t *neg_unchanged;
  dgl_id_t *neg_changed;
  if (IsNegativeHeadMode(neg_mode)) {
    unchanged = dst_data;
    neg_unchanged = neg_dst_data;
    neg_changed = neg_src_data;
  } else {
    unchanged = src_data;
    neg_unchanged = neg_src_data;
    neg_changed = neg_dst_data;
  }

  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  std::vector<dgl_id_t> local_pos_vids;
  local_pos_vids.reserve(num_pos_edges);

  dgl_id_t curr_eid = 0;
  std::vector<size_t> neg_vids;
  neg_vids.reserve(neg_sample_size);
  // If we don't exclude positive edges, we are actually sampling more than
  // the total number of nodes in the graph.
  if (!exclude_positive && neg_sample_size >= num_tot_nodes) {
    // We add all nodes as negative nodes.
    for (int64_t i = 0; i < num_tot_nodes; i++) {
      neg_vids.push_back(i);
      neg_map[i] = i;
    }

    // Get all nodes in the positive side.
    for (int64_t i = 0; i < num_pos_edges; i++) {
      dgl_id_t vid = induced_vid_data[unchanged[i]];
      local_pos_vids.push_back(neg_map[vid]);
    }
    // There is no guarantee that the nodes in the vector are unique.
    std::sort(local_pos_vids.begin(), local_pos_vids.end());
    auto it = std::unique(local_pos_vids.begin(), local_pos_vids.end());
    local_pos_vids.resize(it - local_pos_vids.begin());
  } else {
    // Collect nodes in the positive side.
    dgl_id_t local_vid = 0;
    for (int64_t i = 0; i < num_pos_edges; i++) {
      dgl_id_t vid = induced_vid_data[unchanged[i]];
      auto it = neg_map.find(vid);
      if (it == neg_map.end()) {
        local_pos_vids.push_back(local_vid);
        neg_map.insert(std::pair<dgl_id_t, dgl_id_t>(vid, local_vid++));
      }
    }
  }

  int64_t prev_neg_offset = 0;
  for (int64_t i = 0; i < num_pos_edges; i++) {
    size_t neg_idx = i * neg_sample_size;

    std::vector<size_t> neighbors;
    DGLIdIters neigh_it;
    if (IsNegativeHeadMode(neg_mode)) {
      neigh_it = gptr_->PredVec(induced_vid_data[unchanged[i]]);
    } else {
      neigh_it = gptr_->SuccVec(induced_vid_data[unchanged[i]]);
    }

    // If the number of negative nodes is smaller than the number of total nodes
    // in the graph.
    if (exclude_positive && neg_sample_size < num_tot_nodes) {
      std::vector<size_t> exclude;
      for (auto it = neigh_it.begin(); it != neigh_it.end(); it++) {
        dgl_id_t global_vid = *it;
        exclude.push_back(global_vid);
      }
      prev_neg_offset = neg_vids.size();
      randomSample(num_tot_nodes, neg_sample_size, exclude, &neg_vids);
      assert(prev_neg_offset + neg_sample_size == neg_vids.size());
    } else if (neg_sample_size < num_tot_nodes) {
      prev_neg_offset = neg_vids.size();
      randomSample(num_tot_nodes, neg_sample_size, &neg_vids);
      assert(prev_neg_offset + neg_sample_size == neg_vids.size());
    } else if (exclude_positive) {
      LOG(FATAL) << "We can't exclude positive edges "
                    "when sampling negative edges with all nodes.";
    } else {
      // We don't need to do anything here.
      // In this case, every edge has the same negative edges. That is,
      // neg_vids contains all nodes of the graph. They have been generated
      // before the for loop.
    }

    dgl_id_t global_unchanged = induced_vid_data[unchanged[i]];
    dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);

    for (int64_t j = 0; j < neg_sample_size; j++) {
      neg_unchanged[neg_idx + j] = local_unchanged;
      neg_eid_data[neg_idx + j] = curr_eid++;
      dgl_id_t local_changed = global2local_map(neg_vids[j + prev_neg_offset], &neg_map);
      neg_changed[neg_idx + j] = local_changed;
      // induced negative eid references to the positive one.
      induced_neg_eid_data[neg_idx + j] = induced_eid_data[i];
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid = IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data = static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  NegSubgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst, is_multigraph));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;
  // If we didn't sample all nodes to form negative edges, some of the nodes
  // in the vector might be redundant.
  if (neg_sample_size < num_tot_nodes) {
    std::sort(neg_vids.begin(), neg_vids.end());
    auto it = std::unique(neg_vids.begin(), neg_vids.end());
    neg_vids.resize(it - neg_vids.begin());
  }
  if (IsNegativeHeadMode(neg_mode)) {
    neg_subg.head_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
    neg_subg.tail_nid = aten::VecToIdArray(local_pos_vids);
  } else {
    neg_subg.head_nid = aten::VecToIdArray(local_pos_vids);
    neg_subg.tail_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
  }
  // TODO(zhengda) we should provide an array of 1s if exclude_positive
  if (check_false_neg) {
    if (relations_->shape[0] == 0) {
      neg_subg.exist = CheckExistence(gptr_, neg_src, neg_dst, induced_neg_vid);
    } else {
      neg_subg.exist = CheckExistence(gptr_, relations_, neg_src, neg_dst,
                                      induced_neg_vid, induced_neg_eid);
    }
  }
  return neg_subg;
}

NegSubgraph EdgeSamplerObject::genPBGNegEdgeSubgraph(const Subgraph &pos_subg,
                                                     const std::string &neg_mode,
                                                     int64_t neg_sample_size,
                                                     bool exclude_positive,
                                                     bool check_false_neg) {
  int64_t num_tot_nodes = gptr_->NumVertices();
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
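  // The COO array packs both endpoints of every edge back to back, so its
  // length is twice the number of positive edges.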
  int64_t num_pos_edges = coo->shape[0] / 2;
  if (neg_sample_size > num_tot_nodes)
    neg_sample_size = num_tot_nodes;

  int64_t chunk_size = neg_sample_size;
  // If num_pos_edges isn't divisible by chunk_size, the actual number of chunks
  // is num_chunks + 1 and the last chunk has last_chunk_size edges.
  // Otherwise, the actual number of chunks is num_chunks and last_chunk_size is 0.
  int64_t num_chunks = num_pos_edges / chunk_size;
  int64_t last_chunk_size = num_pos_edges - num_chunks * chunk_size;

  // The number of negative edges.
  int64_t num_neg_edges = neg_sample_size * chunk_size * num_chunks;
  int64_t num_neg_edges_last_chunk = neg_sample_size * last_chunk_size;
  int64_t num_all_neg_edges = num_neg_edges + num_neg_edges_last_chunk;

  // We should include the last chunk.
  if (last_chunk_size > 0)
    num_chunks++;

  IdArray neg_dst = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_eid = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data = static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data =
      static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);
  int64_t num_pos_nodes = pos_subg.graph->NumVertices();
  std::vector<dgl_id_t> pos_nodes(induced_vid_data, induced_vid_data + num_pos_nodes);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *induced_neg_eid_data = static_cast<dgl_id_t *>(induced_neg_eid->data);

  const dgl_id_t *unchanged;
  dgl_id_t *neg_unchanged;
  dgl_id_t *neg_changed;
  if (IsNegativeHeadMode(neg_mode)) {
    unchanged = dst_data;
    neg_unchanged = neg_dst_data;
    neg_changed = neg_src_data;
  } else {
    unchanged = src_data;
    neg_unchanged = neg_src_data;
    neg_changed = neg_dst_data;
  }

  // Pre-sample all corrupt nodes at once: neg_sample_size nodes per chunk.
  std::vector<size_t> neg_vids;
  randomSample(num_tot_nodes,
               num_chunks * neg_sample_size,
               &neg_vids);

  dgl_id_t curr_eid = 0;
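  // Maps global node ids to local node ids in the negative subgraph.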
  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  dgl_id_t local_vid = 0;

  // Collect the local ids of the positive-side (unchanged) endpoints; they
  // become the head or tail nids of the negative subgraph.
  std::vector<dgl_id_t> local_pos_vids;
  local_pos_vids.reserve(num_pos_edges);
  for (int64_t i = 0; i < num_pos_edges; i++) {
    dgl_id_t vid = induced_vid_data[unchanged[i]];
    auto it = neg_map.find(vid);
    if (it == neg_map.end()) {
      local_pos_vids.push_back(local_vid);
      neg_map.insert(std::pair<dgl_id_t, dgl_id_t>(vid, local_vid++));
    }
  }

  for (int64_t i_chunk = 0; i_chunk < num_chunks; i_chunk++) {
    // For each chunk: every positive edge in the chunk is paired with the
    // same neg_sample_size corrupt nodes sampled above.
    int64_t neg_idx = neg_sample_size * chunk_size * i_chunk;
    int64_t pos_edge_idx = chunk_size * i_chunk;
    int64_t neg_node_idx = neg_sample_size * i_chunk;
    // The actual chunk size. It'll be different for the last chunk.
    int64_t chunk_size1;
    if (i_chunk == num_chunks - 1 && last_chunk_size > 0)
      chunk_size1 = last_chunk_size;
    else
      chunk_size1 = chunk_size;

    for (int64_t in_chunk = 0; in_chunk != chunk_size1; ++in_chunk) {
      // For each positive node in a chunk.
      dgl_id_t global_unchanged = induced_vid_data[unchanged[pos_edge_idx + in_chunk]];
      dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);
      for (int64_t j = 0; j < neg_sample_size; ++j) {
        neg_unchanged[neg_idx] = local_unchanged;
        neg_eid_data[neg_idx] = curr_eid++;
        dgl_id_t global_changed_vid = neg_vids[neg_node_idx + j];

        // TODO(zhengda) we can avoid the hashtable lookup here.
        dgl_id_t local_changed = global2local_map(global_changed_vid, &neg_map);
        neg_changed[neg_idx] = local_changed;
        induced_neg_eid_data[neg_idx] = induced_eid_data[pos_edge_idx + in_chunk];
        neg_idx++;
      }
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid = IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data = static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  NegSubgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst, gptr_->IsMultigraph()));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;
  if (IsNegativeHeadMode(neg_mode)) {
    neg_subg.head_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
    neg_subg.tail_nid = aten::VecToIdArray(local_pos_vids);
  } else {
    neg_subg.head_nid = aten::VecToIdArray(local_pos_vids);
    neg_subg.tail_nid = aten::VecToIdArray(Global2Local(neg_vids, neg_map));
  }
  if (check_false_neg) {
    if (relations_->shape[0] == 0) {
      neg_subg.exist = CheckExistence(gptr_, neg_src, neg_dst, induced_neg_vid);
    } else {
      neg_subg.exist = CheckExistence(gptr_, relations_, neg_src, neg_dst,
                                      induced_neg_vid, induced_neg_eid);
    }
  }
  return neg_subg;
}

inline SubgraphRef ConvertRef(const Subgraph &subg) {
    return SubgraphRef(std::shared_ptr<Subgraph>(new Subgraph(subg)));
}

inline SubgraphRef ConvertRef(const NegSubgraph &subg) {
    return SubgraphRef(std::shared_ptr<Subgraph>(new NegSubgraph(subg)));
}

}  // namespace

DGL_REGISTER_GLOBAL("sampling._CAPI_GetNegEdgeExistence")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  SubgraphRef g = args[0];
  auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
  *rv = gptr->exist;
});

DGL_REGISTER_GLOBAL("sampling._CAPI_GetEdgeSubgraphHead")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  SubgraphRef g = args[0];
  auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
  *rv = gptr->head_nid;
});

DGL_REGISTER_GLOBAL("sampling._CAPI_GetEdgeSubgraphTail")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  SubgraphRef g = args[0];
  auto gptr = std::dynamic_pointer_cast<NegSubgraph>(g.sptr());
  *rv = gptr->tail_nid;
});

class UniformEdgeSamplerObject: public EdgeSamplerObject {
 public:
  explicit UniformEdgeSamplerObject(const GraphPtr gptr,
                                    IdArray seed_edges,
                                    const int64_t batch_size,
                                    const int64_t num_workers,
                                    const bool replacement,
                                    const bool reset,
                                    const std::string neg_mode,
                                    const int64_t neg_sample_size,
                                    const bool exclude_positive,
                                    const bool check_false_neg,
                                    IdArray relations)
                                    : EdgeSamplerObject(gptr,
                                        seed_edges,
                                        batch_size,
                                        num_workers,
                                        replacement,
                                        reset,
                                        neg_mode,
                                        neg_sample_size,
                                        exclude_positive,
                                        check_false_neg,
                                        relations) {
    batch_curr_id_ = 0;
    num_seeds_ = seed_edges->shape[0];
    max_batch_id_ = (num_seeds_ + batch_size - 1) / batch_size;

    // TODO(song): Tricky: call FindEdge(0) so that gptr_ builds and caches
    // its COO structure.
    gptr_->FindEdge(0);
  }
  ~UniformEdgeSamplerObject() {}

  void Fetch(DGLRetValue* rv) {
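    // One batch per worker: each worker selects batch_size_ seed edges, builds
    // the positive edge subgraph and, when neg_mode_ is set, the matching
    // negative subgraph.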
    const int64_t num_workers = std::min(num_workers_, max_batch_id_ - batch_curr_id_);
    // generate subgraphs.
    std::vector<SubgraphRef> positive_subgs(num_workers);
    std::vector<SubgraphRef> negative_subgs(num_workers);

#pragma omp parallel for
    for (int64_t i = 0; i < num_workers; i++) {
      const int64_t start = (batch_curr_id_ + i) * batch_size_;
      const int64_t end = std::min(start + batch_size_, num_seeds_);
      const int64_t num_edges = end - start;
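      // Without replacement each worker takes a contiguous slice of the seed
      // edges; with replacement it draws num_edges random edge ids in
      // [0, num_seeds_).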
      IdArray worker_seeds;

      if (replacement_ == false) {
        worker_seeds = seed_edges_.CreateView({num_edges}, DLDataType{kDLInt, 64, 1},
                                              sizeof(dgl_id_t) * start);
      } else {
        std::vector<dgl_id_t> seeds;
        // Each edge is drawn independently (sampling with replacement).
        for (int64_t j = 0; j < num_edges; ++j) {
          seeds.push_back(RandomEngine::ThreadLocal()->RandInt(num_seeds_));
        }

        worker_seeds = aten::VecToIdArray(seeds);
      }

      EdgeArray arr = gptr_->FindEdges(worker_seeds);
      const dgl_id_t *src_ids = static_cast<const dgl_id_t *>(arr.src->data);
      const dgl_id_t *dst_ids = static_cast<const dgl_id_t *>(arr.dst->data);
      std::vector<dgl_id_t> src_vec(src_ids, src_ids + num_edges);
      std::vector<dgl_id_t> dst_vec(dst_ids, dst_ids + num_edges);
      // TODO(zhengda) what if there are duplicates in the src and dst vectors.

      Subgraph subg = gptr_->EdgeSubgraph(worker_seeds, false);
      positive_subgs[i] = ConvertRef(subg);
      // For PBG negative sampling, we accept "PBG-head" for corrupting head
      // nodes and "PBG-tail" for corrupting tail nodes.
      if (neg_mode_.substr(0, 3) == "PBG") {
        NegSubgraph neg_subg = genPBGNegEdgeSubgraph(subg, neg_mode_.substr(4),
                                                     neg_sample_size_,
                                                     exclude_positive_,
                                                     check_false_neg_);
        negative_subgs[i] = ConvertRef(neg_subg);
      } else if (neg_mode_.size() > 0) {
        NegSubgraph neg_subg = genNegEdgeSubgraph(subg, neg_mode_,
                                                  neg_sample_size_,
                                                  exclude_positive_,
                                                  check_false_neg_);
        negative_subgs[i] = ConvertRef(neg_subg);
      }
    }
    if (neg_mode_.size() > 0) {
      positive_subgs.insert(positive_subgs.end(), negative_subgs.begin(), negative_subgs.end());
    }
    batch_curr_id_ += num_workers;

    if (batch_curr_id_ >= max_batch_id_ && reset_ == true) {
      Reset();
    }

    *rv = List<SubgraphRef>(positive_subgs);
  }

  void Reset() {
    batch_curr_id_ = 0;
    if (replacement_ == false) {
      // Shuffle the seed edges so the next pass visits them in a new order.
      dgl_id_t *seed_ids = static_cast<dgl_id_t *>(seed_edges_->data);
      // Seed the engine so the permutation differs across resets.
      std::shuffle(seed_ids, seed_ids + seed_edges_->shape[0],
                   std::default_random_engine(std::random_device{}()));
    }
  }

  DGL_DECLARE_OBJECT_TYPE_INFO(UniformEdgeSamplerObject, Object);

 private:
  void randomSample(size_t set_size, size_t num, std::vector<size_t>* out) {
    RandomSample(set_size, num, out);
  }

  void randomSample(size_t set_size, size_t num, const std::vector<size_t> &exclude,
                    std::vector<size_t>* out) {
    RandomSample(set_size, num, exclude, out);
  }

  int64_t batch_curr_id_;
  int64_t max_batch_id_;
  int64_t num_seeds_;
};

class UniformEdgeSampler: public ObjectRef {
 public:
  UniformEdgeSampler() {}
  explicit UniformEdgeSampler(std::shared_ptr<runtime::Object> obj): ObjectRef(obj) {}

  UniformEdgeSamplerObject* operator->() const {
    return static_cast<UniformEdgeSamplerObject*>(obj_.get());
  }

  std::shared_ptr<UniformEdgeSamplerObject> sptr() const {
    return CHECK_NOTNULL(std::dynamic_pointer_cast<UniformEdgeSamplerObject>(obj_));
  }

  operator bool() const { return this->defined(); }
  using ContainerType = UniformEdgeSamplerObject;
};

DGL_REGISTER_GLOBAL("sampling._CAPI_CreateUniformEdgeSampler")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    GraphRef g = args[0];
    IdArray seed_edges = args[1];
    const int64_t batch_size = args[2];
    const int64_t max_num_workers = args[3];
    const bool replacement = args[4];
    const bool reset = args[5];
    const std::string neg_mode = args[6];
    const int neg_sample_size = args[7];
    const bool exclude_positive = args[8];
    const bool check_false_neg = args[9];
    IdArray relations = args[10];
    // process args
    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented for mutable graphs";
    CHECK(aten::IsValidIdArray(seed_edges));
    CHECK_EQ(seed_edges->ctx.device_type, kDLCPU)
      << "UniformEdgeSampler only supports CPU sampling";

    if (relations->shape[0] > 0) {
      CHECK(aten::IsValidIdArray(relations));
      CHECK_EQ(relations->ctx.device_type, kDLCPU)
        << "UniformEdgeSampler only supports CPU sampling";
    }
    BuildCoo(*gptr);

    auto o = std::make_shared<UniformEdgeSamplerObject>(gptr,
                                                        seed_edges,
                                                        batch_size,
                                                        max_num_workers,
                                                        replacement,
                                                        reset,
                                                        neg_mode,
                                                        neg_sample_size,
                                                        exclude_positive,
                                                        check_false_neg,
                                                        relations);
    *rv = o;
});

DGL_REGISTER_GLOBAL("sampling._CAPI_FetchUniformEdgeSample")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  UniformEdgeSampler sampler = args[0];
  sampler->Fetch(rv);
});

DGL_REGISTER_GLOBAL("sampling._CAPI_ResetUniformEdgeSample")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  UniformEdgeSampler sampler = args[0];
  sampler->Reset();
});

template<typename ValueType>
class WeightedEdgeSamplerObject: public EdgeSamplerObject {
 public:
  explicit WeightedEdgeSamplerObject(const GraphPtr gptr,
                                     IdArray seed_edges,
                                     NDArray edge_weight,
                                     NDArray node_weight,
                                     const int64_t batch_size,
                                     const int64_t num_workers,
                                     const bool replacement,
                                     const bool reset,
                                     const std::string neg_mode,
                                     const int64_t neg_sample_size,
                                     const bool exclude_positive,
                                     const bool check_false_neg,
                                     IdArray relations)
                                     : EdgeSamplerObject(gptr,
                                        seed_edges,
                                        batch_size,
                                        num_workers,
                                        replacement,
                                        reset,
                                        neg_mode,
                                        neg_sample_size,
                                        exclude_positive,
                                        check_false_neg,
                                        relations) {
    const int64_t num_edges = edge_weight->shape[0];
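    // Build a weighted selection heap over the edge weights so that edges are
    // drawn with probability proportional to their weight.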
    const ValueType *edge_prob = static_cast<const ValueType*>(edge_weight->data);
    std::vector<ValueType> eprob(num_edges);
    for (int64_t i = 0; i < num_edges; ++i) {
      eprob[i] = edge_prob[i];
    }
    edge_selector_ = std::make_shared<ArrayHeap<ValueType>>(eprob);
    edge_weight_ = edge_weight;

    const size_t num_nodes = node_weight->shape[0];
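    // Node weights are optional: when node_weight is empty, negative nodes are
    // sampled uniformly in randomSample(); otherwise they are drawn in
    // proportion to these weights.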
    if (num_nodes == 0) {
      node_selector_ = nullptr;
    } else {
      const ValueType *node_prob = static_cast<const ValueType*>(node_weight->data);
      std::vector<ValueType> nprob(num_nodes);
      for (size_t i = 0; i < num_nodes; ++i) {
        nprob[i] = node_prob[i];
      }
      node_selector_ = std::make_shared<ArrayHeap<ValueType>>(nprob);
    }

    curr_batch_id_ = 0;
    // Round up so a partial final batch counts as one batch; note that
    // (num_edges + batch_size) could overflow int64 for extreme inputs.
    max_batch_id_ = (num_edges + batch_size - 1) / batch_size;

    // TODO(song): Tricky: call FindEdge(0) so that gptr_ builds and caches
    // its COO structure.
    gptr_->FindEdge(0);
  }

  ~WeightedEdgeSamplerObject() {
  }

  void Fetch(DGLRetValue* rv) {
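    // One batch per worker: each worker draws batch_size_ edges with the
    // weighted edge selector, builds the positive edge subgraph and, when
    // neg_mode_ is set, the matching negative subgraph.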
    const int64_t num_workers = std::min(num_workers_, max_batch_id_ - curr_batch_id_);
    // generate subgraphs.
    std::vector<SubgraphRef> positive_subgs(num_workers);
    std::vector<SubgraphRef> negative_subgs(num_workers);

#pragma omp parallel for
    for (int64_t i = 0; i < num_workers; i++) {
      const dgl_id_t *seed_edge_ids = static_cast<const dgl_id_t *>(seed_edges_->data);
      std::vector<size_t> edge_ids(batch_size_);

      if (replacement_ == false) {
        size_t n = batch_size_;
        size_t num_ids = 0;
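        // edge_selector_ is shared by all workers and sampling without
        // replacement mutates it, so the call must be serialized.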
#pragma omp critical
        num_ids = edge_selector_->SampleWithoutReplacement(n, &edge_ids);
        while (edge_ids.size() > num_ids) {
          edge_ids.pop_back();
        }
      } else {
        // Each edge is drawn independently (sampling with replacement).
        for (int64_t j = 0; j < batch_size_; ++j) {
          size_t edge_id = edge_selector_->Sample();
          edge_ids[j] = seed_edge_ids[edge_id];
        }
      }
      auto worker_seeds = aten::VecToIdArray(edge_ids, seed_edges_->dtype.bits);

      EdgeArray arr = gptr_->FindEdges(worker_seeds);
      const dgl_id_t *src_ids = static_cast<const dgl_id_t *>(arr.src->data);
      const dgl_id_t *dst_ids = static_cast<const dgl_id_t *>(arr.dst->data);
      std::vector<dgl_id_t> src_vec(src_ids, src_ids + batch_size_);
      std::vector<dgl_id_t> dst_vec(dst_ids, dst_ids + batch_size_);
      // TODO(zhengda) what if there are duplicates in the src and dst vectors.

      Subgraph subg = gptr_->EdgeSubgraph(worker_seeds, false);
      positive_subgs[i] = ConvertRef(subg);
      // For PBG negative sampling, we accept "PBG-head" for corrupting head
      // nodes and "PBG-tail" for corrupting tail nodes.
      if (neg_mode_.substr(0, 3) == "PBG") {
        NegSubgraph neg_subg = genPBGNegEdgeSubgraph(subg, neg_mode_.substr(4),
                                                     neg_sample_size_,
                                                     exclude_positive_,
                                                     check_false_neg_);
        negative_subgs[i] = ConvertRef(neg_subg);
      } else if (neg_mode_.size() > 0) {
        NegSubgraph neg_subg = genNegEdgeSubgraph(subg, neg_mode_,
                                                  neg_sample_size_,
                                                  exclude_positive_,
                                                  check_false_neg_);
        negative_subgs[i] = ConvertRef(neg_subg);
      }
    }
    curr_batch_id_ += num_workers;

    if (curr_batch_id_ >= max_batch_id_ && reset_ == true) {
      Reset();
    }

    if (neg_mode_.size() > 0) {
      positive_subgs.insert(positive_subgs.end(), negative_subgs.begin(), negative_subgs.end());
    }
    *rv = List<SubgraphRef>(positive_subgs);
  }

  void Reset() {
    curr_batch_id_ = 0;
    if (replacement_ == false) {
      const int64_t num_edges = edge_weight_->shape[0];
      const ValueType *edge_prob = static_cast<const ValueType*>(edge_weight_->data);
      std::vector<ValueType> eprob(num_edges);
      for (int64_t i = 0; i < num_edges; ++i) {
        eprob[i] = edge_prob[i];
      }

      // rebuild the edge_selector_
      edge_selector_ = std::make_shared<ArrayHeap<ValueType>>(eprob);
    }
  }

  DGL_DECLARE_OBJECT_TYPE_INFO(WeightedEdgeSamplerObject<ValueType>, Object);

 private:
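  // Draws `num` distinct node ids from [0, set_size): uniformly when no node
  // weights were given, otherwise in proportion to the node weights.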
  void randomSample(size_t set_size, size_t num, std::vector<size_t>* out) {
    if (num < set_size) {
      std::unordered_set<size_t> sampled_idxs;
      while (sampled_idxs.size() < num) {
        if (node_selector_ == nullptr) {
          sampled_idxs.insert(RandomEngine::ThreadLocal()->RandInt(set_size));
        } else {
          size_t id = node_selector_->Sample();
          sampled_idxs.insert(id);
        }
      }

      out->insert(out->end(), sampled_idxs.begin(), sampled_idxs.end());
    } else {
      // If we need to sample all elements in the set, we don't need to
      // generate random numbers.
      for (size_t i = 0; i < set_size; i++)
        out->push_back(i);
    }
  }

  void randomSample(size_t set_size, size_t num, const std::vector<size_t> &exclude,
                    std::vector<size_t>* out) {
    std::unordered_map<size_t, int> sampled_idxs;
    for (auto v : exclude) {
      sampled_idxs.insert(std::pair<size_t, int>(v, 0));
    }
    if (num + exclude.size() < set_size) {
      while (sampled_idxs.size() < num + exclude.size()) {
        size_t rand;
        if (node_selector_ == nullptr) {
          rand =  RandomEngine::ThreadLocal()->RandInt(set_size);
        } else {
          rand = node_selector_->Sample();
        }
        sampled_idxs.insert(std::pair<size_t, int>(rand, 1));
      }
      for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) {
        if (it->second) {
          out->push_back(it->first);
        }
      }
    } else {
      // If we need to sample all elements in the set, we don't need to
      // generate random numbers.
      for (size_t i = 0; i < set_size; i++) {
        // If the element doesn't exist in exclude.
        if (sampled_idxs.find(i) == sampled_idxs.end()) {
          out->push_back(i);
        }
      }
    }
  }

 private:
  std::shared_ptr<ArrayHeap<ValueType>> edge_selector_;
  std::shared_ptr<ArrayHeap<ValueType>> node_selector_;

  NDArray edge_weight_;
  int64_t curr_batch_id_;
  int64_t max_batch_id_;
};

template class WeightedEdgeSamplerObject<float>;

class FloatWeightedEdgeSampler: public ObjectRef {
 public:
  FloatWeightedEdgeSampler() {}
  explicit FloatWeightedEdgeSampler(std::shared_ptr<runtime::Object> obj): ObjectRef(obj) {}

  WeightedEdgeSamplerObject<float>* operator->() const {
    return static_cast<WeightedEdgeSamplerObject<float>*>(obj_.get());
  }

  std::shared_ptr<WeightedEdgeSamplerObject<float>> sptr() const {
    return CHECK_NOTNULL(std::dynamic_pointer_cast<WeightedEdgeSamplerObject<float>>(obj_));
  }

  operator bool() const { return this->defined(); }
  using ContainerType = WeightedEdgeSamplerObject<float>;
};

DGL_REGISTER_GLOBAL("sampling._CAPI_CreateWeightedEdgeSampler")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    GraphRef g = args[0];
    IdArray seed_edges = args[1];
    NDArray edge_weight = args[2];
    NDArray node_weight = args[3];
    const int64_t batch_size = args[4];
    const int64_t max_num_workers = args[5];
    const bool replacement = args[6];
    const bool reset = args[7];
    const std::string neg_mode = args[8];
    const int64_t neg_sample_size = args[9];
    const bool exclude_positive = args[10];
    const bool check_false_neg = args[11];
    IdArray relations = args[12];

    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented for mutable graphs";
    CHECK(aten::IsValidIdArray(seed_edges));
    CHECK_EQ(seed_edges->ctx.device_type, kDLCPU)
      << "WeightedEdgeSampler only supports CPU sampling";
    CHECK(edge_weight->dtype.code == kDLFloat) << "edge_weight should be FloatType";
    CHECK(edge_weight->dtype.bits == 32) << "WeightedEdgeSampler only supports float32 weights";
    CHECK_EQ(edge_weight->ctx.device_type, kDLCPU)
      << "WeightedEdgeSampler only supports CPU sampling";
    if (node_weight->shape[0] > 0) {
      CHECK(node_weight->dtype.code == kDLFloat) << "node_weight should be FloatType";
      CHECK(node_weight->dtype.bits == 32) << "WeightedEdgeSampler only supports float32 weights";
      CHECK_EQ(node_weight->ctx.device_type, kDLCPU)
        << "WeightedEdgeSampler only supports CPU sampling";
    }
    if (relations->shape[0] > 0) {
      CHECK(aten::IsValidIdArray(relations));
      CHECK_EQ(relations->ctx.device_type, kDLCPU)
        << "WeightedEdgeSampler only supports CPU sampling";
    }
    BuildCoo(*gptr);

    const int64_t num_seeds = seed_edges->shape[0];
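    // Use at most one worker per batch of seed edges.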
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size);

    auto o = std::make_shared<WeightedEdgeSamplerObject<float>>(gptr,
                                                                seed_edges,
                                                                edge_weight,
                                                                node_weight,
                                                                batch_size,
                                                                num_workers,
                                                                replacement,
                                                                reset,
                                                                neg_mode,
                                                                neg_sample_size,
                                                                exclude_positive,
                                                                check_false_neg,
                                                                relations);
    *rv = o;
});

DGL_REGISTER_GLOBAL("sampling._CAPI_FetchWeightedEdgeSample")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  FloatWeightedEdgeSampler sampler = args[0];
  sampler->Fetch(rv);
});

DGL_REGISTER_GLOBAL("sampling._CAPI_ResetWeightedEdgeSample")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
  FloatWeightedEdgeSampler sampler = args[0];
  sampler->Reset();
});

}  // namespace dgl