/*!
 *  Copyright (c) 2018 by Contributors
 * \file graph/sampler.cc
 * \brief DGL sampler implementation
 */
#include <dgl/sampler.h>
#include <dgl/immutable_graph.h>
#include <dgl/runtime/container.h>
#include <dgl/packed_func_ext.h>
#include <dgl/random.h>
#include <dmlc/omp.h>
#include <algorithm>
#include <cstdlib>
#include <cmath>
#include <numeric>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../c_api_common.h"
#include "../array/common.h"  // for ATEN_FLOAT_TYPE_SWITCH

using namespace dgl::runtime;

namespace dgl {

namespace {
/*
 * ArrayHeap is a sum-tree built over a vector of non-negative weights. It is
 * used to sample an index with probability proportional to its weight.
 */
template<typename ValueType>
class ArrayHeap {
 public:
  explicit ArrayHeap(const std::vector<ValueType>& prob) {
    vec_size_ = prob.size();
    bit_len_ = ceil(log2(vec_size_));
    limit_ = 1 << bit_len_;
    // allocate twice the size
    heap_.resize(limit_ << 1, 0);
    // allocate the leaves
    for (int i = limit_; i < vec_size_+limit_; ++i) {
      heap_[i] = prob[i-limit_];
    }
    // iterate up the tree (this is O(m))
    for (int i = bit_len_-1; i >= 0; --i) {
      for (int j = (1 << i); j < (1 << (i + 1)); ++j) {
        heap_[j] = heap_[j << 1] + heap_[(j << 1) + 1];
      }
    }
  }
  ~ArrayHeap() {}

  /*
   * Remove term from index (this costs O(log m) steps)
   */
  void Delete(size_t index) {
    size_t i = index + limit_;
    ValueType w = heap_[i];
    for (int j = bit_len_; j >= 0; --j) {
      heap_[i] -= w;
      i = i >> 1;
    }
  }

  /*
   * Add value w to index (this costs O(log m) steps)
   */
  void Add(size_t index, ValueType w) {
    size_t i = index + limit_;
    for (int j = bit_len_; j >= 0; --j) {
      heap_[i] += w;
      i = i >> 1;
    }
  }

  /*
   * Sample from arrayHeap
   */
  size_t Sample() {
    ValueType xi = heap_[1] * RandomEngine::ThreadLocal()->Uniform<float>();
    int i = 1;
    while (i < limit_) {
      i = i << 1;
      if (xi >= heap_[i]) {
        xi -= heap_[i];
        i += 1;
      }
    }
    return i - limit_;
  }

  /*
   * Sample n elements without replacement.
   */
  void SampleWithoutReplacement(size_t n, std::vector<size_t>* samples) {
    // sample n elements
    for (size_t i = 0; i < n; ++i) {
      samples->at(i) = this->Sample();
      this->Delete(samples->at(i));
    }
  }

 private:
  int vec_size_;  // sample size
  int bit_len_;   // bit size
  int limit_;
  std::vector<ValueType> heap_;
};

/*
 * Uniformly sample integers from [0, set_size) without replacement.
 */
void RandomSample(size_t set_size, size_t num, std::vector<size_t>* out) {
  std::unordered_set<size_t> sampled_idxs;
  while (sampled_idxs.size() < num) {
    sampled_idxs.insert(RandomEngine::ThreadLocal()->RandInt(set_size));
  }
  out->clear();
  out->insert(out->end(), sampled_idxs.begin(), sampled_idxs.end());
}

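/*
 * Uniformly sample num integers from [0, set_size) without replacement,
 * while excluding the integers given in exclude.
 */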
void RandomSample(size_t set_size, size_t num, const std::vector<size_t> &exclude,
                  std::vector<size_t>* out) {
  std::unordered_map<size_t, int> sampled_idxs;
  for (auto v : exclude) {
    sampled_idxs.insert(std::pair<size_t, int>(v, 0));
  }
  while (sampled_idxs.size() < num + exclude.size()) {
    size_t rand = RandomEngine::ThreadLocal()->RandInt(set_size);
    sampled_idxs.insert(std::pair<size_t, int>(rand, 1));
  }
  out->clear();
  for (auto it = sampled_idxs.begin(); it != sampled_idxs.end(); it++) {
    if (it->second) {
      out->push_back(it->first);
    }
  }
}

/*
 * For a sparse array whose non-zeros are represented by nz_idxs,
 * negate the sparse array and output the non-zeros of the negated array.
 */
void NegateArray(const std::vector<size_t> &nz_idxs,
                 size_t arr_size,
                 std::vector<size_t>* out) {
  // nz_idxs must have been sorted.
  auto it = nz_idxs.begin();
  size_t i = 0;
  CHECK_GT(arr_size, nz_idxs.back());
  for (; i < arr_size && it != nz_idxs.end(); i++) {
    if (*it == i) {
      it++;
      continue;
    }
    out->push_back(i);
  }
  for (; i < arr_size; i++) {
    out->push_back(i);
  }
}

/*
 * Uniformly sample at most max_num_neighbor vertices (and their edge Ids)
 * from a neighbor list.
 */
void GetUniformSample(const dgl_id_t* edge_id_list,
                      const dgl_id_t* vid_list,
                      const size_t ver_len,
                      const size_t max_num_neighbor,
                      std::vector<dgl_id_t>* out_ver,
                      std::vector<dgl_id_t>* out_edge) {
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // If we sample only a small fraction of a large neighbor list, sample the
  // kept indices directly; otherwise sample the complement and negate it.
  std::vector<size_t> sorted_idxs;
  if (ver_len > max_num_neighbor * 2) {
    sorted_idxs.reserve(max_num_neighbor);
    RandomSample(ver_len, max_num_neighbor, &sorted_idxs);
    std::sort(sorted_idxs.begin(), sorted_idxs.end());
  } else {
    std::vector<size_t> negate;
    negate.reserve(ver_len - max_num_neighbor);
    RandomSample(ver_len, ver_len - max_num_neighbor, &negate);
    std::sort(negate.begin(), negate.end());
    NegateArray(negate, ver_len, &sorted_idxs);
  }
  // verify the result.
  CHECK_EQ(sorted_idxs.size(), max_num_neighbor);
  for (size_t i = 1; i < sorted_idxs.size(); i++) {
    CHECK_GT(sorted_idxs[i], sorted_idxs[i - 1]);
  }
  for (auto idx : sorted_idxs) {
    out_ver->push_back(vid_list[idx]);
    out_edge->push_back(edge_id_list[idx]);
  }
}

/*
 * Non-uniform sample via ArrayHeap
 *
 * \param probability Transition probability on the entire graph, indexed by edge ID
 */
template<typename ValueType>
void GetNonUniformSample(const ValueType* probability,
                         const dgl_id_t* edge_id_list,
                         const dgl_id_t* vid_list,
                         const size_t ver_len,
                         const size_t max_num_neighbor,
                         std::vector<dgl_id_t>* out_ver,
                         std::vector<dgl_id_t>* out_edge) {
  // Copy vid_list to output
  if (ver_len <= max_num_neighbor) {
    out_ver->insert(out_ver->end(), vid_list, vid_list + ver_len);
    out_edge->insert(out_edge->end(), edge_id_list, edge_id_list + ver_len);
    return;
  }
  // Make sample
  std::vector<size_t> sp_index(max_num_neighbor);
  std::vector<ValueType> sp_prob(ver_len);
  for (size_t i = 0; i < ver_len; ++i) {
    sp_prob[i] = probability[edge_id_list[i]];
  }
  ArrayHeap<ValueType> arrayHeap(sp_prob);
  arrayHeap.SampleWithoutReplacement(max_num_neighbor, &sp_index);
  out_ver->resize(max_num_neighbor);
  out_edge->resize(max_num_neighbor);
  for (size_t i = 0; i < max_num_neighbor; ++i) {
    size_t idx = sp_index[i];
    out_ver->at(i) = vid_list[idx];
    out_edge->at(i) = edge_id_list[idx];
  }
  sort(out_ver->begin(), out_ver->end());
  sort(out_edge->begin(), out_edge->end());
}

/*
 * Used for subgraph sampling
 */
struct neigh_list {
  std::vector<dgl_id_t> neighs;
  std::vector<dgl_id_t> edges;
  neigh_list(const std::vector<dgl_id_t> &_neighs,
             const std::vector<dgl_id_t> &_edges)
    : neighs(_neighs), edges(_edges) {}
};

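/*
 * Records, for one sampled vertex, its Id, the offset of its sampled neighbors
 * in the global neighbor/edge lists, and the number of sampled edges.
 */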
struct neighbor_info {
  dgl_id_t id;
  size_t pos;
  size_t num_edges;

  neighbor_info(dgl_id_t id, size_t pos, size_t num_edges) {
    this->id = id;
    this->pos = pos;
    this->num_edges = num_edges;
  }
};

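/*
 * Assemble the sampled layers into a NodeFlow: fill in the node/edge mappings,
 * the per-layer and per-flow offsets, and the CSR of the sampled subgraph.
 * The layer order is reversed so that the seed nodes end up in the last layer.
 */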
NodeFlow ConstructNodeFlow(std::vector<dgl_id_t> neighbor_list,
                           std::vector<dgl_id_t> edge_list,
                           std::vector<size_t> layer_offsets,
                           std::vector<std::pair<dgl_id_t, int> > *sub_vers,
                           std::vector<neighbor_info> *neigh_pos,
                           const std::string &edge_type,
                           int64_t num_edges, int num_hops, bool is_multigraph) {
  NodeFlow nf = NodeFlow::Create();
  uint64_t num_vertices = sub_vers->size();
  nf->node_mapping = aten::NewIdArray(num_vertices);
  nf->edge_mapping = aten::NewIdArray(num_edges);
  nf->layer_offsets = aten::NewIdArray(num_hops + 1);
  nf->flow_offsets = aten::NewIdArray(num_hops);

  dgl_id_t *node_map_data = static_cast<dgl_id_t *>(nf->node_mapping->data);
  dgl_id_t *layer_off_data = static_cast<dgl_id_t *>(nf->layer_offsets->data);
  dgl_id_t *flow_off_data = static_cast<dgl_id_t *>(nf->flow_offsets->data);
  dgl_id_t *edge_map_data = static_cast<dgl_id_t *>(nf->edge_mapping->data);

  // Construct sub_csr_graph
  // TODO(minjie): is nodeflow a multigraph?
  auto subg_csr = CSRPtr(new CSR(num_vertices, num_edges, is_multigraph));
  dgl_id_t* indptr_out = static_cast<dgl_id_t*>(subg_csr->indptr()->data);
  dgl_id_t* col_list_out = static_cast<dgl_id_t*>(subg_csr->indices()->data);
  dgl_id_t* eid_out = static_cast<dgl_id_t*>(subg_csr->edge_ids()->data);
  size_t collected_nedges = 0;

  // The data from the previous steps:
  // * node data: sub_vers (vid, layer), neigh_pos,
  // * edge data: neighbor_list, edge_list, probability.
  // * layer_offsets: the offset in sub_vers.
  dgl_id_t ver_id = 0;
  std::vector<std::unordered_map<dgl_id_t, dgl_id_t>> layer_ver_maps;
  layer_ver_maps.resize(num_hops);
  size_t out_node_idx = 0;
  for (int layer_id = num_hops - 1; layer_id >= 0; layer_id--) {
    // We sort the vertices in a layer so that we don't need to sort the neighbor Ids
    // after remapping to the subgraph. However, we don't need to sort the first
    // layer, because we want the order of the nodes in the first layer to be the
    // same as the order of the input seed nodes.
    if (layer_id > 0) {
      std::sort(sub_vers->begin() + layer_offsets[layer_id],
                sub_vers->begin() + layer_offsets[layer_id + 1],
                [](const std::pair<dgl_id_t, dgl_id_t> &a1,
                   const std::pair<dgl_id_t, dgl_id_t> &a2) {
        return a1.first < a2.first;
      });
    }

    // Save the sampled vertices and their layer Ids.
    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1]; i++) {
      node_map_data[out_node_idx++] = sub_vers->at(i).first;
      layer_ver_maps[layer_id].insert(std::pair<dgl_id_t, dgl_id_t>(sub_vers->at(i).first,
                                                                    ver_id++));
      CHECK_EQ(sub_vers->at(i).second, layer_id);
    }
  }
  CHECK(out_node_idx == num_vertices);

  // sampling algorithms have to start from the seed nodes, so the seed nodes are
  // in the first layer and the input nodes are in the last layer.
  // When we expose the sampled graph to a Python user, we say the input nodes
  // are in the first layer and the seed nodes are in the last layer.
  // Thus, when we copy sampled results to a CSR, we need to reverse the order of layers.
  std::fill(indptr_out, indptr_out + num_vertices + 1, 0);
  size_t row_idx = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  layer_off_data[0] = 0;
  layer_off_data[1] = layer_offsets[num_hops] - layer_offsets[num_hops - 1];
  int out_layer_idx = 1;
  for (int layer_id = num_hops - 2; layer_id >= 0; layer_id--) {
    // Because we don't sort the vertices in the first layer above, we can't sort
    // the neighbor positions of the vertices in the first layer either.
    if (layer_id > 0) {
      std::sort(neigh_pos->begin() + layer_offsets[layer_id],
                neigh_pos->begin() + layer_offsets[layer_id + 1],
                [](const neighbor_info &a1, const neighbor_info &a2) {
                  return a1.id < a2.id;
                });
    }

    for (size_t i = layer_offsets[layer_id]; i < layer_offsets[layer_id + 1]; i++) {
      dgl_id_t dst_id = sub_vers->at(i).first;
      CHECK_EQ(dst_id, neigh_pos->at(i).id);
      size_t pos = neigh_pos->at(i).pos;
      CHECK_LE(pos, neighbor_list.size());
      const size_t nedges = neigh_pos->at(i).num_edges;
      if (neighbor_list.empty()) CHECK_EQ(nedges, 0);

      // We need to map the Ids of the neighbors to the subgraph.
      auto neigh_it = neighbor_list.begin() + pos;
      for (size_t i = 0; i < nedges; i++) {
        dgl_id_t neigh = *(neigh_it + i);
        CHECK(layer_ver_maps[layer_id + 1].find(neigh) != layer_ver_maps[layer_id + 1].end());
        col_list_out[collected_nedges + i] = layer_ver_maps[layer_id + 1][neigh];
      }
      // We can simply copy the edge Ids.
      std::copy_n(edge_list.begin() + pos,
                  nedges, edge_map_data + collected_nedges);
      collected_nedges += nedges;
      indptr_out[row_idx+1] = indptr_out[row_idx] + nedges;
      row_idx++;
    }
    layer_off_data[out_layer_idx + 1] = layer_off_data[out_layer_idx]
        + layer_offsets[layer_id + 1] - layer_offsets[layer_id];
    out_layer_idx++;
  }
  CHECK_EQ(row_idx, num_vertices);
  CHECK_EQ(indptr_out[row_idx], num_edges);
  CHECK_EQ(out_layer_idx, num_hops);
  CHECK_EQ(layer_off_data[out_layer_idx], num_vertices);

  // Copy flow offsets.
  flow_off_data[0] = 0;
  int out_flow_idx = 0;
  for (size_t i = 0; i < layer_offsets.size() - 2; i++) {
    size_t num_edges = indptr_out[layer_off_data[i + 2]] - indptr_out[layer_off_data[i + 1]];
    flow_off_data[out_flow_idx + 1] = flow_off_data[out_flow_idx] + num_edges;
    out_flow_idx++;
  }
  CHECK(out_flow_idx == num_hops - 1);
  CHECK(flow_off_data[num_hops - 1] == static_cast<uint64_t>(num_edges));

  std::iota(eid_out, eid_out + num_edges, 0);

  if (edge_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(subg_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, subg_csr));
  }

  return nf;
}

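/*
 * Sample a NodeFlow from the given graph. Starting from the seed vertices,
 * the frontier is expanded layer by layer until num_hops layers of vertices
 * have been collected; at most num_neighbor neighbors are kept per vertex,
 * sampled uniformly if probability is nullptr and otherwise proportionally
 * to the given per-edge weights.
 */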
template<typename ValueType>
NodeFlow SampleSubgraph(const ImmutableGraph *graph,
                        const std::vector<dgl_id_t>& seeds,
                        const ValueType* probability,
                        const std::string &edge_type,
                        int num_hops,
                        size_t num_neighbor,
                        const bool add_self_loop) {
  CHECK_EQ(graph->NumBits(), 64) << "32 bit graph is not supported yet";
  const size_t num_seeds = seeds.size();
  auto orig_csr = edge_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t* val_list = static_cast<dgl_id_t*>(orig_csr->edge_ids()->data);
  const dgl_id_t* col_list = static_cast<dgl_id_t*>(orig_csr->indices()->data);
  const dgl_id_t* indptr = static_cast<dgl_id_t*>(orig_csr->indptr()->data);

  std::unordered_set<dgl_id_t> sub_ver_map;  // The vertex Ids in a layer.
  std::vector<std::pair<dgl_id_t, int> > sub_vers;
  sub_vers.reserve(num_seeds * 10);
  // add seed vertices
  for (size_t i = 0; i < num_seeds; ++i) {
    auto ret = sub_ver_map.insert(seeds[i]);
    // If the vertex is inserted successfully.
    if (ret.second) {
      sub_vers.emplace_back(seeds[i], 0);
    }
  }
  std::vector<dgl_id_t> tmp_sampled_src_list;
  std::vector<dgl_id_t> tmp_sampled_edge_list;
  // ver_id, position
  std::vector<neighbor_info> neigh_pos;
  neigh_pos.reserve(num_seeds);
  std::vector<dgl_id_t> neighbor_list;
  std::vector<dgl_id_t> edge_list;
  std::vector<size_t> layer_offsets(num_hops + 1);
  int64_t num_edges = 0;

  layer_offsets[0] = 0;
  layer_offsets[1] = sub_vers.size();
  for (int layer_id = 1; layer_id < num_hops; layer_id++) {
    // We need to avoid resampling the same node in a layer, but we allow a node
    // to be resampled in multiple layers. We use `sub_ver_map` to keep track of
    // sampled nodes in a layer, and clear it when entering a new layer.
    sub_ver_map.clear();
    // Previous iteration collects all nodes in sub_vers, which are collected
    // in the previous layer. sub_vers is used both as a node collection and a queue.
    for (size_t idx = layer_offsets[layer_id - 1]; idx < layer_offsets[layer_id]; idx++) {
      dgl_id_t dst_id = sub_vers[idx].first;
      const int cur_node_level = sub_vers[idx].second;

      tmp_sampled_src_list.clear();
      tmp_sampled_edge_list.clear();
      dgl_id_t ver_len = *(indptr+dst_id+1) - *(indptr+dst_id);
      if (probability == nullptr) {  // uniform-sample
        GetUniformSample(val_list + *(indptr + dst_id),
                         col_list + *(indptr + dst_id),
                         ver_len,
                         num_neighbor,
                         &tmp_sampled_src_list,
                         &tmp_sampled_edge_list);
      } else {  // non-uniform-sample
        GetNonUniformSample(probability,
                            val_list + *(indptr + dst_id),
                            col_list + *(indptr + dst_id),
                            ver_len,
                            num_neighbor,
                            &tmp_sampled_src_list,
                            &tmp_sampled_edge_list);
      }
      // If we need to add a self-loop and it doesn't exist in the sampled neighbor list.
      if (add_self_loop && std::find(tmp_sampled_src_list.begin(), tmp_sampled_src_list.end(),
                                     dst_id) == tmp_sampled_src_list.end()) {
        tmp_sampled_src_list.push_back(dst_id);
        const dgl_id_t *src_list = col_list + *(indptr + dst_id);
        const dgl_id_t *eid_list = val_list + *(indptr + dst_id);
        // TODO(zhengda) this operation has O(N) complexity. It can be pretty slow.
        const dgl_id_t *src = std::find(src_list, src_list + ver_len, dst_id);
        // If there doesn't exist a self-loop in the graph,
        // we have to add -1 as the edge id for the self-loop edge.
        if (src == src_list + ver_len)
          tmp_sampled_edge_list.push_back(-1);
        else
          tmp_sampled_edge_list.push_back(eid_list[src - src_list]);
      }
      CHECK_EQ(tmp_sampled_src_list.size(), tmp_sampled_edge_list.size());
      neigh_pos.emplace_back(dst_id, neighbor_list.size(), tmp_sampled_src_list.size());
      // Then push the vertices
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        neighbor_list.push_back(tmp_sampled_src_list[i]);
      }
      // Finally we push the edge list
      for (size_t i = 0; i < tmp_sampled_edge_list.size(); ++i) {
        edge_list.push_back(tmp_sampled_edge_list[i]);
      }
      num_edges += tmp_sampled_src_list.size();
      for (size_t i = 0; i < tmp_sampled_src_list.size(); ++i) {
        // We need to add the neighbor in the hashtable here. This ensures that
        // the vertex in the queue is unique. If we see a vertex before, we don't
        // need to add it to the queue again.
        auto ret = sub_ver_map.insert(tmp_sampled_src_list[i]);
        // If the sampled neighbor is inserted to the map successfully.
        if (ret.second) {
          sub_vers.emplace_back(tmp_sampled_src_list[i], cur_node_level + 1);
        }
      }
    }
    layer_offsets[layer_id + 1] = layer_offsets[layer_id] + sub_ver_map.size();
    CHECK_EQ(layer_offsets[layer_id + 1], sub_vers.size());
  }

  return ConstructNodeFlow(neighbor_list, edge_list, layer_offsets, &sub_vers, &neigh_pos,
                           edge_type, num_edges, num_hops, graph->IsMultigraph());
}

}  // namespace

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetGraph")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->graph;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetNodeMapping")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->node_mapping;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetEdgeMapping")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->edge_mapping;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetLayerOffsets")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->layer_offsets;
  });

DGL_REGISTER_GLOBAL("nodeflow._CAPI_NodeFlowGetBlockOffsets")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    NodeFlow nflow = args[0];
    *rv = nflow->flow_offsets;
  });

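/*
 * Neighbor sampling entry point. The returned NodeFlow has num_hops + 1 node
 * layers: the seed nodes plus num_hops layers of sampled neighbors.
 */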
template<typename ValueType>
NodeFlow SamplerOp::NeighborSample(const ImmutableGraph *graph,
                                   const std::vector<dgl_id_t>& seeds,
                                   const std::string &edge_type,
                                   int num_hops, int expand_factor,
                                   const bool add_self_loop,
                                   const ValueType *probability) {
  return SampleSubgraph(graph,
                        seeds,
                        probability,
                        edge_type,
                        num_hops + 1,
                        expand_factor,
                        add_self_loop);
}

namespace {
  void ConstructLayers(const dgl_id_t *indptr,
                       const dgl_id_t *indices,
                       const std::vector<dgl_id_t>& seed_array,
                       IdArray layer_sizes,
                       std::vector<dgl_id_t> *layer_offsets,
                       std::vector<dgl_id_t> *node_mapping,
                       std::vector<int64_t> *actl_layer_sizes,
                       std::vector<float> *probabilities) {
    /*
     * Given a graph and a collection of seed nodes, this function constructs NodeFlow
     * layers via uniform layer-wise sampling, and return the resultant layers and their
     * corresponding probabilities.
     */
    std::copy(seed_array.begin(), seed_array.end(), std::back_inserter(*node_mapping));
    actl_layer_sizes->push_back(node_mapping->size());
    probabilities->insert(probabilities->end(), node_mapping->size(), 1);
    const int64_t* layer_sizes_data = static_cast<int64_t*>(layer_sizes->data);
    const int64_t num_layers = layer_sizes->shape[0];

    size_t curr = 0;
    size_t next = node_mapping->size();
    for (int64_t i = num_layers - 1; i >= 0; --i) {
      const int64_t layer_size = layer_sizes_data[i];
      std::unordered_set<dgl_id_t> candidate_set;
      for (auto j = curr; j != next; ++j) {
        auto src = (*node_mapping)[j];
        candidate_set.insert(indices + indptr[src], indices + indptr[src + 1]);
      }

      std::vector<dgl_id_t> candidate_vector;
      std::copy(candidate_set.begin(), candidate_set.end(),
                std::back_inserter(candidate_vector));

      std::unordered_map<dgl_id_t, size_t> n_occurrences;
      auto n_candidates = candidate_vector.size();
      for (int64_t j = 0; j != layer_size; ++j) {
        auto dst = candidate_vector[
          RandomEngine::ThreadLocal()->RandInt(n_candidates)];
        if (!n_occurrences.insert(std::make_pair(dst, 1)).second) {
          ++n_occurrences[dst];
        }
      }

      for (auto const &pair : n_occurrences) {
        node_mapping->push_back(pair.first);
        float p = pair.second * n_candidates / static_cast<float>(layer_size);
        probabilities->push_back(p);
      }

      actl_layer_sizes->push_back(node_mapping->size() - next);
      curr = next;
      next = node_mapping->size();
    }
    std::reverse(node_mapping->begin(), node_mapping->end());
    std::reverse(actl_layer_sizes->begin(), actl_layer_sizes->end());
    layer_offsets->push_back(0);
    for (const auto &size : *actl_layer_sizes) {
      layer_offsets->push_back(size + layer_offsets->back());
    }
  }

  void ConstructFlows(const dgl_id_t *indptr,
                      const dgl_id_t *indices,
                      const dgl_id_t *eids,
                      const std::vector<dgl_id_t> &node_mapping,
                      const std::vector<int64_t> &actl_layer_sizes,
                      std::vector<dgl_id_t> *sub_indptr,
                      std::vector<dgl_id_t> *sub_indices,
                      std::vector<dgl_id_t> *sub_eids,
                      std::vector<dgl_id_t> *flow_offsets,
                      std::vector<dgl_id_t> *edge_mapping) {
    /*
     * Given a graph and a sequence of NodeFlow layers, this function constructs dense
     * subgraphs (flows) between consecutive layers.
     */
    auto n_flows = actl_layer_sizes.size() - 1;
    for (int64_t i = 0; i < actl_layer_sizes.front() + 1; i++)
      sub_indptr->push_back(0);
    flow_offsets->push_back(0);
    int64_t first = 0;
    for (size_t i = 0; i < n_flows; ++i) {
      auto src_size = actl_layer_sizes[i];
      std::unordered_map<dgl_id_t, dgl_id_t> source_map;
      for (int64_t j = 0; j < src_size; ++j) {
        source_map.insert(std::make_pair(node_mapping[first + j], first + j));
      }
      auto dst_size = actl_layer_sizes[i + 1];
      for (int64_t j = 0; j < dst_size; ++j) {
        auto dst = node_mapping[first + src_size + j];
        typedef std::pair<dgl_id_t, dgl_id_t> id_pair;
        std::vector<id_pair> neighbor_indices;
        for (dgl_id_t k = indptr[dst]; k < indptr[dst + 1]; ++k) {
          // TODO(gaiyu): accelerate hash table lookup
          auto ret = source_map.find(indices[k]);
          if (ret != source_map.end()) {
            neighbor_indices.push_back(std::make_pair(ret->second, eids[k]));
          }
        }
        auto cmp = [](const id_pair p, const id_pair q)->bool { return p.first < q.first; };
        std::sort(neighbor_indices.begin(), neighbor_indices.end(), cmp);
        for (const auto &pair : neighbor_indices) {
          sub_indices->push_back(pair.first);
          edge_mapping->push_back(pair.second);
        }
        sub_indptr->push_back(sub_indices->size());
      }
      flow_offsets->push_back(sub_indices->size());
      first += src_size;
    }
    sub_eids->resize(sub_indices->size());
    std::iota(sub_eids->begin(), sub_eids->end(), 0);
  }
}  // namespace

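/*
 * Layer-wise uniform sampling: each NodeFlow layer is built by drawing
 * uniformly (with replacement, then deduplicating) from the union of the
 * neighbors of the layer above; ConstructFlows then materializes the dense
 * connections between consecutive layers.
 */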
NodeFlow SamplerOp::LayerUniformSample(const ImmutableGraph *graph,
                                       const std::vector<dgl_id_t>& seeds,
                                       const std::string &neighbor_type,
                                       IdArray layer_sizes) {
  const auto g_csr = neighbor_type == "in" ? graph->GetInCSR() : graph->GetOutCSR();
  const dgl_id_t *indptr = static_cast<dgl_id_t*>(g_csr->indptr()->data);
  const dgl_id_t *indices = static_cast<dgl_id_t*>(g_csr->indices()->data);
  const dgl_id_t *eids = static_cast<dgl_id_t*>(g_csr->edge_ids()->data);

  std::vector<dgl_id_t> layer_offsets;
  std::vector<dgl_id_t> node_mapping;
  std::vector<int64_t> actl_layer_sizes;
  std::vector<float> probabilities;
  ConstructLayers(indptr,
                  indices,
                  seeds,
                  layer_sizes,
                  &layer_offsets,
                  &node_mapping,
                  &actl_layer_sizes,
                  &probabilities);

  std::vector<dgl_id_t> sub_indptr, sub_indices, sub_edge_ids;
  std::vector<dgl_id_t> flow_offsets;
  std::vector<dgl_id_t> edge_mapping;
  ConstructFlows(indptr,
                 indices,
                 eids,
                 node_mapping,
                 actl_layer_sizes,
                 &sub_indptr,
                 &sub_indices,
                 &sub_edge_ids,
                 &flow_offsets,
                 &edge_mapping);
  // sanity check
  CHECK_GT(sub_indptr.size(), 0);
  CHECK_EQ(sub_indptr[0], 0);
  CHECK_EQ(sub_indptr.back(), sub_indices.size());
  CHECK_EQ(sub_indices.size(), sub_edge_ids.size());

  NodeFlow nf = NodeFlow::Create();
  auto sub_csr = CSRPtr(new CSR(aten::VecToIdArray(sub_indptr),
                                aten::VecToIdArray(sub_indices),
                                aten::VecToIdArray(sub_edge_ids)));

  if (neighbor_type == std::string("in")) {
    nf->graph = GraphPtr(new ImmutableGraph(sub_csr, nullptr));
  } else {
    nf->graph = GraphPtr(new ImmutableGraph(nullptr, sub_csr));
  }

  nf->node_mapping = aten::VecToIdArray(node_mapping);
  nf->edge_mapping = aten::VecToIdArray(edge_mapping);
  nf->layer_offsets = aten::VecToIdArray(layer_offsets);
  nf->flow_offsets = aten::VecToIdArray(flow_offsets);

  return nf;
}

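/*
 * Make sure the CSR of the requested direction has been constructed before
 * sampling starts, so the OpenMP workers don't race to build it lazily.
 */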
void BuildCsr(const ImmutableGraph &g, const std::string neigh_type) {
  if (neigh_type == "in") {
    auto csr = g.GetInCSR();
    assert(csr);
  } else if (neigh_type == "out") {
    auto csr = g.GetOutCSR();
    assert(csr);
  } else {
    LOG(FATAL) << "We don't support sampling from neighbor type " << neigh_type;
  }
}

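/*
 * Shared implementation of uniform and weighted neighbor sampling: split the
 * seed nodes into per-worker batches and sample one NodeFlow per batch in
 * parallel.
 */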
template<typename ValueType>
std::vector<NodeFlow> NeighborSamplingImpl(const ImmutableGraphPtr gptr,
                                           const IdArray seed_nodes,
                                           const int64_t batch_start_id,
                                           const int64_t batch_size,
                                           const int64_t max_num_workers,
                                           const int64_t expand_factor,
                                           const int64_t num_hops,
                                           const std::string neigh_type,
                                           const bool add_self_loop,
                                           const ValueType *probability) {
    // process args
    CHECK(aten::IsValidIdArray(seed_nodes));
    const dgl_id_t* seed_nodes_data = static_cast<dgl_id_t*>(seed_nodes->data);
    const int64_t num_seeds = seed_nodes->shape[0];
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size - batch_start_id);
    // We need to make sure we have the right CSR before we enter parallel sampling.
    BuildCsr(*gptr, neigh_type);
    // generate node flows
    std::vector<NodeFlow> nflows(num_workers);
#pragma omp parallel for
    for (int i = 0; i < num_workers; i++) {
      // create per-worker seed nodes.
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      // TODO(minjie): the vector allocation/copy is unnecessary
      std::vector<dgl_id_t> worker_seeds(end - start);
      std::copy(seed_nodes_data + start, seed_nodes_data + end,
                worker_seeds.begin());
      nflows[i] = SamplerOp::NeighborSample(
          gptr.get(), worker_seeds, neigh_type, num_hops, expand_factor,
          add_self_loop, probability);
    }
    return nflows;
}

DGL_REGISTER_GLOBAL("sampling._CAPI_UniformSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    const GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const int64_t expand_factor = args[5];
    const int64_t num_hops = args[6];
    const std::string neigh_type = args[7];
    const bool add_self_loop = args[8];

    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";

    std::vector<NodeFlow> nflows = NeighborSamplingImpl<float>(
        gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
        expand_factor, num_hops, neigh_type, add_self_loop, nullptr);

    *rv = List<NodeFlow>(nflows);
  });

DGL_REGISTER_GLOBAL("sampling._CAPI_NeighborSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    const GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const int64_t expand_factor = args[5];
    const int64_t num_hops = args[6];
    const std::string neigh_type = args[7];
    const bool add_self_loop = args[8];
    const NDArray probability = args[9];

    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";

    std::vector<NodeFlow> nflows;

    CHECK(probability->dtype.code == kDLFloat)
      << "transition probability must be float";
    CHECK(probability->ndim == 1)
      << "transition probability must be a 1-dimensional vector";

    ATEN_FLOAT_TYPE_SWITCH(
      probability->dtype,
      FloatType,
      "transition probability",
      {
        const FloatType *prob;

        if (probability->ndim == 1 && probability->shape[0] == 0) {
          prob = nullptr;
        } else {
          CHECK(probability->shape[0] == gptr->NumEdges())
            << "transition probability must have same number of elements as edges";
          CHECK(probability.IsContiguous())
            << "transition probability must be contiguous tensor";
          prob = static_cast<const FloatType *>(probability->data);
        }

        nflows = NeighborSamplingImpl(
            gptr, seed_nodes, batch_start_id, batch_size, max_num_workers,
            expand_factor, num_hops, neigh_type, add_self_loop, prob);
    });

    *rv = List<NodeFlow>(nflows);
  });

DGL_REGISTER_GLOBAL("sampling._CAPI_LayerSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    GraphRef g = args[0];
    const IdArray seed_nodes = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const IdArray layer_sizes = args[5];
    const std::string neigh_type = args[6];
    // process args
    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";
    CHECK(aten::IsValidIdArray(seed_nodes));
    const dgl_id_t* seed_nodes_data = static_cast<dgl_id_t*>(seed_nodes->data);
    const int64_t num_seeds = seed_nodes->shape[0];
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size - batch_start_id);
    // We need to make sure we have the right CSR before we enter parallel sampling.
    BuildCsr(*gptr, neigh_type);
    // generate node flows
    std::vector<NodeFlow> nflows(num_workers);
#pragma omp parallel for
    for (int i = 0; i < num_workers; i++) {
      // create per-worker seed nodes.
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      // TODO(minjie): the vector allocation/copy is unnecessary
      std::vector<dgl_id_t> worker_seeds(end - start);
      std::copy(seed_nodes_data + start, seed_nodes_data + end,
                worker_seeds.begin());
      nflows[i] = SamplerOp::LayerUniformSample(
          gptr.get(), worker_seeds, neigh_type, layer_sizes);
    }
    *rv = List<NodeFlow>(nflows);
  });

namespace {

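/*
 * Make sure the COO representation has been constructed before parallel edge
 * sampling starts.
 */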
void BuildCoo(const ImmutableGraph &g) {
  auto coo = g.GetCOO();
  assert(coo);
}


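/*
 * Map a global vertex Id to a local Id in the negative subgraph, assigning a
 * new local Id the first time the global Id is seen.
 */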
dgl_id_t global2local_map(dgl_id_t global_id,
                          std::unordered_map<dgl_id_t, dgl_id_t> *map) {
  auto it = map->find(global_id);
  if (it == map->end()) {
    dgl_id_t local_id = map->size();
    map->insert(std::pair<dgl_id_t, dgl_id_t>(global_id, local_id));
    return local_id;
  } else {
    return it->second;
  }
}

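/*
 * Whether negative sampling should corrupt the head nodes ("head" mode)
 * rather than the tail nodes.
 */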
inline bool is_neg_head_mode(const std::string &mode) {
  return mode == "head";
}

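/*
 * Construct a negative-edge subgraph for the given positive edge subgraph:
 * for every positive edge, the head (or tail) vertex is kept and the other
 * endpoint is replaced by neg_sample_size randomly sampled vertices.
 */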
Subgraph NegEdgeSubgraph(GraphPtr gptr, const Subgraph &pos_subg,
                         const std::string &neg_mode,
                         int neg_sample_size, bool exclude_positive) {
  int64_t num_tot_nodes = gptr->NumVertices();
  bool is_multigraph = gptr->IsMultigraph();
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
  int64_t num_pos_edges = coo->shape[0] / 2;
  int64_t num_neg_edges = num_pos_edges * neg_sample_size;
  IdArray neg_dst = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_eid = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid = IdArray::Empty({num_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data = static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data = static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data = static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);
  size_t num_pos_nodes = pos_subg.graph->NumVertices();
  std::vector<size_t> pos_nodes(induced_vid_data, induced_vid_data + num_pos_nodes);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *induced_neg_eid_data = static_cast<dgl_id_t *>(induced_neg_eid->data);

  dgl_id_t curr_eid = 0;
  std::vector<size_t> neg_vids;
  neg_vids.reserve(neg_sample_size);
  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  for (int64_t i = 0; i < num_pos_edges; i++) {
    size_t neg_idx = i * neg_sample_size;
    neg_vids.clear();

    std::vector<size_t> neighbors;
    DGLIdIters neigh_it;
    const dgl_id_t *unchanged;
    dgl_id_t *neg_unchanged;
    dgl_id_t *neg_changed;
    if (is_neg_head_mode(neg_mode)) {
      unchanged = dst_data;
      neg_unchanged = neg_dst_data;
      neg_changed = neg_src_data;
      neigh_it = gptr->PredVec(induced_vid_data[unchanged[i]]);
    } else {
      unchanged = src_data;
      neg_unchanged = neg_src_data;
      neg_changed = neg_dst_data;
      neigh_it = gptr->SuccVec(induced_vid_data[unchanged[i]]);
    }

    if (exclude_positive) {
      std::vector<size_t> exclude;
      for (auto it = neigh_it.begin(); it != neigh_it.end(); it++) {
        dgl_id_t global_vid = *it;
        exclude.push_back(global_vid);
      }
      RandomSample(num_tot_nodes, neg_sample_size, exclude, &neg_vids);
    } else {
      RandomSample(num_tot_nodes, neg_sample_size, &neg_vids);
    }

    dgl_id_t global_unchanged = induced_vid_data[unchanged[i]];
    dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);

    for (int64_t j = 0; j < neg_sample_size; j++) {
      neg_unchanged[neg_idx + j] = local_unchanged;
      neg_eid_data[neg_idx + j] = curr_eid++;
      dgl_id_t local_changed = global2local_map(neg_vids[j], &neg_map);
      neg_changed[neg_idx + j] = local_changed;
      // induced negative eid references to the positive one.
      induced_neg_eid_data[neg_idx + j] = induced_eid_data[i];
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid = IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data = static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  Subgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst, is_multigraph));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;
  return neg_subg;
}

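/*
 * PBG-style negative sampling: positive edges are grouped into chunks of
 * neg_sample_size edges, and all positive edges in a chunk share the same
 * neg_sample_size sampled negative vertices.
 */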
Subgraph PBGNegEdgeSubgraph(int64_t num_tot_nodes, const Subgraph &pos_subg,
                            const std::string &neg_mode,
                            int neg_sample_size, bool is_multigraph,
                            bool exclude_positive) {
  std::vector<IdArray> adj = pos_subg.graph->GetAdj(false, "coo");
  IdArray coo = adj[0];
  int64_t num_pos_edges = coo->shape[0] / 2;

  int64_t chunk_size = neg_sample_size;
  // If num_pos_edges isn't divisible by chunk_size, the actual number of chunks
  // is num_chunks + 1 and the last chunk size is last_chunk_size.
  // Otherwise, the actual number of chunks is num_chunks, the last chunk size
  // is 0.
  int64_t num_chunks = num_pos_edges / chunk_size;
  int64_t last_chunk_size = num_pos_edges - num_chunks * chunk_size;

  // The number of negative edges.
  int64_t num_neg_edges = neg_sample_size * chunk_size * num_chunks;
  int64_t num_neg_edges_last_chunk = neg_sample_size * last_chunk_size;
  int64_t num_all_neg_edges = num_neg_edges + num_neg_edges_last_chunk;

  // We should include the last chunk.
  if (last_chunk_size > 0)
    num_chunks++;

  IdArray neg_dst = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_src = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray neg_eid = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);
  IdArray induced_neg_eid = IdArray::Empty({num_all_neg_edges}, coo->dtype, coo->ctx);

  // These are vids in the positive subgraph.
  const dgl_id_t *dst_data = static_cast<const dgl_id_t *>(coo->data);
  const dgl_id_t *src_data = static_cast<const dgl_id_t *>(coo->data) + num_pos_edges;
  const dgl_id_t *induced_vid_data = static_cast<const dgl_id_t *>(pos_subg.induced_vertices->data);
  const dgl_id_t *induced_eid_data = static_cast<const dgl_id_t *>(pos_subg.induced_edges->data);
  size_t num_pos_nodes = pos_subg.graph->NumVertices();
  std::vector<size_t> pos_nodes(induced_vid_data, induced_vid_data + num_pos_nodes);

  dgl_id_t *neg_dst_data = static_cast<dgl_id_t *>(neg_dst->data);
  dgl_id_t *neg_src_data = static_cast<dgl_id_t *>(neg_src->data);
  dgl_id_t *neg_eid_data = static_cast<dgl_id_t *>(neg_eid->data);
  dgl_id_t *induced_neg_eid_data = static_cast<dgl_id_t *>(induced_neg_eid->data);

  const dgl_id_t *unchanged;
  dgl_id_t *neg_unchanged;
  dgl_id_t *neg_changed;

  // corrupt head nodes.
  if (is_neg_head_mode(neg_mode)) {
    unchanged = dst_data;
    neg_unchanged = neg_dst_data;
    neg_changed = neg_src_data;
  } else {
    // corrupt tail nodes.
    unchanged = src_data;
    neg_unchanged = neg_src_data;
    neg_changed = neg_dst_data;
  }

  // We first sample all negative edges.
  std::vector<size_t> neg_vids;
  RandomSample(num_tot_nodes,
               num_chunks * neg_sample_size,
               &neg_vids);

  dgl_id_t curr_eid = 0;
  std::unordered_map<dgl_id_t, dgl_id_t> neg_map;
  for (int64_t i_chunk = 0; i_chunk < num_chunks; i_chunk++) {
    // for each chunk.
    int64_t neg_idx = neg_sample_size * chunk_size * i_chunk;
    int64_t pos_edge_idx = chunk_size * i_chunk;
    int64_t neg_node_idx = neg_sample_size * i_chunk;
    // The actual chunk size. It'll be different for the last chunk.
    int64_t chunk_size1;
    if (i_chunk == num_chunks - 1 && last_chunk_size > 0)
      chunk_size1 = last_chunk_size;
    else
      chunk_size1 = chunk_size;

    for (int64_t in_chunk = 0; in_chunk != chunk_size1; ++in_chunk) {
      // For each positive node in a chunk.

      dgl_id_t global_unchanged = induced_vid_data[unchanged[pos_edge_idx + in_chunk]];
      dgl_id_t local_unchanged = global2local_map(global_unchanged, &neg_map);
      for (int64_t j = 0; j < neg_sample_size; ++j) {
        neg_unchanged[neg_idx] = local_unchanged;
        neg_eid_data[neg_idx] = curr_eid++;
        dgl_id_t global_changed_vid = neg_vids[neg_node_idx + j];

        // TODO(zhengda) we can avoid the hashtable lookup here.
        dgl_id_t local_changed = global2local_map(global_changed_vid, &neg_map);
        neg_changed[neg_idx] = local_changed;
        induced_neg_eid_data[neg_idx] = induced_eid_data[pos_edge_idx + in_chunk];
        neg_idx++;
      }
    }
  }

  // Now we know the number of vertices in the negative graph.
  int64_t num_neg_nodes = neg_map.size();
  IdArray induced_neg_vid = IdArray::Empty({num_neg_nodes}, coo->dtype, coo->ctx);
  dgl_id_t *induced_neg_vid_data = static_cast<dgl_id_t *>(induced_neg_vid->data);
  for (auto it = neg_map.begin(); it != neg_map.end(); it++) {
    induced_neg_vid_data[it->second] = it->first;
  }

  Subgraph neg_subg;
  // We sample negative vertices without replacement.
  // There shouldn't be duplicated edges.
  COOPtr neg_coo(new COO(num_neg_nodes, neg_src, neg_dst, is_multigraph));
  neg_subg.graph = GraphPtr(new ImmutableGraph(neg_coo));
  neg_subg.induced_vertices = induced_neg_vid;
  neg_subg.induced_edges = induced_neg_eid;
  return neg_subg;
}

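/* Wrap a Subgraph value in a SubgraphRef so it can be returned through the C API. */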
inline SubgraphRef ConvertRef(const Subgraph &subg) {
    return SubgraphRef(std::shared_ptr<Subgraph>(new Subgraph(subg)));
}

}  // namespace

DGL_REGISTER_GLOBAL("sampling._CAPI_UniformEdgeSampling")
.set_body([] (DGLArgs args, DGLRetValue* rv) {
    // arguments
    GraphRef g = args[0];
    IdArray seed_edges = args[1];
    const int64_t batch_start_id = args[2];
    const int64_t batch_size = args[3];
    const int64_t max_num_workers = args[4];
    const std::string neg_mode = args[5];
    const int neg_sample_size = args[6];
    const bool exclude_positive = args[7];
    // process args
    auto gptr = std::dynamic_pointer_cast<ImmutableGraph>(g.sptr());
    CHECK(gptr) << "sampling isn't implemented in mutable graph";
    CHECK(aten::IsValidIdArray(seed_edges));
    BuildCoo(*gptr);

    const int64_t num_seeds = seed_edges->shape[0];
    const int64_t num_workers = std::min(max_num_workers,
        (num_seeds + batch_size - 1) / batch_size - batch_start_id);
    // generate subgraphs.
    std::vector<SubgraphRef> positive_subgs(num_workers);
    std::vector<SubgraphRef> negative_subgs(num_workers);
#pragma omp parallel for
    for (int i = 0; i < num_workers; i++) {
      const int64_t start = (batch_start_id + i) * batch_size;
      const int64_t end = std::min(start + batch_size, num_seeds);
      const int64_t num_edges = end - start;
      IdArray worker_seeds = seed_edges.CreateView({num_edges}, DLDataType{kDLInt, 64, 1},
                                                   sizeof(dgl_id_t) * start);
      EdgeArray arr = gptr->FindEdges(worker_seeds);
      const dgl_id_t *src_ids = static_cast<const dgl_id_t *>(arr.src->data);
      const dgl_id_t *dst_ids = static_cast<const dgl_id_t *>(arr.dst->data);
      std::vector<dgl_id_t> src_vec(src_ids, src_ids + num_edges);
      std::vector<dgl_id_t> dst_vec(dst_ids, dst_ids + num_edges);
      // TODO(zhengda) what if there are duplicates in the src and dst vectors.

      Subgraph subg = gptr->EdgeSubgraph(worker_seeds, false);
      positive_subgs[i] = ConvertRef(subg);
      // For PBG negative sampling, we accept "PBG-head" for corrupting head
      // nodes and "PBG-tail" for corrupting tail nodes.
      if (neg_mode.substr(0, 3) == "PBG") {
        Subgraph neg_subg = PBGNegEdgeSubgraph(gptr->NumVertices(), subg,
                                               neg_mode.substr(4), neg_sample_size,
                                               gptr->IsMultigraph(), exclude_positive);
        negative_subgs[i] = ConvertRef(neg_subg);
      } else if (neg_mode.size() > 0) {
        Subgraph neg_subg = NegEdgeSubgraph(gptr, subg, neg_mode, neg_sample_size,
                                            exclude_positive);
        negative_subgs[i] = ConvertRef(neg_subg);
      }
    }
    if (neg_mode.size() > 0) {
      positive_subgs.insert(positive_subgs.end(), negative_subgs.begin(), negative_subgs.end());
    }
    *rv = List<SubgraphRef>(positive_subgs);
  });

}  // namespace dgl