/**
 *  Copyright (c) 2023 by Contributors
 * @file python_binding.cc
 * @brief Graph bolt library Python binding.
 */

#include <graphbolt/fused_csc_sampling_graph.h>
#include <graphbolt/isin.h>
#include <graphbolt/serialize.h>
#include <graphbolt/unique_and_compact.h>

#ifdef GRAPHBOLT_USE_CUDA
#include "./cuda/max_uva_threads.h"
#endif
#include "./cnumpy.h"
#include "./expand_indptr.h"
#include "./index_select.h"
#include "./random.h"

#ifdef GRAPHBOLT_USE_CUDA
#include "./cuda/gpu_cache.h"
#endif

namespace graphbolt {
namespace sampling {

/**
 * @brief Registers graphbolt's custom classes and operators with the
 * TorchScript custom-operator registry under the `graphbolt` namespace,
 * making them callable from Python via `torch.ops.graphbolt.*` and
 * `torch.classes.graphbolt.*`.
 */
TORCH_LIBRARY(graphbolt, m) {
  // Result container produced by sampling calls; exposed as a plain
  // read/write record so Python can inspect every field.
  m.class_<FusedSampledSubgraph>("FusedSampledSubgraph")
      .def(torch::init<>())
      .def_readwrite("indptr", &FusedSampledSubgraph::indptr)
      .def_readwrite("indices", &FusedSampledSubgraph::indices)
      .def_readwrite(
          "original_row_node_ids", &FusedSampledSubgraph::original_row_node_ids)
      .def_readwrite(
          "original_column_node_ids",
          &FusedSampledSubgraph::original_column_node_ids)
      .def_readwrite(
          "original_edge_ids", &FusedSampledSubgraph::original_edge_ids)
      .def_readwrite("type_per_edge", &FusedSampledSubgraph::type_per_edge)
      .def_readwrite("etype_offsets", &FusedSampledSubgraph::etype_offsets);
  // On-disk numpy array wrapper; rows are fetched by index.
  m.class_<storage::OnDiskNpyArray>("OnDiskNpyArray")
      .def("index_select", &storage::OnDiskNpyArray::IndexSelect);
  // The central CSC-format sampling graph: accessors, mutators, sampling
  // entry points, shared-memory support, and pickling.
  m.class_<FusedCSCSamplingGraph>("FusedCSCSamplingGraph")
      .def("num_nodes", &FusedCSCSamplingGraph::NumNodes)
      .def("num_edges", &FusedCSCSamplingGraph::NumEdges)
      .def("csc_indptr", &FusedCSCSamplingGraph::CSCIndptr)
      .def("indices", &FusedCSCSamplingGraph::Indices)
      .def("node_type_offset", &FusedCSCSamplingGraph::NodeTypeOffset)
      .def("type_per_edge", &FusedCSCSamplingGraph::TypePerEdge)
      .def("node_type_to_id", &FusedCSCSamplingGraph::NodeTypeToID)
      .def("edge_type_to_id", &FusedCSCSamplingGraph::EdgeTypeToID)
      .def("node_attributes", &FusedCSCSamplingGraph::NodeAttributes)
      .def("edge_attributes", &FusedCSCSamplingGraph::EdgeAttributes)
      .def("set_csc_indptr", &FusedCSCSamplingGraph::SetCSCIndptr)
      .def("set_indices", &FusedCSCSamplingGraph::SetIndices)
      .def("set_node_type_offset", &FusedCSCSamplingGraph::SetNodeTypeOffset)
      .def("set_type_per_edge", &FusedCSCSamplingGraph::SetTypePerEdge)
      .def("set_node_type_to_id", &FusedCSCSamplingGraph::SetNodeTypeToID)
      .def("set_edge_type_to_id", &FusedCSCSamplingGraph::SetEdgeTypeToID)
      .def("set_node_attributes", &FusedCSCSamplingGraph::SetNodeAttributes)
      .def("set_edge_attributes", &FusedCSCSamplingGraph::SetEdgeAttributes)
      .def("in_subgraph", &FusedCSCSamplingGraph::InSubgraph)
      .def("sample_neighbors", &FusedCSCSamplingGraph::SampleNeighbors)
      .def(
          "temporal_sample_neighbors",
          &FusedCSCSamplingGraph::TemporalSampleNeighbors)
      .def("copy_to_shared_memory", &FusedCSCSamplingGraph::CopyToSharedMemory)
      .def_pickle(
          // __getstate__: the graph serializes itself into a nested
          // dict-of-dicts of tensors.
          [](const c10::intrusive_ptr<FusedCSCSamplingGraph>& self)
              -> torch::Dict<
                  std::string, torch::Dict<std::string, torch::Tensor>> {
            return self->GetState();
          },
          // __setstate__: rebuild a fresh graph and restore its state.
          [](torch::Dict<std::string, torch::Dict<std::string, torch::Tensor>>
                 state) -> c10::intrusive_ptr<FusedCSCSamplingGraph> {
            auto g = c10::make_intrusive<FusedCSCSamplingGraph>();
            g->SetState(state);
            return g;
          });
#ifdef GRAPHBOLT_USE_CUDA
  // GPU-resident feature cache; only available in CUDA builds.
  m.class_<cuda::GpuCache>("GpuCache")
      .def("query", &cuda::GpuCache::Query)
      .def("replace", &cuda::GpuCache::Replace);
  m.def("gpu_cache", &cuda::GpuCache::Create);
#endif
  m.def("fused_csc_sampling_graph", &FusedCSCSamplingGraph::Create);
  m.def(
      "load_from_shared_memory", &FusedCSCSamplingGraph::LoadFromSharedMemory);
  m.def("unique_and_compact", &UniqueAndCompact);
  m.def("unique_and_compact_batched", &UniqueAndCompactBatched);
  m.def("isin", &IsIn);
  m.def("index_select", &ops::IndexSelect);
  m.def("index_select_csc", &ops::IndexSelectCSC);
  m.def("ondisk_npy_array", &storage::OnDiskNpyArray::Create);
  m.def("set_seed", &RandomEngine::SetManualSeed);
#ifdef GRAPHBOLT_USE_CUDA
  m.def("set_max_uva_threads", &cuda::set_max_uva_threads);
#endif
#ifdef HAS_IMPL_ABSTRACT_PYSTUB
  // Points PT2 at the Python abstract implementations for these ops.
  m.impl_abstract_pystub("dgl.graphbolt.base", "//dgl.graphbolt.base");
#endif
  // Schema-string registration so expand_indptr works with symbolic
  // shapes (SymInt) under torch.compile; tagged PT2-compliant when the
  // torch version supports the tag.
  m.def(
      "expand_indptr(Tensor indptr, ScalarType dtype, Tensor? node_ids, "
      "SymInt? output_size) -> Tensor"
#ifdef HAS_PT2_COMPLIANT_TAG
      ,
      {at::Tag::pt2_compliant_tag}
#endif
  );
}

}  // namespace sampling
}  // namespace graphbolt