/**
 *  Copyright (c) 2023 by Contributors
 * @file utils.h
 * @brief Graphbolt utils.
 */

#ifndef GRAPHBOLT_UTILS_H_
#define GRAPHBOLT_UTILS_H_

#include <torch/script.h>

namespace graphbolt {
namespace utils {

/**
 * @brief Checks whether the tensor is stored on the GPU.
 */
inline bool is_on_gpu(const torch::Tensor& tensor) {
  return tensor.device().is_cuda();
}
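
// Usage sketch (illustrative only; assumes a CUDA-enabled libtorch build, with
// the GPU case guarded by torch::cuda::is_available()):
//
//   torch::Tensor cpu_tensor = torch::arange(8, torch::kInt64);
//   bool on_gpu = graphbolt::utils::is_on_gpu(cpu_tensor);  // false
//   if (torch::cuda::is_available()) {
//     // The copy lives on the default CUDA device, so this returns true.
//     on_gpu = graphbolt::utils::is_on_gpu(cpu_tensor.to(torch::kCUDA));
//   }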

/**
 * @brief Checks whether the tensor is stored on the GPU or in pinned host
 * memory, i.e., whether it is directly accessible from the GPU.
 */
inline bool is_accessible_from_gpu(const torch::Tensor& tensor) {
  return is_on_gpu(tensor) || tensor.is_pinned();
}
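
// Usage sketch (illustrative only; Tensor::pin_memory() requires a
// CUDA-enabled build, so the pinned case is guarded):
//
//   torch::Tensor host_tensor = torch::ones(4);
//   bool accessible =
//       graphbolt::utils::is_accessible_from_gpu(host_tensor);  // false
//   if (torch::cuda::is_available()) {
//     // Pinned (page-locked) host memory can be read directly by the GPU.
//     accessible =
//         graphbolt::utils::is_accessible_from_gpu(host_tensor.pin_memory());
//   }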

/**
 * @brief Retrieves the value of the tensor at the given index.
 *
 * @note If the tensor is not contiguous, it is first copied to a contiguous
 * tensor.
 * @note T must match the tensor's scalar type; otherwise data_ptr<T>() raises
 * an error.
 *
 * @tparam T The element type of the tensor.
 * @param tensor The tensor to read from.
 * @param index The index of the element to retrieve.
 *
 * @return T The value of the tensor at the given index.
 */
template <typename T>
T GetValueByIndex(const torch::Tensor& tensor, int64_t index) {
  TORCH_CHECK(
      index >= 0 && index < tensor.numel(),
      "The index should be within the range of the tensor, but got index ",
      index, " and tensor size ", tensor.numel());
  auto contiguous_tensor = tensor.contiguous();
  auto data_ptr = contiguous_tensor.data_ptr<T>();
  return data_ptr[index];
}
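
// Usage sketch (illustrative only; T must match the tensor's scalar type,
// here int64_t for a torch::kInt64 tensor):
//
//   torch::Tensor ids = torch::arange(10, torch::kInt64);
//   int64_t value = graphbolt::utils::GetValueByIndex<int64_t>(ids, 3);  // 3
//   // An out-of-range index (e.g., 10) trips the TORCH_CHECK above.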

}  // namespace utils
}  // namespace graphbolt

#endif  // GRAPHBOLT_UTILS_H_