"tests/vscode:/vscode.git/clone" did not exist on "96984faca42c70440de09b0d8db5ab5ede73a5ca"
torch.cpp 1.44 KB
Newer Older
1
2
3
4
5
6
/*!
 *  Copyright (c) 2020 by Contributors
 * \file torch/torch.cpp
 * \brief Implementation of PyTorch adapter library.
 */

#include <tensoradapter_exports.h>

#include <torch/torch.h>
#include <ATen/DLConvertor.h>

#ifdef DGL_USE_CUDA
#include <c10/cuda/CUDACachingAllocator.h>
#endif  // DGL_USE_CUDA

#include <vector>
#include <iostream>

#if DLPACK_VERSION > 040
// Compatibility across DLPack versions; note that this assumes the ABI stays the same.
#define kDLGPU kDLCUDA
#define DLContext DLDevice
#endif

namespace tensoradapter {

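/*!
 * \brief Map a DLPack device descriptor to the corresponding ATen device.
 *
 * Unrecognized device types fall back to CPU instead of raising an error.
 */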
static at::Device get_device(DLContext ctx) {
  switch (ctx.device_type) {
   case kDLCPU:
    return at::Device(torch::kCPU);
   case kDLGPU:
    return at::Device(torch::kCUDA, ctx.device_id);
   default:
    // fallback to CPU
    return at::Device(torch::kCPU);
  }
}

extern "C" {

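/*!
 * \brief Allocate an empty tensor via PyTorch and return it as a DLPack
 *        managed tensor.
 */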
TA_EXPORTS DLManagedTensor* TAempty(
    std::vector<int64_t> shape,
    DLDataType dtype,
    DLContext ctx) {
  auto options = torch::TensorOptions()
    .layout(torch::kStrided)
    .device(get_device(ctx))
    .dtype(at::toScalarType(dtype));
  torch::Tensor tensor = torch::empty(shape, options);
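  // Ownership passes to the caller through DLPack; the caller must invoke
  // the managed tensor's deleter to release it.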
  return at::toDLPack(tensor);
}

#ifdef DGL_USE_CUDA
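/*!
 * \brief Allocate \a nbytes of GPU memory from PyTorch's CUDA caching
 *        allocator, letting DGL share PyTorch's memory pool.
 */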
TA_EXPORTS void* RawAlloc(size_t nbytes) {
  return c10::cuda::CUDACachingAllocator::raw_alloc(nbytes);
}

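/*!
 * \brief Return a pointer obtained from RawAlloc to PyTorch's CUDA caching
 *        allocator.
 */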
TA_EXPORTS void RawDelete(void* ptr) {
  c10::cuda::CUDACachingAllocator::raw_delete(ptr);
}
#endif  // DGL_USE_CUDA

}  // extern "C"

}  // namespace tensoradapter
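
/*
 * Usage sketch (illustrative only, not part of this translation unit): the
 * host process loads this adapter at runtime and resolves the exported
 * symbols dynamically. The library name below is an assumption for
 * illustration; the actual name depends on the build configuration.
 *
 *   #include <dlfcn.h>
 *
 *   using TAemptyFn = DLManagedTensor* (*)(
 *       std::vector<int64_t>, DLDataType, DLContext);
 *
 *   void* handle = dlopen("libtensoradapter_pytorch.so", RTLD_NOW);
 *   auto ta_empty = reinterpret_cast<TAemptyFn>(dlsym(handle, "TAempty"));
 *   DLManagedTensor* t = ta_empty(
 *       {2, 3}, DLDataType{kDLFloat, 32, 1}, DLContext{kDLCPU, 0});
 *   // ... use t->dl_tensor ...
 *   t->deleter(t);  // the caller owns the managed tensor
 */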