/*!
 *  Copyright (c) 2017-2022 by Contributors
 * \file ndarray.cc
 * \brief NDArray container infrastructure.
 */
#include <string.h>
#include <dmlc/logging.h>
#include <dgl/runtime/ndarray.h>
#include <dgl/runtime/c_runtime_api.h>
#include <dgl/runtime/device_api.h>
#include <dgl/runtime/shared_mem.h>
#include <dgl/zerocopy_serializer.h>
#include <dgl/runtime/tensordispatch.h>

#include "runtime_base.h"
namespace dgl {

constexpr DGLDataType DGLDataTypeTraits<int8_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int16_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int32_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int64_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<uint32_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<uint64_t>::dtype;
#ifdef USE_FP16
constexpr DGLDataType DGLDataTypeTraits<__half>::dtype;
#endif

constexpr DGLDataType DGLDataTypeTraits<float>::dtype;
constexpr DGLDataType DGLDataTypeTraits<double>::dtype;

namespace runtime {

inline void VerifyDataType(DGLDataType dtype) {
  CHECK_GE(dtype.lanes, 1);
  // All supported dtypes must occupy a whole number of bytes,
  // and the bit width must be a power of two.
  CHECK_EQ(dtype.bits % 8, 0);
  CHECK_EQ(dtype.bits & (dtype.bits - 1), 0);
}

inline size_t GetDataSize(const DGLArray& arr) {
  size_t size = 1;
  for (dgl_index_t i = 0; i < arr.ndim; ++i) {
    size *= arr.shape[i];
  }
  size *= (arr.dtype.bits * arr.dtype.lanes + 7) / 8;
  return size;
}
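
// Worked example (illustrative note, not from the original source): a tensor
// of shape {2, 3} with dtype bits = 32 and lanes = 1 occupies
// 2 * 3 * ((32 * 1 + 7) / 8) = 6 * 4 = 24 bytes; the "+ 7" rounds sub-byte
// dtypes (e.g. a hypothetical 4-bit type) up to a whole byte.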

inline size_t GetDataAlignment(const DGLArray& arr) {
  size_t align = (arr.dtype.bits / 8) * arr.dtype.lanes;
  if (align < kAllocAlignment) return kAllocAlignment;
  return align;
}

void NDArray::Internal::DefaultDeleter(NDArray::Container* ptr) {
  using dgl::runtime::NDArray;
  if (ptr->manager_ctx != nullptr) {
    // The container is a view; drop the reference to the owning container.
    static_cast<NDArray::Container*>(ptr->manager_ctx)->DecRef();
  } else if (ptr->mem) {
    // The data lives in shared memory; releasing the shared_ptr unmaps it.
    ptr->mem = nullptr;
  } else if (ptr->dl_tensor.data != nullptr) {
    // If the array is still pinned when freed, unpin it first.
    if (ptr->pinned_by_dgl_)
      UnpinContainer(ptr);
    dgl::runtime::DeviceAPI::Get(ptr->dl_tensor.ctx)->FreeDataSpace(
        ptr->dl_tensor.ctx, ptr->dl_tensor.data);
  }
  delete ptr;
}

NDArray NDArray::Internal::Create(std::vector<int64_t> shape,
                                  DGLDataType dtype, DGLContext ctx) {
  VerifyDataType(dtype);
  // critical zone
  NDArray::Container* data = new NDArray::Container();
  data->deleter = DefaultDeleter;
  NDArray ret(data);
  ret.data_ = data;
  // RAII now in effect
  // setup shape
  data->shape_ = std::move(shape);
  data->dl_tensor.shape = dmlc::BeginPtr(data->shape_);
  data->dl_tensor.ndim = static_cast<int>(data->shape_.size());
  // setup strides (these should be optional, but some frameworks
  //   do not support NULL strides and would crash otherwise).
  data->stride_.resize(data->dl_tensor.ndim, 1);
  for (int i = data->dl_tensor.ndim - 2; i >= 0; --i) {
    data->stride_[i] = data->shape_[i+1] * data->stride_[i+1];
  }
  data->dl_tensor.strides = dmlc::BeginPtr(data->stride_);
  // setup dtype
  data->dl_tensor.dtype = dtype;
  // setup ctx
  data->dl_tensor.ctx = ctx;
  return ret;
}
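
// Stride example (illustrative): for shape {2, 3, 4} the loop above yields
// row-major strides {12, 4, 1}: strides[i] = shape[i + 1] * strides[i + 1],
// with the innermost stride fixed at 1.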

DGLArray* NDArray::Internal::MoveAsDGLArray(NDArray arr) {
  DGLArray* tensor = reinterpret_cast<DGLArray*>(arr.data_);
  CHECK(tensor == const_cast<DGLArray*>(arr.operator->()));
  arr.data_ = nullptr;
  return tensor;
}

size_t NDArray::GetSize() const {
  return GetDataSize(data_->dl_tensor);
}

int64_t NDArray::NumElements() const {
  if (data_->dl_tensor.ndim == 0)
    return 0;
  int64_t size = 1;
  for (int i = 0; i < data_->dl_tensor.ndim; ++i) {
    size *= data_->dl_tensor.shape[i];
  }
  return size;
}

bool NDArray::IsContiguous() const {
  CHECK(data_ != nullptr);
  if (data_->dl_tensor.strides == nullptr)
    return true;

  // See https://github.com/dmlc/dgl/issues/2118 and PyTorch's
  // compute_contiguous() implementation.
  int64_t z = 1;
  for (int64_t i = data_->dl_tensor.ndim - 1; i >= 0; --i) {
    if (data_->dl_tensor.shape[i] != 1) {
      if (data_->dl_tensor.strides[i] == z)
        z *= data_->dl_tensor.shape[i];
      else
        return false;
    }
  }
  return true;
}
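
// Example (illustrative): shape {2, 1, 3} with strides {3, 100, 1} still
// counts as contiguous, because dimensions of extent 1 are skipped; only
// dimensions with extent > 1 must match the running product of shapes.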

NDArray NDArray::CreateView(std::vector<int64_t> shape,
                            DGLDataType dtype,
                            int64_t offset) {
  CHECK(data_ != nullptr);
  CHECK(IsContiguous()) << "Can only create a view of a contiguous tensor";
  NDArray ret = Internal::Create(shape, dtype, data_->dl_tensor.ctx);
  ret.data_->dl_tensor.byte_offset =
      this->data_->dl_tensor.byte_offset;
  size_t curr_size = GetDataSize(this->data_->dl_tensor);
  size_t view_size = GetDataSize(ret.data_->dl_tensor);
  CHECK_LE(view_size, curr_size)
      << "Tried to create a view larger than the current array";
  // increase ref count
  this->data_->IncRef();
  ret.data_->manager_ctx = this->data_;
  ret.data_->dl_tensor.data =
    static_cast<char*>(this->data_->dl_tensor.data) + offset;
  return ret;
}
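
// Usage sketch (hypothetical example, not from the original source): view a
// contiguous float32 array `arr` of shape {4} as eight 16-bit integers. The
// view shares the parent's buffer and keeps it alive via manager_ctx.
//
//   NDArray halves = arr.CreateView({8}, DGLDataType{kDGLInt, 16, 1},
//                                   /*offset=*/0);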

NDArray NDArray::EmptyShared(const std::string &name,
                       std::vector<int64_t> shape,
                       DGLDataType dtype,
                       DGLContext ctx, bool is_create) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  // setup memory content
  size_t size = GetDataSize(ret.data_->dl_tensor);
  auto mem = std::make_shared<SharedMemory>(name);
  if (is_create) {
    ret.data_->dl_tensor.data = mem->CreateNew(size);
  } else {
    ret.data_->dl_tensor.data = mem->Open(size);
  }

  ret.data_->mem = mem;
  return ret;
}
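
// Usage sketch (hypothetical names): one process creates a named
// shared-memory segment, another opens the same segment; both map the same
// bytes.
//
//   // producer
//   NDArray a = NDArray::EmptyShared("my_shm", {16},
//       DGLDataType{kDGLFloat, 32, 1}, DGLContext{kDGLCPU, 0},
//       /*is_create=*/true);
//   // consumer (a different process)
//   NDArray b = NDArray::EmptyShared("my_shm", {16},
//       DGLDataType{kDGLFloat, 32, 1}, DGLContext{kDGLCPU, 0},
//       /*is_create=*/false);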

NDArray NDArray::Empty(std::vector<int64_t> shape,
                       DGLDataType dtype,
                       DGLContext ctx) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  // setup memory content
  size_t size = GetDataSize(ret.data_->dl_tensor);
  size_t alignment = GetDataAlignment(ret.data_->dl_tensor);
  if (size > 0)
    ret.data_->dl_tensor.data =
        DeviceAPI::Get(ret->ctx)->AllocDataSpace(
            ret->ctx, size, alignment, ret->dtype);
  return ret;
}
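
// Usage sketch (illustrative): allocate an uninitialized 2 x 3 float32
// tensor on the CPU; the buffer is released through DefaultDeleter when the
// last reference goes away.
//
//   NDArray t = NDArray::Empty({2, 3}, DGLDataType{kDGLFloat, 32, 1},
//                              DGLContext{kDGLCPU, 0});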

void NDArray::CopyFromTo(DGLArray* from,
                         DGLArray* to) {
  size_t from_size = GetDataSize(*from);
  size_t to_size = GetDataSize(*to);
  CHECK_EQ(from_size, to_size)
    << "DGLArrayCopyFromTo: The size must exactly match";

  CHECK(from->ctx.device_type == to->ctx.device_type
        || from->ctx.device_type == kDGLCPU
        || to->ctx.device_type == kDGLCPU)
    << "Cannot copy across different ctx types directly";

  // Use the context that is *not* a CPU context to get the correct device
  // api manager.
  DGLContext ctx = from->ctx.device_type != kDGLCPU ? from->ctx : to->ctx;

  // default: local current cuda stream
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      from->data, static_cast<size_t>(from->byte_offset),
      to->data, static_cast<size_t>(to->byte_offset),
      from_size, from->ctx, to->ctx, from->dtype);
}
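
// Usage sketch (hypothetical, assuming a CUDA-enabled build): copy a CPU
// tensor to GPU 0. At least one side must be a CPU context unless both
// sides share a device type.
//
//   NDArray cpu = NDArray::Empty({2, 3}, DGLDataType{kDGLFloat, 32, 1},
//                                DGLContext{kDGLCPU, 0});
//   NDArray gpu = NDArray::Empty({2, 3}, cpu->dtype,
//                                DGLContext{kDGLCUDA, 0});
//   NDArray::CopyFromTo(const_cast<DGLArray*>(cpu.operator->()),
//                       const_cast<DGLArray*>(gpu.operator->()));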

void NDArray::PinContainer(NDArray::Container* ptr) {
  if (IsContainerPinned(ptr)) return;
  auto* tensor = &(ptr->dl_tensor);
  CHECK_EQ(tensor->ctx.device_type, kDGLCPU)
    << "Only NDArray on CPU can be pinned";
  DeviceAPI::Get(kDGLCUDA)->PinData(tensor->data, GetDataSize(*tensor));
  ptr->pinned_by_dgl_ = true;
}

void NDArray::UnpinContainer(NDArray::Container* ptr) {
  auto container_is_pinned = IsContainerPinned(ptr);
  // The tensor may be pinned outside of DGL via a different CUDA API,
  // so we cannot unpin it with cudaHostUnregister.
  CHECK(ptr->pinned_by_dgl_ || !container_is_pinned)
    << "Cannot unpin a tensor that is pinned outside of DGL.";
  // 1. not pinned, do nothing
  if (!container_is_pinned) return;
  // 2. pinned by DGL, unpin it
  DeviceAPI::Get(kDGLCUDA)->UnpinData(ptr->dl_tensor.data);
  ptr->pinned_by_dgl_ = false;
}
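
// Usage sketch (illustrative; `container` is a hypothetical
// NDArray::Container*, e.g. obtained from a DGLArrayHandle as in
// DGLArrayPinData below): pinning registers the CPU buffer with CUDA so
// host-device copies can run asynchronously; unpinning reverses it.
//
//   NDArray::PinContainer(container);
//   ...                                // issue async copies
//   NDArray::UnpinContainer(container);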

void NDArray::RecordStream(DGLArray* tensor, DGLStreamHandle stream) {
  TensorDispatcher* td = TensorDispatcher::Global();
  CHECK(td->IsAvailable())
      << "RecordStream only works when TensorAdaptor is available.";
  CHECK_EQ(tensor->ctx.device_type, kDGLCUDA)
    << "RecordStream only works with GPU tensors.";

  td->RecordStream(tensor->data, stream, tensor->ctx.device_id);
}

template<typename T>
NDArray NDArray::FromVector(const std::vector<T>& vec, DGLContext ctx) {
  const DGLDataType dtype = DGLDataTypeTraits<T>::dtype;
  int64_t size = static_cast<int64_t>(vec.size());
  NDArray ret = NDArray::Empty({size}, dtype, ctx);
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      vec.data(),
      0,
      static_cast<T*>(ret->data),
      0,
      size * sizeof(T),
      DGLContext{kDGLCPU, 0},
      ctx,
      dtype);
  return ret;
}
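
// Usage sketch (illustrative): build a 1-D int64 array on the CPU from a
// std::vector. The data is copied, so the vector may be destroyed afterwards.
//
//   std::vector<int64_t> src = {1, 2, 3};
//   NDArray ids = NDArray::FromVector(src, DGLContext{kDGLCPU, 0});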

NDArray NDArray::CreateFromRaw(const std::vector<int64_t>& shape,
    DGLDataType dtype, DGLContext ctx, void* raw, bool auto_free) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  ret.data_->dl_tensor.data = raw;
  // Without auto_free, clear the deleter so the caller keeps ownership
  // of the raw buffer.
  if (!auto_free)
    ret.data_->deleter = nullptr;
  return ret;
}
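
// Usage sketch (hypothetical buffer): wrap caller-owned memory without
// copying. With auto_free = false the caller keeps ownership; with
// auto_free = true the buffer is freed through DefaultDeleter as if DGL
// had allocated it.
//
//   static float buf[6];
//   NDArray wrapped = NDArray::CreateFromRaw({2, 3},
//       DGLDataType{kDGLFloat, 32, 1}, DGLContext{kDGLCPU, 0},
//       buf, /*auto_free=*/false);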

// export specializations
template NDArray NDArray::FromVector<int32_t>(const std::vector<int32_t>&, DGLContext);
template NDArray NDArray::FromVector<int64_t>(const std::vector<int64_t>&, DGLContext);
template NDArray NDArray::FromVector<uint32_t>(const std::vector<uint32_t>&, DGLContext);
template NDArray NDArray::FromVector<uint64_t>(const std::vector<uint64_t>&, DGLContext);
template NDArray NDArray::FromVector<float>(const std::vector<float>&, DGLContext);
template NDArray NDArray::FromVector<double>(const std::vector<double>&, DGLContext);

template<typename T>
std::vector<T> NDArray::ToVector() const {
  const DGLDataType dtype = DGLDataTypeTraits<T>::dtype;
  CHECK(data_->dl_tensor.ndim == 1)
      << "ToVector() only supported for 1D arrays";
  CHECK(data_->dl_tensor.dtype == dtype) << "dtype mismatch";

  int64_t size = data_->dl_tensor.shape[0];
  std::vector<T> vec(size);
  const DGLContext &ctx = data_->dl_tensor.ctx;
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      static_cast<T*>(data_->dl_tensor.data),
      0,
      vec.data(),
      0,
      size * sizeof(T),
      ctx,
      DGLContext{kDGLCPU, 0},
      dtype);
  return vec;
}

template std::vector<int32_t> NDArray::ToVector<int32_t>() const;
template std::vector<int64_t> NDArray::ToVector<int64_t>() const;
template std::vector<uint32_t> NDArray::ToVector<uint32_t>() const;
template std::vector<uint64_t> NDArray::ToVector<uint64_t>() const;
template std::vector<float> NDArray::ToVector<float>() const;
template std::vector<double> NDArray::ToVector<double>() const;
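
// Usage sketch (illustrative, reusing the hypothetical `ids` array from the
// FromVector sketch above): ToVector requires a 1-D array whose dtype
// matches T, and always copies back to host memory.
//
//   std::vector<int64_t> back = ids.ToVector<int64_t>();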

std::shared_ptr<SharedMemory> NDArray::GetSharedMem() const {
  return this->data_->mem;
}

bool NDArray::IsContainerPinned(NDArray::Container* ptr) {
  if (ptr->pinned_by_dgl_)
    return true;
  auto* tensor = &(ptr->dl_tensor);
  // Can only be pinned if on CPU...
  if (tensor->ctx.device_type != kDGLCPU)
    return false;
  // ... and CUDA device API is enabled, and the tensor is indeed in pinned
  // memory.
  auto device = DeviceAPI::Get(kDGLCUDA, true);
  return device && device->IsPinned(tensor->data);
}

void NDArray::Save(dmlc::Stream* strm) const {
  auto zc_strm = dynamic_cast<StreamWithBuffer*>(strm);
  if (zc_strm) {
    // Zero-copy path: hand the NDArray to the buffer-aware stream.
    zc_strm->PushNDArray(*this);
    return;
  }
  SaveDGLArray(strm, const_cast<DGLArray*>(operator->()));
}

bool NDArray::Load(dmlc::Stream* strm) {
  auto zc_strm = dynamic_cast<StreamWithBuffer*>(strm);
  if (zc_strm) {
    // Zero-copy path: reconstruct the NDArray from the buffer-aware stream.
    *this = zc_strm->PopNDArray();
    return true;
  }
  uint64_t header, reserved;
  CHECK(strm->Read(&header))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&reserved))
      << "Invalid DGLArray file format";
  CHECK(header == kDGLNDArrayMagic)
      << "Invalid DGLArray file format";
  DGLContext ctx;
  int ndim;
  DGLDataType dtype;
  CHECK(strm->Read(&ctx))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&ndim))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&dtype))
      << "Invalid DGLArray file format";
  CHECK_EQ(ctx.device_type, kDGLCPU)
      << "Invalid DGLArray context: can only save as CPU tensor";
  std::vector<int64_t> shape(ndim);
  if (ndim != 0) {
    CHECK(strm->ReadArray(&shape[0], ndim))
        << "Invalid DGLArray file format";
  }
  NDArray ret = NDArray::Empty(shape, dtype, ctx);
  int64_t num_elems = 1;
  int elem_bytes = (ret->dtype.bits + 7) / 8;
  for (int i = 0; i < ret->ndim; ++i) {
    num_elems *= ret->shape[i];
  }
  int64_t data_byte_size;
  CHECK(strm->Read(&data_byte_size))
      << "Invalid DGLArray file format";
  CHECK(data_byte_size == num_elems * elem_bytes)
      << "Invalid DGLArray file format";
  if (data_byte_size != 0) {
    // strm->Read returns the number of bytes successfully read, so the
    // CHECK below would spuriously fail if data_byte_size were zero.
    CHECK(strm->Read(ret->data, data_byte_size))
        << "Invalid DGLArray file format";
  }
  if (!DMLC_IO_NO_ENDIAN_SWAP) {
    dmlc::ByteSwap(ret->data, elem_bytes, num_elems);
  }
  *this = ret;
  return true;
}
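
// Usage sketch (hypothetical, assuming dmlc-core's <dmlc/memory_io.h>):
// round-trip an NDArray through an in-memory buffer. A plain dmlc::Stream
// takes the generic path above; a StreamWithBuffer would take the zero-copy
// push/pop path instead.
//
//   std::string blob;
//   dmlc::MemoryStringStream wstrm(&blob);
//   arr.Save(&wstrm);
//   dmlc::MemoryStringStream rstrm(&blob);
//   NDArray loaded;
//   CHECK(loaded.Load(&rstrm));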


}  // namespace runtime
}  // namespace dgl

using namespace dgl::runtime;

int DGLArrayAlloc(const dgl_index_t* shape,
                  int ndim,
                  int dtype_code,
                  int dtype_bits,
                  int dtype_lanes,
                  int device_type,
                  int device_id,
                  DGLArrayHandle* out) {
  API_BEGIN();
  DGLDataType dtype;
  dtype.code = static_cast<uint8_t>(dtype_code);
  dtype.bits = static_cast<uint8_t>(dtype_bits);
  dtype.lanes = static_cast<uint16_t>(dtype_lanes);
  DGLContext ctx;
  ctx.device_type = static_cast<DGLDeviceType>(device_type);
  ctx.device_id = device_id;
  *out = NDArray::Internal::MoveAsDGLArray(
      NDArray::Empty(std::vector<int64_t>(shape, shape + ndim), dtype, ctx));
  API_END();
}
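
// Usage sketch (illustrative): allocate a 2 x 3 float32 CPU array through
// the C API and release it with DGLArrayFree.
//
//   dgl_index_t shape[] = {2, 3};
//   DGLArrayHandle handle;
//   DGLArrayAlloc(shape, /*ndim=*/2, kDGLFloat, /*bits=*/32, /*lanes=*/1,
//                 kDGLCPU, /*device_id=*/0, &handle);
//   ...
//   DGLArrayFree(handle);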

int DGLArrayAllocSharedMem(const char *mem_name,
                           const dgl_index_t *shape,
                           int ndim,
                           int dtype_code,
                           int dtype_bits,
                           int dtype_lanes,
                           bool is_create,
                           DGLArrayHandle* out) {
  API_BEGIN();
  DGLDataType dtype;
  dtype.code = static_cast<uint8_t>(dtype_code);
  dtype.bits = static_cast<uint8_t>(dtype_bits);
  dtype.lanes = static_cast<uint16_t>(dtype_lanes);
  std::vector<int64_t> shape_vec(shape, shape + ndim);
  NDArray arr = NDArray::EmptyShared(mem_name, shape_vec, dtype,
                                     DGLContext{kDGLCPU, 0}, is_create);
  *out = NDArray::Internal::MoveAsDGLArray(arr);
  API_END();
}

int DGLArrayFree(DGLArrayHandle handle) {
  API_BEGIN();
  reinterpret_cast<NDArray::Container*>(handle)->DecRef();
  API_END();
}

int DGLArrayCopyFromTo(DGLArrayHandle from,
                       DGLArrayHandle to) {
  API_BEGIN();
  NDArray::CopyFromTo(from, to);
  API_END();
}

int DGLArrayCopyFromBytes(DGLArrayHandle handle,
                          void* data,
                          size_t nbytes) {
  API_BEGIN();
  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDGLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
      << "DGLArrayCopyFromBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      data, 0,
      handle->data, static_cast<size_t>(handle->byte_offset),
      nbytes, cpu_ctx, handle->ctx, handle->dtype);
  API_END();
}

int DGLArrayCopyToBytes(DGLArrayHandle handle,
                        void* data,
                        size_t nbytes) {
  API_BEGIN();
  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDGLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
      << "DGLArrayCopyToBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      handle->data, static_cast<size_t>(handle->byte_offset),
      data, 0,
      nbytes, handle->ctx, cpu_ctx, handle->dtype);
  API_END();
}

int DGLArrayPinData(DGLArrayHandle handle,
                    DGLContext ctx) {
  API_BEGIN();
  auto* nd_container = reinterpret_cast<NDArray::Container*>(handle);
  NDArray::PinContainer(nd_container);
  API_END();
}

int DGLArrayUnpinData(DGLArrayHandle handle,
                      DGLContext ctx) {
  API_BEGIN();
  auto* nd_container = reinterpret_cast<NDArray::Container*>(handle);
  NDArray::UnpinContainer(nd_container);
  API_END();
}

int DGLArrayRecordStream(DGLArrayHandle handle, DGLStreamHandle stream) {
  API_BEGIN();
  NDArray::RecordStream(handle, stream);
  API_END();
}