/*!
 *  Copyright (c) 2017-2022 by Contributors
 * \file ndarray.cc
 * \brief NDArray container infrastructure.
 */
#include <string.h>
#include <dmlc/logging.h>
#include <dgl/runtime/ndarray.h>
#include <dgl/runtime/c_runtime_api.h>
#include <dgl/runtime/device_api.h>
#include <dgl/runtime/shared_mem.h>
#include <dgl/zerocopy_serializer.h>
#include <dgl/runtime/tensordispatch.h>

#include "runtime_base.h"

namespace dgl {

constexpr DGLDataType DGLDataTypeTraits<int8_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<uint8_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int16_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int32_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<int64_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<uint32_t>::dtype;
constexpr DGLDataType DGLDataTypeTraits<uint64_t>::dtype;
#ifdef USE_FP16
constexpr DGLDataType DGLDataTypeTraits<__half>::dtype;
#endif
constexpr DGLDataType DGLDataTypeTraits<float>::dtype;
constexpr DGLDataType DGLDataTypeTraits<double>::dtype;

namespace runtime {

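// Sanity-check a dtype: it needs at least one lane and a power-of-two,
// whole-byte bit width.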
inline void VerifyDataType(DGLDataType dtype) {
  CHECK_GE(dtype.lanes, 1);
  CHECK_EQ(dtype.bits % 8, 0);
  CHECK_EQ(dtype.bits & (dtype.bits - 1), 0);
}

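// Total number of bytes needed to hold the array's data, rounding the
// per-element bit width up to whole bytes.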
inline size_t GetDataSize(const DGLArray& arr) {
  size_t size = 1;
  for (dgl_index_t i = 0; i < arr.ndim; ++i) {
    size *= arr.shape[i];
  }
  size *= (arr.dtype.bits * arr.dtype.lanes + 7) / 8;
  return size;
}

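// Required alignment (in bytes) of the array's data buffer, never smaller
// than kAllocAlignment.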
inline size_t GetDataAlignment(const DGLArray& arr) {
  size_t align = (arr.dtype.bits / 8) * arr.dtype.lanes;
  if (align < kAllocAlignment) return kAllocAlignment;
  return align;
}

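// Default deleter: releases whichever resource backs the container, i.e. a
// reference to another manager, a shared-memory mapping, or a device buffer.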
void NDArray::Internal::DefaultDeleter(NDArray::Container* ptr) {
  using dgl::runtime::NDArray;
  if (ptr->manager_ctx != nullptr) {
    static_cast<NDArray::Container*>(ptr->manager_ctx)->DecRef();
  } else if (ptr->mem) {
    ptr->mem = nullptr;
  } else if (ptr->dl_tensor.data != nullptr) {
    // If the array is still pinned, unpin it before freeing the buffer.
    if (ptr->pinned_by_dgl_)
      UnpinContainer(ptr);
    dgl::runtime::DeviceAPI::Get(ptr->dl_tensor.ctx)->FreeDataSpace(
        ptr->dl_tensor.ctx, ptr->dl_tensor.data);
  }
  delete ptr;
}

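// Allocate a container and fill in shape, strides, dtype, and ctx metadata;
// the data pointer itself is not allocated here.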
NDArray NDArray::Internal::Create(std::vector<int64_t> shape,
                                  DGLDataType dtype, DGLContext ctx) {
  VerifyDataType(dtype);
  // critical zone
  NDArray::Container* data = new NDArray::Container();
  data->deleter = DefaultDeleter;
  NDArray ret(data);
  ret.data_ = data;
  // RAII now in effect
  // setup shape
  data->shape_ = std::move(shape);
  data->dl_tensor.shape = dmlc::BeginPtr(data->shape_);
  data->dl_tensor.ndim = static_cast<int>(data->shape_.size());
  // setup strides (this should be optional, but some frameworks
  //   do not support NULL strides and would crash the program).
  data->stride_.resize(data->dl_tensor.ndim, 1);
  for (int i = data->dl_tensor.ndim - 2; i >= 0; --i) {
    data->stride_[i] = data->shape_[i+1] * data->stride_[i+1];
  }
  data->dl_tensor.strides = dmlc::BeginPtr(data->stride_);
  // setup dtype
  data->dl_tensor.dtype = dtype;
  // setup ctx
  data->dl_tensor.ctx = ctx;
  return ret;
}

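// Hand the container out as a bare DGLArray* without decreasing its refcount;
// the cast is valid because dl_tensor is the container's first member, as the
// CHECK below asserts.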
DGLArray* NDArray::Internal::MoveAsDGLArray(NDArray arr) {
  DGLArray* tensor = reinterpret_cast<DGLArray*>(arr.data_);
  CHECK(tensor == const_cast<DGLArray*>(arr.operator->()));
  arr.data_ = nullptr;
  return tensor;
}
size_t NDArray::GetSize() const {
  return GetDataSize(data_->dl_tensor);
}

int64_t NDArray::NumElements() const {
  if (data_->dl_tensor.ndim == 0)
    return 0;
  int64_t size = 1;
  for (int i = 0; i < data_->dl_tensor.ndim; ++i) {
    size *= data_->dl_tensor.shape[i];
  }
  return size;
}

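// Whether the elements occupy one dense, row-major block of memory.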
bool NDArray::IsContiguous() const {
  CHECK(data_ != nullptr);
  if (data_->dl_tensor.strides == nullptr)
    return true;

  // See https://github.com/dmlc/dgl/issues/2118 and PyTorch's compute_contiguous() implementation
  int64_t z = 1;
  for (int64_t i = data_->dl_tensor.ndim - 1; i >= 0; --i) {
    if (data_->dl_tensor.shape[i] != 1) {
      if (data_->dl_tensor.strides[i] == z)
        z *= data_->dl_tensor.shape[i];
      else
        return false;
    }
  }
  return true;
}

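// Create a view that aliases this array's buffer at the given byte offset;
// the view keeps a reference on the parent so the buffer stays alive.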
NDArray NDArray::CreateView(std::vector<int64_t> shape,
                            DGLDataType dtype,
                            int64_t offset) {
  CHECK(data_ != nullptr);
  CHECK(IsContiguous()) << "Can only create a view of a contiguous tensor";
  NDArray ret = Internal::Create(shape, dtype, data_->dl_tensor.ctx);
  ret.data_->dl_tensor.byte_offset =
      this->data_->dl_tensor.byte_offset;
  size_t curr_size = GetDataSize(this->data_->dl_tensor);
  size_t view_size = GetDataSize(ret.data_->dl_tensor);
  CHECK_LE(view_size, curr_size)
      << "Tried to create a view larger than the viewed array";
  // increase ref count
  this->data_->IncRef();
  ret.data_->manager_ctx = this->data_;
  ret.data_->dl_tensor.data =
    static_cast<char*>(this->data_->dl_tensor.data) + offset;
  return ret;
}

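// Create (is_create == true) or open an NDArray backed by named shared
// memory, so that several processes can map the same buffer.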
NDArray NDArray::EmptyShared(const std::string &name,
                       std::vector<int64_t> shape,
                       DGLDataType dtype,
                       DGLContext ctx, bool is_create) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  // setup memory content
  size_t size = GetDataSize(ret.data_->dl_tensor);
  auto mem = std::make_shared<SharedMemory>(name);
  if (is_create) {
    ret.data_->dl_tensor.data = mem->CreateNew(size);
  } else {
    ret.data_->dl_tensor.data = mem->Open(size);
  }
  ret.data_->mem = mem;
  return ret;
}

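// Allocate an uninitialized NDArray of the given shape on the given device.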
NDArray NDArray::Empty(std::vector<int64_t> shape,
                       DGLDataType dtype,
                       DGLContext ctx) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  // setup memory content
  size_t size = GetDataSize(ret.data_->dl_tensor);
  size_t alignment = GetDataAlignment(ret.data_->dl_tensor);
  if (size > 0)
    ret.data_->dl_tensor.data =
        DeviceAPI::Get(ret->ctx)->AllocDataSpace(
            ret->ctx, size, alignment, ret->dtype);
  return ret;
}

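// Copy between two arrays of identical byte size; unless both sides share a
// device type, one of them must reside on CPU.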
void NDArray::CopyFromTo(DGLArray* from,
                         DGLArray* to) {
  size_t from_size = GetDataSize(*from);
  size_t to_size = GetDataSize(*to);
  CHECK_EQ(from_size, to_size)
    << "DGLArrayCopyFromTo: The size must exactly match";

  CHECK(from->ctx.device_type == to->ctx.device_type
        || from->ctx.device_type == kDGLCPU
        || to->ctx.device_type == kDGLCPU)
    << "Cannot copy across different ctx types directly";

  // Use the context that is *not* a cpu context to get the correct device
  // api manager.
  DGLContext ctx = from->ctx.device_type != kDGLCPU ? from->ctx : to->ctx;

  // default: local current cuda stream
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      from->data, static_cast<size_t>(from->byte_offset),
      to->data, static_cast<size_t>(to->byte_offset),
      from_size, from->ctx, to->ctx, from->dtype);
}

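// Page-lock (pin) a CPU tensor's memory through the CUDA device API, which
// enables faster and asynchronous host-device transfers.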
void NDArray::PinContainer(NDArray::Container* ptr) {
  if (IsContainerPinned(ptr)) return;
  auto* tensor = &(ptr->dl_tensor);
  CHECK_EQ(tensor->ctx.device_type, kDGLCPU)
    << "Only NDArray on CPU can be pinned";
  DeviceAPI::Get(kDGLCUDA)->PinData(tensor->data, GetDataSize(*tensor));
  ptr->pinned_by_dgl_ = true;
}

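// Unpin memory previously pinned by DGL; a no-op for unpinned tensors and an
// error for tensors pinned outside of DGL.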
void NDArray::UnpinContainer(NDArray::Container* ptr) {
  auto container_is_pinned = IsContainerPinned(ptr);
  // The tensor may be pinned outside of DGL via a different CUDA API,
  // so we cannot unpin it with cudaHostUnregister.
  CHECK(ptr->pinned_by_dgl_ || !container_is_pinned)
    << "Cannot unpin a tensor that is pinned outside of DGL.";
  // 1. not pinned, do nothing
  if (!container_is_pinned) return;
  // 2. pinned by DGL, unpin it
  DeviceAPI::Get(kDGLCUDA)->UnpinData(ptr->dl_tensor.data);
  ptr->pinned_by_dgl_ = false;
}

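// Notify the backend framework, via the TensorDispatcher, that the tensor is
// being used on the given CUDA stream.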
void NDArray::RecordStream(DGLArray* tensor, DGLStreamHandle stream) {
  TensorDispatcher* td = TensorDispatcher::Global();
  CHECK(td->IsAvailable()) << "RecordStream only works when TensorAdaptor is available.";
  CHECK_EQ(tensor->ctx.device_type, kDGLCUDA)
    << "RecordStream only works with GPU tensors.";

  td->RecordStream(tensor->data, stream, tensor->ctx.device_id);
}

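// Copy a std::vector<T> into a freshly allocated 1-D NDArray on the given
// device.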
template<typename T>
NDArray NDArray::FromVector(const std::vector<T>& vec, DGLContext ctx) {
  const DGLDataType dtype = DGLDataTypeTraits<T>::dtype;
  int64_t size = static_cast<int64_t>(vec.size());
  NDArray ret = NDArray::Empty({size}, dtype, ctx);
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      vec.data(),
      0,
      static_cast<T*>(ret->data),
      0,
      size * sizeof(T),
      DGLContext{kDGLCPU, 0},
      ctx,
      dtype);
  return ret;
}

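// Wrap an existing buffer without copying; when auto_free is false the
// NDArray never frees the buffer.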
NDArray NDArray::CreateFromRaw(const std::vector<int64_t>& shape,
    DGLDataType dtype, DGLContext ctx, void* raw, bool auto_free) {
  NDArray ret = Internal::Create(shape, dtype, ctx);
  ret.data_->dl_tensor.data = raw;
  if (!auto_free)
    ret.data_->deleter = nullptr;
  return ret;
}

// export specializations
template NDArray NDArray::FromVector<int32_t>(const std::vector<int32_t>&, DGLContext);
template NDArray NDArray::FromVector<int64_t>(const std::vector<int64_t>&, DGLContext);
template NDArray NDArray::FromVector<uint32_t>(const std::vector<uint32_t>&, DGLContext);
template NDArray NDArray::FromVector<uint64_t>(const std::vector<uint64_t>&, DGLContext);
template NDArray NDArray::FromVector<float>(const std::vector<float>&, DGLContext);
template NDArray NDArray::FromVector<double>(const std::vector<double>&, DGLContext);
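// Copy a 1-D NDArray of matching dtype back into a CPU-side std::vector<T>.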
template<typename T>
std::vector<T> NDArray::ToVector() const {
  const DGLDataType dtype = DGLDataTypeTraits<T>::dtype;
  CHECK(data_->dl_tensor.ndim == 1) << "ToVector() only supported for 1D arrays";
  CHECK(data_->dl_tensor.dtype == dtype) << "dtype mismatch";

  int64_t size = data_->dl_tensor.shape[0];
  std::vector<T> vec(size);
  const DGLContext &ctx = data_->dl_tensor.ctx;
  DeviceAPI::Get(ctx)->CopyDataFromTo(
      static_cast<T*>(data_->dl_tensor.data),
      0,
      vec.data(),
      0,
      size * sizeof(T),
      ctx,
      DGLContext{kDGLCPU, 0},
      dtype);
  return vec;
}

template std::vector<int32_t> NDArray::ToVector<int32_t>() const;
template std::vector<int64_t> NDArray::ToVector<int64_t>() const;
template std::vector<uint32_t> NDArray::ToVector<uint32_t>() const;
template std::vector<uint64_t> NDArray::ToVector<uint64_t>() const;
template std::vector<float> NDArray::ToVector<float>() const;
template std::vector<double> NDArray::ToVector<double>() const;
std::shared_ptr<SharedMemory> NDArray::GetSharedMem() const {
  return this->data_->mem;
}

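// A container counts as pinned if DGL pinned it itself, or if its CPU memory
// is reported as page-locked by the CUDA device API.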
bool NDArray::IsContainerPinned(NDArray::Container* ptr) {
  if (ptr->pinned_by_dgl_)
    return true;
  auto* tensor = &(ptr->dl_tensor);
  // Can only be pinned if on CPU...
  if (tensor->ctx.device_type != kDGLCPU)
    return false;
  // ... and CUDA device API is enabled, and the tensor is indeed in pinned memory.
  auto device = DeviceAPI::Get(kDGLCUDA, true);
  return device && device->IsPinned(tensor->data);
}

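// Serialize the array; a zero-copy StreamWithBuffer passes the underlying
// buffer through without copying, any other stream gets a full copy.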
void NDArray::Save(dmlc::Stream* strm) const {
  auto zc_strm = dynamic_cast<StreamWithBuffer*>(strm);
  if (zc_strm) {
    zc_strm->PushNDArray(*this);
    return;
  }
  SaveDGLArray(strm, const_cast<DGLArray*>(operator->()));
}

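// Deserialize an array written by Save(); only CPU tensors are supported.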
bool NDArray::Load(dmlc::Stream* strm) {
  auto zc_strm = dynamic_cast<StreamWithBuffer*>(strm);
  if (zc_strm) {
    *this = zc_strm->PopNDArray();
    return true;
  }
  uint64_t header, reserved;
  CHECK(strm->Read(&header))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&reserved))
      << "Invalid DGLArray file format";
  CHECK(header == kDGLNDArrayMagic)
      << "Invalid DGLArray file format";
  DGLContext ctx;
  int ndim;
  DGLDataType dtype;
  CHECK(strm->Read(&ctx))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&ndim))
      << "Invalid DGLArray file format";
  CHECK(strm->Read(&dtype))
      << "Invalid DGLArray file format";
  CHECK_EQ(ctx.device_type, kDGLCPU)
      << "Invalid DGLArray context: can only save as CPU tensor";
  std::vector<int64_t> shape(ndim);
  if (ndim != 0) {
    CHECK(strm->ReadArray(&shape[0], ndim))
        << "Invalid DGLArray file format";
  }
  NDArray ret = NDArray::Empty(shape, dtype, ctx);
  int64_t num_elems = 1;
  int elem_bytes = (ret->dtype.bits + 7) / 8;
  for (int i = 0; i < ret->ndim; ++i) {
    num_elems *= ret->shape[i];
  }
  int64_t data_byte_size;
  CHECK(strm->Read(&data_byte_size))
      << "Invalid DGLArray file format";
  CHECK(data_byte_size == num_elems * elem_bytes)
      << "Invalid DGLArray file format";
  if (data_byte_size != 0)  {
    // strm->Read will return the total number of elements successfully read.
    // Therefore if data_byte_size is zero, the CHECK below would fail.
    CHECK(strm->Read(ret->data, data_byte_size))
        << "Invalid DGLArray file format";
  }
  if (!DMLC_IO_NO_ENDIAN_SWAP) {
    dmlc::ByteSwap(ret->data, elem_bytes, num_elems);
  }
  *this = ret;
  return true;
}


}  // namespace runtime
}  // namespace dgl

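// C API wrappers; API_BEGIN()/API_END() translate C++ exceptions into the
// C runtime's integer error codes.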
using namespace dgl::runtime;
int DGLArrayAlloc(const dgl_index_t* shape,
                  int ndim,
                  int dtype_code,
                  int dtype_bits,
                  int dtype_lanes,
                  int device_type,
                  int device_id,
                  DGLArrayHandle* out) {
  API_BEGIN();
  DGLDataType dtype;
  dtype.code = static_cast<uint8_t>(dtype_code);
  dtype.bits = static_cast<uint8_t>(dtype_bits);
  dtype.lanes = static_cast<uint16_t>(dtype_lanes);
  DGLContext ctx;
  ctx.device_type = static_cast<DGLDeviceType>(device_type);
  ctx.device_id = device_id;
  *out = NDArray::Internal::MoveAsDGLArray(
      NDArray::Empty(std::vector<int64_t>(shape, shape + ndim), dtype, ctx));
  API_END();
}

int DGLArrayAllocSharedMem(const char *mem_name,
                           const dgl_index_t *shape,
                           int ndim,
                           int dtype_code,
                           int dtype_bits,
                           int dtype_lanes,
                           bool is_create,
                           DGLArrayHandle* out) {
  API_BEGIN();
  DGLDataType dtype;
  dtype.code = static_cast<uint8_t>(dtype_code);
  dtype.bits = static_cast<uint8_t>(dtype_bits);
  dtype.lanes = static_cast<uint16_t>(dtype_lanes);
  std::vector<int64_t> shape_vec(shape, shape + ndim);
  NDArray arr = NDArray::EmptyShared(mem_name, shape_vec, dtype,
                                     DGLContext{kDGLCPU, 0}, is_create);
  *out = NDArray::Internal::MoveAsDGLArray(arr);
  API_END();
}

int DGLArrayFree(DGLArrayHandle handle) {
  API_BEGIN();
  reinterpret_cast<NDArray::Container*>(handle)->DecRef();
  API_END();
}

int DGLArrayCopyFromTo(DGLArrayHandle from,
                       DGLArrayHandle to) {
  API_BEGIN();
  NDArray::CopyFromTo(from, to);
  API_END();
}

int DGLArrayCopyFromBytes(DGLArrayHandle handle,
                          void* data,
                          size_t nbytes) {
  API_BEGIN();
  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDGLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
      << "DGLArrayCopyFromBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      data, 0,
      handle->data, static_cast<size_t>(handle->byte_offset),
      nbytes, cpu_ctx, handle->ctx, handle->dtype);
  API_END();
}

int DGLArrayCopyToBytes(DGLArrayHandle handle,
                        void* data,
                        size_t nbytes) {
  API_BEGIN();
  DGLContext cpu_ctx;
  cpu_ctx.device_type = kDGLCPU;
  cpu_ctx.device_id = 0;
  size_t arr_size = GetDataSize(*handle);
  CHECK_EQ(arr_size, nbytes)
      << "DGLArrayCopyToBytes: size mismatch";
  DeviceAPI::Get(handle->ctx)->CopyDataFromTo(
      handle->data, static_cast<size_t>(handle->byte_offset),
      data, 0,
      nbytes, handle->ctx, cpu_ctx, handle->dtype);
  API_END();
}

int DGLArrayPinData(DGLArrayHandle handle,
                    DGLContext ctx) {
  API_BEGIN();
  auto* nd_container = reinterpret_cast<NDArray::Container*>(handle);
  NDArray::PinContainer(nd_container);
  API_END();
}

int DGLArrayUnpinData(DGLArrayHandle handle,
                      DGLContext ctx) {
  API_BEGIN();
  auto* nd_container = reinterpret_cast<NDArray::Container*>(handle);
  NDArray::UnpinContainer(nd_container);
  API_END();
}

int DGLArrayRecordStream(DGLArrayHandle handle, DGLStreamHandle stream) {
  API_BEGIN();
  NDArray::RecordStream(handle, stream);
  API_END();
}