deepspeed_py_aio_handle.h
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0

// DeepSpeed Team

/*
Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
*/

#include <condition_variable>
#include <memory>
#include "deepspeed_aio_thread.h"
#include "deepspeed_pin_tensor.h"

struct deepspeed_aio_handle_t {
    std::unique_ptr<struct aio_context> _aio_ctxt;
    const bool _single_submit;
    const bool _overlap_events;
    const int _num_threads;
    deepspeed_aio_config_t _aio_config;

    std::vector<std::shared_ptr<struct deepspeed_aio_thread_t>> _thread_contexts;
    std::vector<std::thread> _threads;
    int _num_pending_ops;
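
    // Manager for the pinned (page-locked) CPU tensors handed out by
    // new_cpu_locked_tensor() below.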
    std::unique_ptr<struct deepspeed_pin_tensor_t> _pinned_tensor_mgr;

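    // block_size and queue_depth configure the underlying asynchronous I/O
    // context, single_submit and overlap_events select the request submission
    // strategy, and num_threads sets the number of worker threads used to
    // parallelize requests.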
    deepspeed_aio_handle_t(const int block_size,
                           const int queue_depth,
                           const bool single_submit,
                           const bool overlap_events,
                           const int num_threads);

    ~deepspeed_aio_handle_t();

    const int get_block_size() const;
    const int get_queue_depth() const;
    const bool get_single_submit() const;
    const bool get_overlap_events() const;
    const int get_thread_count() const;

    int read(torch::Tensor& buffer, const char* filename, const bool validate);

    int write(const torch::Tensor& buffer, const char* filename, const bool validate);

    int pread(const torch::Tensor& buffer,
              const char* filename,
              const bool validate,
              const bool async);

    int pwrite(const torch::Tensor& buffer,
               const char* filename,
               const bool validate,
               const bool async);

    int sync_pread(torch::Tensor& buffer, const char* filename);

    int sync_pwrite(const torch::Tensor& buffer, const char* filename);

    int async_pread(torch::Tensor& buffer, const char* filename);

    int async_pwrite(const torch::Tensor& buffer, const char* filename);

    // TODO: Change the API's arguments to shape and dtype.
    torch::Tensor new_cpu_locked_tensor(const size_t num_elem, const torch::Tensor& example_tensor);

    bool free_cpu_locked_tensor(torch::Tensor&);

    int wait();

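    // Internal helpers: dispatch scheduled ops to the worker threads, collect
    // completed ops, and check whether a request of num_bytes can be split
    // across the worker threads.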
    void _stop_threads();

    void _schedule_aio_work(std::shared_ptr<struct io_op_desc_t> scheduled_op);

    std::shared_ptr<struct io_op_desc_t> _wait_for_aio_work();

    bool _is_valid_parallel_aio_op(const bool read_op, const long long int num_bytes);
};
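
/*
   Usage sketch (illustrative only, not part of the original header): shows how
   the synchronous and asynchronous entry points declared above can be combined.
   The block size, queue depth, thread count, and file path below are
   assumptions chosen for the example, not defaults.

   void example_swap(const torch::Tensor& cpu_tensor)
   {
       // block_size, queue_depth, single_submit, overlap_events, num_threads
       deepspeed_aio_handle_t handle(1 << 20, 32, false, false, 1);

       // Pin a host buffer with the same element count and dtype as cpu_tensor.
       torch::Tensor pinned =
           handle.new_cpu_locked_tensor(cpu_tensor.numel(), cpu_tensor);

       // Blocking write to storage, then an asynchronous read back into the
       // pinned buffer; wait() blocks until the pending read completes.
       handle.sync_pwrite(cpu_tensor, "/tmp/swap.tensor");
       handle.async_pread(pinned, "/tmp/swap.tensor");
       handle.wait();

       handle.free_cpu_locked_tensor(pinned);
   }
*/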