// cache_entry.hh — kvc2 cache block / cache entry manager declarations.
#ifndef __CACHE_ENTRY_HH_
#define __CACHE_ENTRY_HH_
#include "async_store.hh"
#include "cuda_stream_manager.hh"
#include "defs.h"
#include "hasher.hpp"
#include "io_helper.hpp"
#include "page_aligned_memory_pool.h"
#include "utils/mutex_extend.hpp"
#include "utils/periodic_task.hpp"

#include <atomic>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

namespace kvc2 {
using CacheBlockKey = TokensHash;

class CacheEntryManager;
struct DoubleVerticalBlocksHandle;
class GPUPageCache;

// Per-copy bookkeeping for one residency of a cache block (there is one unit
// for the CPU copy and one for the GPU copy in CacheBlockEntry).
// Member functions are defined in the .cpp.
struct ConcurrentControlUnit {
  std::atomic_size_t ref_count = 0;  // number of active users of this copy
  std::atomic_bool dirty = false;    // set when the copy has writes not yet flushed back
  TransferControl<std::mutex> tc;    // transfer state machine — semantics live with TransferControl; confirm there

  // Whether this copy can be abandoned/evicted — presumably ref_count == 0
  // and no transfer in flight; confirm against the .cpp definition.
  bool can_desert();
  void debug();
};

// How CacheBlockEntry::io_with should move data: plain vs. forced read/write.
enum IOOption {
  IO_ForceRead,
  IO_ForceWrite,
  IO_Read,
  IO_Write,
};

// Human-readable enumerator name for logging; "Unknown" for out-of-range values.
inline std::string to_string(IOOption op) {
  static constexpr const char* kNames[] = {
      "IO_ForceRead",
      "IO_ForceWrite",
      "IO_Read",
      "IO_Write",
  };
  // Unscoped enumerators take values 0..3 in declaration order, so they
  // index kNames directly.
  if (op >= IO_ForceRead && op <= IO_Write) {
    return kNames[op];
  }
  return "Unknown";
}

// One cache block. A block may simultaneously have a CPU buffer (`data`), a
// disk location (`layer`/`idx`) and a GPU slot (`gpu_block_idx`); each resident
// copy is tracked by its own ConcurrentControlUnit. Method definitions are in
// the .cpp.
struct CacheBlockEntry {
  friend CacheEntryManager;
  using MutexT = non_recursive_mutex;
  // using MutexT = std::mutex;
  MutexT lock;  // guards this entry; see try_lock()/lock_guard() below

  // for cache
  bool with_key = true;          // false presumably means an anonymous block not registered under a key — confirm
  CacheBlockKey hash = 0;        // primary token-hash key (TokensHash)
  CacheBlockKey hash_check = 0;  // secondary hash — presumably a collision check; confirm in .cpp

  CacheInfo cache_info;
  CacheEntryManager* manager = nullptr;  // non-owning back-pointer to the owning manager

  // for memory pool
  void* data = nullptr;  // CPU buffer; nullptr when not resident on CPU
  size_t size = 0;       // size of `data` in bytes

  ConcurrentControlUnit cpu_cc;  // ref-count/dirty state of the CPU copy

  // for disk
  size_t layer = -1;  // -1 wraps to SIZE_MAX: sentinel for "no disk position yet"
  size_t idx = -1;    // index within the layer's store; same SIZE_MAX sentinel

  // for gpu

  std::optional<size_t> gpu_block_idx = std::nullopt;  // GPU page index when resident on GPU
  ConcurrentControlUnit gpu_cc;                        // ref-count/dirty state of the GPU copy

  // Entries are referenced by raw pointers/iterators elsewhere, so they are
  // pinned: copy and move are all deleted.
  CacheBlockEntry() =default;
  CacheBlockEntry(const CacheBlockEntry& other) = delete;
  CacheBlockEntry& operator=(const CacheBlockEntry& other) = delete;
  CacheBlockEntry(CacheBlockEntry&& other) = delete;
  CacheBlockEntry& operator=(CacheBlockEntry&& other) = delete;
  ~CacheBlockEntry();

 private:
  bool alloc_on_cpu();  // allocate `data`; private, so callers go through inc_ref_or_alloc_on_cpu


 public:
  void free_on_cpu();
  bool alloc_on_cpu_no_lock();  // name suggests caller must already hold `lock` — confirm in .cpp

  bool inc_ref_or_alloc_on_cpu();  // presumably bumps cpu_cc.ref_count, allocating first if absent — confirm
  void set_key(TokensHash key, std::shared_ptr<CacheBlockEntry> me);  // register `me` under `key`; takes its own shared_ptr since the entry can't know it

  std::unique_lock<MutexT> try_lock();   // non-blocking; returned unique_lock may not own the mutex
  std::lock_guard<MutexT> lock_guard();  // blocking scoped lock on `lock`

  // will not get lock
  void io_with(async_store::IODealer* dealer, IO_Helper<CacheBlockEntry>& io_helper, async_store::ArrayStore* store,
               size_t layer, size_t index, IOOption option);
  void flush_back_async(IO_Helper<CacheBlockEntry>& helper, std::vector<std::atomic_bool*>& dirty_flags);

  void debug();
};

// Scope-exit collector of raw CacheBlockEntry pointers: holds `entries` and an
// `exit_fn` callback; the destructor presumably applies exit_fn to each
// collected entry — confirm in the .cpp (ctor/dtor are defined there).
struct CacheBlockEntryCollector{

  std::vector<CacheBlockEntry*> entries;          // non-owning pointers to collected entries
  std::function<void(CacheBlockEntry*)> exit_fn;  // callback invoked at destruction

  // exit_fn: action to run for collected entries when this object dies.
  CacheBlockEntryCollector(std::function<void(CacheBlockEntry*)> exit_fn);
  ~CacheBlockEntryCollector();
  
  // Non-copyable and non-movable: the pending exit_fn must run exactly once.
  CacheBlockEntryCollector(const CacheBlockEntryCollector& other) = delete;
  CacheBlockEntryCollector(CacheBlockEntryCollector&& other) = delete;
  CacheBlockEntryCollector& operator=(const CacheBlockEntryCollector& other) = delete;
  CacheBlockEntryCollector& operator=(CacheBlockEntryCollector&& other) = delete;



};


struct KVC2;

// Construction-time options for CacheEntryManager.
struct CacheEntryManagerConfig {
  size_t evict_count = 100;  // eviction batch size — exact use defined by evict(); confirm in .cpp
  KVC2* kvc2_top = nullptr;  // non-owning back-pointer to the enclosing KVC2 instance
};

// Owner/index of all shared CacheBlockEntry objects: maps block hashes to
// entries and keeps a usage list for eviction. All member functions are
// defined in the .cpp.
class CacheEntryManager {
 public:
  using Key = CacheBlockKey;
  using BlockPtr = std::shared_ptr<CacheBlockEntry>;

 private:
  friend CacheBlockEntry;

  CacheEntryManagerConfig config;

  std::mutex lock;  // guards usage_list and key_entry_map
  std::list<BlockPtr> usage_list;  // recency list for eviction — ordering convention defined in .cpp; confirm there
  std::unordered_map<Key, std::list<BlockPtr>::iterator> key_entry_map;  // key -> position in usage_list

  void insert(BlockPtr entry);
  BlockPtr access(const Key& key);  // lookup by key — presumably also refreshes the entry's usage position

  // void remove(const Key& key);
  // Evict entries accepted by `filter` until `stop_condition` returns true.
  void evict(std::function<bool(const BlockPtr&)> filter, std::function<bool()> stop_condition);


 public:
  std::unique_ptr<periodic::PeriodicTask> background_flush_back=nullptr;  // periodic task — presumably drives cpu_background_flush(); confirm
  std::shared_ptr<PageAlignedMemoryPool> pool;  // backing CPU memory pool for block buffers
  std::shared_ptr<GPUPageCache> gpu_cache;      // GPU-side page cache

  CacheEntryManager(CacheEntryManagerConfig config);

  // disable all move and copy
  CacheEntryManager(const CacheEntryManager& other) = delete;
  CacheEntryManager& operator=(const CacheEntryManager& other) = delete;
  CacheEntryManager(CacheEntryManager&& other) = delete;
  CacheEntryManager& operator=(CacheEntryManager&& other) = delete;

  void cpu_background_flush();

  void evict_for_cpu_cache();  // make room in `pool` by evicting CPU-resident blocks

  // just get block pointers, not allocate them, will not return nullptr
  BlockPtr get(bool& is_new,size_t size, std::optional<Key> key = std::nullopt);

  void debug();
};

}  // namespace kvc2

#endif