Commit 19fd8251 authored by limm's avatar limm
Browse files

support v0.6.16

parent 9ccee9c0
......@@ -7,6 +7,9 @@ configure_file(
${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt
)
set(PHMAP_SAVE_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(PHMAP_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
# Configure and build the downloaded googletest source
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
RESULT_VARIABLE result
......@@ -24,6 +27,9 @@ if(result)
message(FATAL_ERROR "Build step for googletest failed: ${result}")
endif()
set(CMAKE_CXX_FLAGS ${PHMAP_SAVE_CMAKE_CXX_FLAGS})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PHMAP_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY})
# Prevent overriding the parent project's compiler/linker settings on Windows
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
......
......@@ -15,8 +15,7 @@ function(phmap_cc_test)
)
set(_NAME "test_${PHMAP_CC_TEST_NAME}")
add_executable(${_NAME} "")
target_sources(${_NAME} PRIVATE ${PHMAP_CC_TEST_SRCS})
add_executable(${_NAME} ${PHMAP_CC_TEST_SRCS})
target_include_directories(${_NAME}
PUBLIC ${PHMAP_COMMON_INCLUDE_DIRS}
PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
......
- update version in phmap_config.h
- update version in comment on top of CMakeLists.txt
- git commit
- git push
- create the new release on github (tag `v1.3.8` - use semantic versioning)
- download the tar.gz from github, and use `sha256sum parallel-hashmap-1.3.8.tar.gz` on linux to get the sha256
## conan
- fork and clone [conan-center repo](https://github.com/conan-io/conan-center-index)
(or sync + git pull)
- git checkout master
- git checkout -b phmap_1.3.8
- update: `recipes/parallel-hashmap/all/conandata.yml` and `recipes/parallel-hashmap/config.yml`
- sudo pip install conan -U
- cd recipes/parallel-hashmap/all
- conan create conanfile.py parallel-hashmap/1.3.8@ -pr:b=default -pr:h=default
- git diff
- git commit -am "[parallel-hashmap] Bump version to 1.3.8"
- git push origin phmap_1.3.8
- create PR like [this](https://github.com/conan-io/conan-center-index/pull/13161)
## vcpkg
- fork and clone [vcpkg repo](https://github.com/microsoft/vcpkg)
(or sync + git pull)
- git checkout -b phmap_1.3.8
- update ports/parallel-hashmap/portfile.cmake and ports/parallel-hashmap/vcpkg.json
In a Windows (non-Cygwin) console:
- set VCPKG_ROOT=<path to your vcpkg clone>
- vcpkg install parallel-hashmap --triplet x64-windows
- # update sha in portfile.cmake - run `sha512sum parallel-hashmap-1.3.8.tar.gz` on linux
- git diff
- git commit -am "[parallel-hashmap] Bump version to 1.3.8"
- vcpkg x-add-version --all --overwrite-version ## (or ./vcpkg.exe --no-dry-run upgrade )
- git diff
- git commit -am "[parallel-hashmap] run x-add-version"
- git push origin phmap_1.3.8
......@@ -18,11 +18,11 @@ public:
{
phmap::BinaryOutputArchive ar_out (filename.c_str());
ar_out.dump(this->size());
ar_out.saveBinary(this->size());
for (auto& [k, v] : *this)
{
ar_out.dump(k);
v.dump(ar_out);
ar_out.saveBinary(k);
ar_out.saveBinary(v);
}
}
......@@ -31,7 +31,7 @@ public:
phmap::BinaryInputArchive ar_in(filename.c_str());
size_t size;
ar_in.load(&size);
ar_in.loadBinary(&size);
this->reserve(size);
while (size--)
......@@ -39,8 +39,8 @@ public:
K k;
Set v;
ar_in.load(&k);
v.load(ar_in);
ar_in.loadBinary(&k);
ar_in.loadBinary(&v);
this->insert_or_assign(std::move(k), std::move(v));
}
......
#include <parallel_hashmap/phmap_utils.h> // minimal header providing phmap::HashState()
#include <string>
#include <utility>
#include <tuple>
#include <vector>
#include <array>
#if PHMAP_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
#include <iostream>
using std::string;
using std::tuple;
using std::pair;
using groupid_t = std::array<uint16_t, 4>;
namespace std
{
    // Specialization of std::hash for groupid_t (std::array<uint16_t, 4>) so it
    // can be used as a key in standard and phmap hash containers.
    template<> struct hash<groupid_t>
    {
#if PHMAP_HAVE_STD_STRING_VIEW
        // Fast path: view the array's raw bytes as a string_view and reuse the
        // standard string_view hasher, hashing all bytes in one call.
        std::size_t operator()(groupid_t const &g) const
        {
            const std::string_view bv{reinterpret_cast<const char*>(g.data()), sizeof(g)};
            return std::hash<std::string_view>()(bv);
        }
#else
        // Fallback when string_view is unavailable: expand the array into a
        // tuple and hash it with phmap::Hash, which combines per-element hashes.
        std::size_t operator()(groupid_t const &g) const
        {
            return phmap::Hash<decltype(std::tuple_cat(g))>()(std::tuple_cat(g));
        }
#endif
    };
}
int main()
{
std::vector<groupid_t> groups = {
{17, 75, 82, 66},
{22, 88, 54, 42},
{11, 55, 77, 99} };
for (const auto &g : groups)
std::cout << std::hash<groupid_t>()(g) << '\n';
return 0;
}
#include <iostream>
#include <string>
#include <array>
#include <cstdint>
#include <limits>
#include <random>
#include <utility>
#define PHMAP_ALLOCATOR_NOTHROW 1
#include <parallel_hashmap/phmap.h>
// this is probably the fastest high quality 64bit random number generator that exists.
// Implements Small Fast Counting v4 RNG from PractRand.
// This is probably the fastest high-quality 64-bit random number generator
// that exists. Implements the Small Fast Counting v4 RNG from PractRand.
class sfc64 {
public:
    using result_type = uint64_t;

    // Non-copyable so two generators never accidentally share a stream;
    // movable so the generator can still be reseeded by assignment (seed()).
    sfc64(sfc64 const&) = delete;
    sfc64& operator=(sfc64 const&) = delete;
    sfc64(sfc64&&) = default;
    sfc64& operator=(sfc64&&) = default;

    // Restore a generator from a snapshot previously captured with state().
    sfc64(std::array<uint64_t, 4> const& st)
        : a_(st[0])
        , b_(st[1])
        , c_(st[2])
        , counter_(st[3]) {}

    // min/max satisfy the UniformRandomBitGenerator requirements; the extra
    // parentheses defeat any min/max macros (e.g. from <windows.h>).
    static constexpr uint64_t(min)() {
        return (std::numeric_limits<uint64_t>::min)();
    }
    static constexpr uint64_t(max)() {
        return (std::numeric_limits<uint64_t>::max)();
    }

    sfc64()
        : sfc64(UINT64_C(0x853c49e6748fea9b)) {}

    sfc64(uint64_t seed_value)
        : a_(seed_value)
        , b_(seed_value)
        , c_(seed_value)
        , counter_(1) {
        // Warm-up: discard the first 12 outputs so the internal state
        // decorrelates from the (highly regular) initial seed.
        for (int round = 0; round != 12; ++round)
            (void)operator()();
    }

    // Reseed from the OS entropy source.
    void seed() {
        *this = sfc64{std::random_device{}()};
    }

    // Produce the next 64-bit value (SFC4 update step).
    uint64_t operator()() noexcept {
        uint64_t const out = a_ + b_ + counter_++;
        a_ = b_ ^ (b_ >> right_shift);
        b_ = c_ + (c_ << left_shift);
        c_ = rotl(c_, rotation) + out;
        return out;
    }

    // Snapshot the full internal state ({a, b, c, counter}).
    std::array<uint64_t, 4> state() const {
        return {{a_, b_, c_, counter_}};
    }

    // Overwrite the internal state from a snapshot.
    void state(std::array<uint64_t, 4> const& st) {
        a_ = st[0];
        b_ = st[1];
        c_ = st[2];
        counter_ = st[3];
    }

private:
    // Rotate left; k is a non-zero compile-time constant here, so the
    // complementary shift never reaches the (undefined) full bit width.
    template <typename T>
    static T rotl(T const v, int k) {
        return (v << k) | (v >> (8 * sizeof(T) - k));
    }

    static constexpr int rotation = 24;
    static constexpr int right_shift = 11;
    static constexpr int left_shift = 3;

    uint64_t a_;
    uint64_t b_;
    uint64_t c_;
    uint64_t counter_;
};
// Map a 64-bit value onto one of 65536 four-letter strings ('a'..'p').
// The 64 bits are folded down to 16 by xor-ing the four 16-bit halves,
// then each of the four low nibbles selects one character.
static inline std::string to_str(uint64_t x) {
    uint64_t folded = (x >> 48) ^ (x >> 32) ^ (x >> 16) ^ x; // 64 bits -> 16 lsb
    std::string out;
    out.reserve(4);
    for (int nibble = 0; nibble != 4; ++nibble) {
        out.push_back(static_cast<char>('a' + (folded & 0xF)));
        folded >>= 4;
    }
    return out;
}
int main()
{
using Map = phmap::flat_hash_map<std::string, uint32_t>;
Map map;
map.reserve((size_t)(65536 * 1.1)); // we will create a maximun of 65536 different strings
sfc64 rng(123);
constexpr size_t const n = 50000000;
for (size_t i = 0; i < n; ++i) {
auto s = to_str(rng());
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
map[s]++;
}
uint64_t cnt = 0;
for (const auto& s : map) {
if (++cnt == 6) break;
std::cout << s.first << ": " << s.second << '\n';
}
return 0;
}
......@@ -8,18 +8,9 @@
#include <vector>
#include <ppl.h>
// Minimal RAII-free wrapper around the Windows slim reader/writer lock
// (SRWLOCK), exposing the BasicLockable interface (lock/unlock) that
// phmap's parallel containers expect for their Mutex template parameter.
// Only exclusive (writer) mode is exposed.
class srwlock {
    SRWLOCK _lock;
public:
    srwlock() { InitializeSRWLock(&_lock); }
    void lock() { AcquireSRWLockExclusive(&_lock); }
    void unlock() { ReleaseSRWLockExclusive(&_lock); }
};
using Map = phmap::parallel_flat_hash_map<std::string, int, phmap::priv::hash_default_hash<std::string>,
phmap::priv::hash_default_eq<std::string>,
std::allocator<std::pair<const std::string, int>>, 8, srwlock>;
std::allocator<std::pair<const std::string, int>>, 8, phmap::srwlock>;
class Dict
{
......
......@@ -4,6 +4,7 @@
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <utility>
#include <vector>
#include <random>
#include <parallel_hashmap/phmap.h>
......@@ -14,7 +15,7 @@
class Timer
{
public:
Timer(std::string name) : _name(name), _start(std::chrono::high_resolution_clock::now()) {}
Timer(std::string name) : _name(std::move(name)), _start(std::chrono::high_resolution_clock::now()) {}
~Timer()
{
......@@ -63,7 +64,7 @@ using Perturb = std::function<void (std::vector<uint64_t> &)>;
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
template<class Set, size_t N>
void test(const char *name, Perturb perturb1, Perturb /* perturb2 */)
void test(const char *name, const Perturb &perturb1, const Perturb& /* perturb2 */)
{
//phmap::btree_set<uint64_t> s;
Set s;
......
#include <algorithm>
#include <array>
#include <cctype>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include <parallel_hashmap/phmap.h>
#include <parallel_hashmap/btree.h>
/*
* count the number of occurrences of each word in a large text file using multiple threads
*/
int main() {
    // download Jane Austen's "Pride and Prejudice"
    // --------------------------------------------
    if (system("curl https://www.gutenberg.org/files/1342/1342-0.txt -o 1342-0.txt") != 0) {
        std::cout << "Error: could not retrieve test file https://www.gutenberg.org/files/1342/1342-0.txt\n";
        return 1;
    }

    const std::string filename = "1342-0.txt";

    constexpr int num_threads = 4;
    std::vector<std::thread> threads;
    std::array<std::vector<std::string>, num_threads> lines_array;

    {
        // populate num_threads vectors with lines from the book, round-robin
        std::ifstream file(filename);
        if (!file.is_open()) {
            std::cout << "Error: could not open file " << filename << std::endl;
            return 1;
        }
        int line_idx = 0;
        std::string line;
        while (std::getline(file, line)) {
            lines_array[line_idx % num_threads].push_back(std::move(line));
            ++line_idx;
        }
    }

    using Map = phmap::parallel_flat_hash_map_m<std::string, int>; // parallel_flat_hash_map_m has default internal mutex

    Map word_counts;

    // run num_threads threads, each thread processing lines from one of the vectors
    // -----------------------------------------------------------------------------
    threads.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
        threads.emplace_back(
            [&word_counts](std::vector<std::string>&& lines) {
                for (auto& line : lines) {
                    // Turn everything that is not alphanumeric into a space.
                    // Cast to unsigned char first: passing a negative char
                    // (possible for bytes >= 0x80) to std::isalnum is
                    // undefined behavior.
                    std::replace_if(line.begin(), line.end(),
                                    [](char c) -> bool { return !std::isalnum(static_cast<unsigned char>(c)); }, ' ');
                    std::istringstream iss(line);
                    std::string word;
                    while (iss >> word) {
                        // use lazy_emplace to modify the map while the mutex is locked
                        word_counts.lazy_emplace_l(word,
                            [&](Map::value_type& p) { ++p.second; }, // called only when key was already present
                            [&](const Map::constructor& ctor)        // construct value_type in place when key not present
                            { ctor(std::move(word), 1); } );
                    }
                }
            },
            std::move(lines_array[i]));
    }

    for (auto& thread : threads)
        thread.join();

    // print one word used at each frequency
    // -------------------------------------
    phmap::btree_map<int, std::string> result;
    for (const auto& pair : word_counts)
        result[pair.second] = pair.first;
    for (const auto& p : result)
        std::cout << p.first << ": " << p.second << std::endl;

    return 0;
}
// example graciously provided @samuelpmish
// ----------------------------------------
//
// Getting rid of the mutexes for read access
//
// This example demonstrated how to populate a parallel_flat_hash_map from multiple
// concurrent threads (The map is protected by internal mutexes), but then doing a
// swap to get rid of the mutexes (and all locking) for accessing the same hash_map
// in `read` only mode, again concurrently from multiple threads.
// --------------------------------------------------------------------------------
#include <random>
#include <iostream>
#include <unordered_map>
#include <unordered_set>
#include "parallel_hashmap/phmap.h"
///////////////////////////////////////////////////////////////////////////////
#include <chrono>
// Simple stopwatch: call start(), later stop(), then read elapsed() seconds.
class timer {
    using clock_type = std::chrono::high_resolution_clock;
    using seconds_d  = std::chrono::duration<double>;

public:
    void start() { begin_ = clock_type::now(); }
    void stop()  { end_ = clock_type::now(); }
    // Seconds between the most recent start()/stop() pair.
    double elapsed() { return std::chrono::duration_cast<seconds_d>(end_ - begin_).count(); }

private:
    clock_type::time_point begin_, end_;
};
///////////////////////////////////////////////////////////////////////////////
#include <thread>
// Fork/join helper: parallel_for() splits [0, n) into num_threads contiguous
// blocks, runs one std::thread per block, and joins them all before returning.
struct threadpool {
    // Split n items into num_threads nearly-equal contiguous ranges.
    // Returns num_threads + 1 boundaries; block i covers
    // [result[i], result[i+1]). The first (n % num_threads) blocks each
    // receive one extra item.
    std::vector< uint64_t > partition(uint64_t n) {
        uint64_t const base = n / num_threads;
        uint64_t extra = n % num_threads;
        std::vector< uint64_t > bounds(num_threads + 1);
        bounds[0] = 0;
        for (int i = 1; i <= num_threads; i++) {
            uint64_t step = base;
            if (extra > 0) {
                ++step;
                --extra;
            }
            bounds[i] = bounds[i - 1] + step;
        }
        return bounds;
    }

    threadpool(int n) : num_threads(n) {}

    // Apply f(i) for every i in [0, n), spreading iterations across
    // num_threads threads. Blocks until every thread has finished.
    template < typename lambda >
    void parallel_for(uint64_t n, const lambda & f) {
        std::vector< uint64_t > const bounds = partition(n);
        for (int tid = 0; tid < num_threads; tid++) {
            threads.emplace_back([&](uint64_t block) {
                for (uint64_t i = bounds[block]; i < bounds[block + 1]; i++)
                    f(i);
            }, static_cast<uint64_t>(tid));
        }
        for (auto & t : threads)
            t.join();
        threads.clear();
    }

    int num_threads;
    std::vector< std::thread > threads;
};
///////////////////////////////////////////////////////////////////////////////
// parallel_flat_hash_map sharded into submaps (submap count grows with n;
// 2^n submaps in phmap), each guarded by its own std::mutex — safe for
// concurrent insertion from multiple threads.
template < int n >
using pmap = phmap::parallel_flat_hash_map<
    uint64_t,
    uint64_t,
    std::hash<uint64_t>,
    std::equal_to<uint64_t>,
    std::allocator<std::pair<const uint64_t, uint64_t>>,
    n,
    std::mutex >;

// Same layout, but with phmap::NullMutex (a no-op lock): zero locking
// overhead, so only safe for read-only concurrent access.
template < int n >
using pmap_nullmutex = phmap::parallel_flat_hash_map<
    uint64_t,
    uint64_t,
    std::hash<uint64_t>,
    std::equal_to<uint64_t>,
    std::allocator<std::pair<const uint64_t, uint64_t>>,
    n,
    phmap::NullMutex >;
// Benchmark: build a map from sparse 64-bit vertex ids to dense new ids,
// then rewrite every element to reference the new ids.
//
// Phase 1 (timed): concurrent insertion into `Map` from num_threads threads
// — except for std::unordered_map, which does not support concurrent
// insertion and is populated single-threaded.
// Phase 2 (timed): swap the populated map into `Map_nomutex` (same contents,
// lock-free type) and perform read-only lookups from num_threads threads.
//
// `elements` is taken by value on purpose: each invocation mutates its own
// private copy of the element list.
template < typename Map, typename Map_nomutex >
void renumber(const std::vector< uint64_t > & vertex_ids,
              std::vector< std::array< uint64_t, 4 > > elements,
              int num_threads) {

    // std::unordered_map cannot be inserted into from multiple threads.
    bool supports_parallel_insertion =
        !std::is_same< Map, std::unordered_map<uint64_t, uint64_t> >::value;

    Map new_ids;
    std::atomic< uint64_t > new_id{ 0 };  // next dense id to hand out
    timer stopwatch;
    threadpool pool((supports_parallel_insertion) ? num_threads : 1);

    stopwatch.start();
    // ~10% headroom so the table does not rehash mid-benchmark.
    new_ids.reserve(vertex_ids.size() * 110 / 100);
    pool.parallel_for(vertex_ids.size(), [&](uint64_t i){
        auto id = new_id++;
        new_ids[vertex_ids[i]] = id;
    });
    stopwatch.stop();
    std::cout << stopwatch.elapsed() * 1000 << "ms ";

    // The lookups below are read-only, so use the full thread count even
    // when Map itself did not support parallel insertion.
    pool.num_threads = num_threads;

    stopwatch.start();
    Map_nomutex new_ids_nc;
    new_ids_nc.swap(new_ids);  // move contents into the mutex-free map type
    pool.parallel_for(elements.size(), [&](uint64_t i) {
        auto & elem = elements[i];
        elem = { new_ids_nc.at(elem[0]),
                 new_ids_nc.at(elem[1]),
                 new_ids_nc.at(elem[2]),
                 new_ids_nc.at(elem[3]) };
    });
    stopwatch.stop();
    std::cout << stopwatch.elapsed() * 1000 << "ms" << std::endl;
}
int main() {
    uint64_t const nvertices = 5000000;
    uint64_t const nelements = 25000000;

    // Random 35-bit vertex ids, and elements referencing four random vertices.
    std::random_device rd;   // a seed source for the random number engine
    std::mt19937 gen(rd());  // mersenne_twister_engine seeded with rd()
    std::uniform_int_distribution<uint64_t> vertex_id_dist(0, uint64_t(1) << 35);
    std::uniform_int_distribution<uint64_t> elem_id_dist(0, nvertices - 1);

    std::cout << "generating dataset ." << std::flush;

    std::vector< uint64_t > vertex_ids(nvertices);
    for (auto & vid : vertex_ids)
        vid = vertex_id_dist(gen);

    std::cout << "." << std::flush;

    std::vector< std::array<uint64_t, 4> > elements(nelements);
    for (auto & elem : elements)
        elem = {
            vertex_ids[elem_id_dist(gen)],
            vertex_ids[elem_id_dist(gen)],
            vertex_ids[elem_id_dist(gen)],
            vertex_ids[elem_id_dist(gen)]
        };

    std::cout << " done" << std::endl;

    using stdmap = std::unordered_map<uint64_t, uint64_t>;

    // Compare single-threaded vs multi-threaded renumbering across map types.
    std::cout << "std::unordered_map, 1 thread: ";
    renumber< stdmap, stdmap >(vertex_ids, elements, 1);
    std::cout << "std::unordered_map, 32 thread (single threaded insertion): ";
    renumber< stdmap, stdmap >(vertex_ids, elements, 32);

    std::cout << "pmap4, 1 thread: ";
    renumber< pmap<4>, pmap_nullmutex<4> >(vertex_ids, elements, 1);
    std::cout << "pmap4, 32 threads: ";
    renumber< pmap<4>, pmap_nullmutex<4> >(vertex_ids, elements, 32);

    std::cout << "pmap6, 1 thread: ";
    renumber< pmap<6>, pmap_nullmutex<6> >(vertex_ids, elements, 1);
    std::cout << "pmap6, 32 threads: ";
    renumber< pmap<6>, pmap_nullmutex<6> >(vertex_ids, elements, 32);
}
\ No newline at end of file
......@@ -55,7 +55,7 @@ public:
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
void showtime(const char *name, std::function<void ()> doit)
void showtime(const char *name, const std::function<void ()>& doit)
{
auto t1 = std::chrono::high_resolution_clock::now();
doit();
......
......@@ -60,6 +60,7 @@
#include <cstring>
#include <limits>
#include <new>
#include <type_traits>
#include "phmap_fwd_decl.h"
#include "phmap_base.h"
......@@ -76,14 +77,6 @@
namespace phmap {
// Defined and documented later on in this file.
template <typename T>
struct is_trivially_destructible;
// Defined and documented later on in this file.
template <typename T>
struct is_trivially_move_assignable;
namespace type_traits_internal {
// Silence MSVC warnings about the destructor being defined as deleted.
......@@ -107,26 +100,26 @@ namespace phmap {
: std::integral_constant<
bool, std::is_move_constructible<
type_traits_internal::SingleMemberUnion<T>>::value &&
phmap::is_trivially_destructible<T>::value> {};
std::is_trivially_destructible<T>::value> {};
template <class T>
struct IsTriviallyCopyConstructibleObject
: std::integral_constant<
bool, std::is_copy_constructible<
type_traits_internal::SingleMemberUnion<T>>::value &&
phmap::is_trivially_destructible<T>::value> {};
std::is_trivially_destructible<T>::value> {};
#if 0
template <class T>
struct IsTriviallyMoveAssignableReference : std::false_type {};
template <class T>
struct IsTriviallyMoveAssignableReference<T&>
: phmap::is_trivially_move_assignable<T>::type {};
: std::is_trivially_move_assignable<T>::type {};
template <class T>
struct IsTriviallyMoveAssignableReference<T&&>
: phmap::is_trivially_move_assignable<T>::type {};
: std::is_trivially_move_assignable<T>::type {};
#endif
} // namespace type_traits_internal
......@@ -155,10 +148,10 @@ namespace phmap {
public:
static constexpr bool kValue =
(__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) &&
(__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) &&
(phmap::is_trivially_copyable<ExtentsRemoved>::value || !kIsCopyOrMoveConstructible) &&
(phmap::is_trivially_copy_assignable<ExtentsRemoved>::value || !kIsCopyOrMoveAssignable) &&
(kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) &&
is_trivially_destructible<ExtentsRemoved>::value &&
std::is_trivially_destructible<ExtentsRemoved>::value &&
// We need to check for this explicitly because otherwise we'll say
// references are trivial copyable when compiled by MSVC.
!std::is_reference<ExtentsRemoved>::value;
......@@ -744,13 +737,13 @@ namespace priv {
StringBtreeDefaultLess(std::less<std::string_view>) {} // NOLINT
StringBtreeDefaultLess(phmap::Less<std::string_view>) {} // NOLINT
phmap::weak_ordering operator()(std::string_view lhs,
std::string_view rhs) const {
phmap::weak_ordering operator()(const std::string_view &lhs,
const std::string_view &rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
#else
phmap::weak_ordering operator()(std::string lhs,
std::string rhs) const {
phmap::weak_ordering operator()(const std::string &lhs,
const std::string &rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
#endif
......@@ -770,8 +763,8 @@ namespace priv {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
#else
phmap::weak_ordering operator()(std::string lhs,
std::string rhs) const {
phmap::weak_ordering operator()(const std::string &lhs,
const std::string &rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
#endif
......@@ -1210,6 +1203,10 @@ namespace priv {
reference value(size_type i) { return params_type::element(slot(i)); }
const_reference value(size_type i) const { return params_type::element(slot(i)); }
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
// Getters/setter for the child at position i in the node.
btree_node *child(size_type i) const { return GetField<3>()[i]; }
btree_node *&mutable_child(size_type i) { return GetField<3>()[i]; }
......@@ -1221,6 +1218,9 @@ namespace priv {
mutable_child(i) = c;
c->set_position((field_type)i);
}
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
void init_child(int i, btree_node *c) {
set_child(i, c);
c->set_parent(this);
......@@ -1861,7 +1861,7 @@ namespace priv {
void swap(btree &x);
const key_compare &key_comp() const noexcept {
return root_.template get<0>();
return std::get<0>(root_);
}
template <typename K, typename LK>
bool compare_keys(const K &x, const LK &y) const {
......@@ -1954,10 +1954,10 @@ namespace priv {
private:
// Internal accessor routines.
node_type *root() { return root_.template get<2>(); }
const node_type *root() const { return root_.template get<2>(); }
node_type *&mutable_root() noexcept { return root_.template get<2>(); }
key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
node_type *root() { return std::get<2>(root_); }
const node_type *root() const { return std::get<2>(root_); }
node_type *&mutable_root() noexcept { return std::get<2>(root_); }
key_compare *mutable_key_comp() noexcept { return &std::get<0>(root_); }
// The leftmost node is stored as the parent of the root node.
node_type *leftmost() { return root()->parent(); }
......@@ -1965,10 +1965,10 @@ namespace priv {
// Allocator routines.
allocator_type *mutable_allocator() noexcept {
return &root_.template get<1>();
return &std::get<1>(root_);
}
const allocator_type &allocator() const noexcept {
return root_.template get<1>();
return std::get<1>(root_);
}
// Allocates a correctly aligned node of at least size bytes using the
......@@ -2085,7 +2085,7 @@ namespace priv {
void internal_clear(node_type *node);
// Verifies the tree structure of node.
int internal_verify(const node_type *node,
size_type internal_verify(const node_type *node,
const key_type *lo, const key_type *hi) const;
node_stats internal_stats(const node_type *node) const {
......@@ -2110,11 +2110,7 @@ namespace priv {
}
private:
// We use compressed tuple in order to save space because key_compare and
// allocator_type are usually empty.
phmap::priv::CompressedTuple<key_compare, allocator_type,
node_type *>
root_;
std::tuple<key_compare, allocator_type, node_type *> root_;
// A pointer to the rightmost node. Note that the leftmost node is stored as
// the root's parent.
......@@ -3234,7 +3230,7 @@ namespace priv {
}
template <typename P>
int btree<P>::internal_verify(
typename btree<P>::size_type btree<P>::internal_verify(
const node_type *node, const key_type *lo, const key_type *hi) const {
assert(node->count() > 0);
assert(node->count() <= node->max_count());
......@@ -3247,7 +3243,7 @@ namespace priv {
for (int i = 1; i < node->count(); ++i) {
assert(!compare_keys(node->key(i), node->key(i - 1)));
}
int count = node->count();
size_type count = node->count();
if (!node->leaf()) {
for (int i = 0; i <= node->count(); ++i) {
assert(node->child(i) != nullptr);
......@@ -3325,8 +3321,8 @@ namespace priv {
// ----------------
template <typename K = key_type>
size_type count(const key_arg<K> &key) const {
auto equal_range = this->equal_range(key);
return std::distance(equal_range.first, equal_range.second);
auto er = this->equal_range(key);
return std::distance(er.first, er.second);
}
template <typename K = key_type>
iterator find(const key_arg<K> &key) {
......@@ -3366,8 +3362,8 @@ namespace priv {
}
template <typename K = key_type>
size_type erase(const key_arg<K> &key) {
auto equal_range = this->equal_range(key);
return tree_.erase_range(equal_range.first, equal_range.second).first;
auto er = this->equal_range(key);
return tree_.erase_range(er.first, er.second).first;
}
node_type extract(iterator position) {
// Use Move instead of Transfer, because the rebalancing code expects to
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class SparseppConan(ConanFile):
    """Conan recipe packaging the parallel-hashmap header-only library."""
    name = "parallel_hashmap"
    version = "1.34"
    description = "A header-only, very fast and memory-friendly hash map"
    url = "https://github.com/greg7mdp/parallel-hashmap/blob/master/parallel_hashmap/conanfile.py"
    # Indicates License type of the packaged library
    license = "https://github.com/greg7mdp/parallel-hashmap/blob/master/LICENSE"
    # Packages the license for the conanfile.py
    exports = ["LICENSE"]
    # Custom attributes for Bincrafters recipe conventions
    source_subfolder = "source_subfolder"

    def source(self):
        # Fetch and unpack the release tarball from GitHub.
        repo = "https://github.com/greg7mdp/parallel-hashmap"
        tools.get("{0}/archive/{1}.tar.gz".format(repo, self.version))
        # Renaming to "source_subfolder" is a convention that simplifies later steps.
        unpacked = "{0}-{1}".format(self.name, self.version)
        os.rename(unpacked, self.source_subfolder)

    def package(self):
        # Ship the license plus every header under parallel_hashmap/.
        headers = os.path.join(self.source_subfolder, "parallel_hashmap")
        self.copy(pattern="LICENSE")
        self.copy(pattern="*", dst="include/parallel_hashmap", src=headers)

    def package_id(self):
        # Header-only: a single package id covers all settings/options.
        self.info.header_only()
......@@ -135,6 +135,7 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs,
using std::swap;
swap(lhs, rhs);
}
template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
......@@ -192,27 +193,36 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
// --------------------------------------------------------------------------
template <class T>
constexpr bool IsNoThrowSwappable() {
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
return false;
}
// --------------------------------------------------------------------------
template <typename T>
int TrailingZeros(T x) {
uint32_t TrailingZeros(T x) {
uint32_t res;
PHMAP_IF_CONSTEXPR(sizeof(T) == 8)
return base_internal::CountTrailingZerosNonZero64(static_cast<uint64_t>(x));
res = base_internal::CountTrailingZerosNonZero64(static_cast<uint64_t>(x));
else
return base_internal::CountTrailingZerosNonZero32(static_cast<uint32_t>(x));
res = base_internal::CountTrailingZerosNonZero32(static_cast<uint32_t>(x));
return res;
}
// --------------------------------------------------------------------------
template <typename T>
int LeadingZeros(T x) {
uint32_t LeadingZeros(T x) {
uint32_t res;
PHMAP_IF_CONSTEXPR(sizeof(T) == 8)
return base_internal::CountLeadingZeros64(static_cast<uint64_t>(x));
res = base_internal::CountLeadingZeros64(static_cast<uint64_t>(x));
else
return base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
res = base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
return res;
}
// --------------------------------------------------------------------------
......@@ -351,7 +361,7 @@ inline size_t H1(size_t hashval, const ctrl_t* ) {
#endif
inline h2_t H2(size_t hashval) { return (ctrl_t)(hashval & 0x7F); }
inline ctrl_t H2(size_t hashval) { return (ctrl_t)(hashval & 0x7F); }
inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
......@@ -418,14 +428,10 @@ struct GroupSse2Impl
#endif
}
#ifdef __INTEL_COMPILER
#pragma warning push
#pragma warning disable 68
#endif
// Returns a bitmask representing the positions of empty or deleted slots.
// -----------------------------------------------------------------------
BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<uint8_t>(kSentinel));
auto special = _mm_set1_epi8(static_cast<char>(kSentinel));
return BitMask<uint32_t, kWidth>(
static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
......@@ -433,13 +439,10 @@ struct GroupSse2Impl
// Returns the number of trailing empty or deleted elements in the group.
// ----------------------------------------------------------------------
uint32_t CountLeadingEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<uint8_t>(kSentinel));
auto special = _mm_set1_epi8(static_cast<char>(kSentinel));
return TrailingZeros(
static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
#ifdef __INTEL_COMPILER
#pragma warning pop
#endif
// ----------------------------------------------------------------------
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
......@@ -575,8 +578,7 @@ inline size_t CapacityToGrowth(size_t capacity)
assert(IsValidCapacity(capacity));
// `capacity*7/8`
PHMAP_IF_CONSTEXPR (Group::kWidth == 8) {
if (capacity == 7)
{
if (capacity == 7) {
// x-x/8 does not work when x==7.
return 6;
}
......@@ -592,8 +594,7 @@ inline size_t GrowthToLowerboundCapacity(size_t growth)
{
// `growth*8/7`
PHMAP_IF_CONSTEXPR (Group::kWidth == 8) {
if (growth == 7)
{
if (growth == 7) {
// x+(x-1)/7 does not work when x==7.
return 8;
}
......@@ -957,7 +958,7 @@ public:
return tmp;
}
#if PHMAP_BIDIRECTIONAL
#if 0 // PHMAP_BIDIRECTIONAL
// PRECONDITION: not a begin() iterator.
iterator& operator--() {
assert(ctrl_);
......@@ -1187,7 +1188,7 @@ public:
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
settings_(that.settings_) {
settings_(std::move(that.settings_)) {
// growth_left was copied above, reset the one from `that`.
that.growth_left() = 0;
}
......@@ -1242,7 +1243,7 @@ public:
}
iterator end()
{
#if PHMAP_BIDIRECTIONAL
#if 0 // PHMAP_BIDIRECTIONAL
return iterator_at(capacity_);
#else
return {ctrl_ + capacity_};
......@@ -1262,23 +1263,18 @@ public:
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
PHMAP_ATTRIBUTE_REINITIALIZES void clear() {
// Iterating over this container is O(bucket_count()). When bucket_count()
// is much greater than size(), iteration becomes prohibitively expensive.
// For clear() it is more important to reuse the allocated array when the
// container is small because allocation takes comparatively long time
// compared to destruction of the elements of the container. So we pick the
// largest bucket_count() threshold for which iteration is still fast and
// past that we simply deallocate the array.
if (empty())
return;
if (capacity_ > 127) {
destroy_slots();
} else if (capacity_) {
if (capacity_) {
PHMAP_IF_CONSTEXPR((!std::is_trivially_destructible<typename PolicyTraits::value_type>::value ||
std::is_same<typename Policy::is_flat, std::false_type>::value)) {
// node map or not trivially destructible... we need to iterate and destroy values one by one
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
}
size_ = 0;
reset_ctrl(capacity_);
reset_growth_left(capacity_);
......@@ -1447,10 +1443,9 @@ public:
// This overload kicks in if we cannot deduce the key from args. It constructs
// value_type unconditionally and then either moves it into the table or
// destroys.
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
template <class... Args, typename std::enable_if<!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
typename phmap::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
......@@ -1461,7 +1456,7 @@ public:
template <class... Args, typename std::enable_if<!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace_with_hash(size_t hashval, Args&&... args) {
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type raw;
typename phmap::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
......@@ -1506,6 +1501,10 @@ public:
friend class raw_hash_set;
public:
slot_type* slot() const {
return *slot_;
}
template <class... Args>
void operator()(Args&&... args) const {
assert(*slot_);
......@@ -1520,22 +1519,39 @@ public:
slot_type** slot_;
};
// Extension API: support for lazy emplace.
// Looks up key in the table. If found, returns the iterator to the element.
// Otherwise calls f with one argument of type raw_hash_set::constructor. f
// MUST call raw_hash_set::constructor with arguments as if a
// raw_hash_set::value_type is constructed, otherwise the behavior is
// undefined.
//
// For example:
//
// std::unordered_set<ArenaString> s;
// // Makes ArenaStr even if "abc" is in the map.
// s.insert(ArenaString(&arena, "abc"));
//
// flat_hash_set<ArenaStr> s;
// // Makes ArenaStr only if "abc" is not in the map.
// s.lazy_emplace("abc", [&](const constructor& ctor) {
// ctor(&arena, "abc");
// });
// -----------------------------------------------------
// Looks up `key`; if absent, calls `f` with a `constructor` that must build the
// value in place. Delegates to the hash-accepting overload.
// NOTE: the merged diff left the old find_or_prepare_insert body in front of the
// new delegating return, making the new code unreachable; only the delegation stays.
template <class K = key_type, class F>
iterator lazy_emplace(const key_arg<K>& key, F&& f) {
    return lazy_emplace_with_hash(key, this->hash(key), std::forward<F>(f));
}
// Same as lazy_emplace, but the caller supplies the precomputed hash.
// On a miss we reserve a slot, let `f` construct the value, then publish the
// slot by writing its control byte (set_ctrl) only after construction.
template <class K = key_type, class F>
iterator lazy_emplace_with_hash(const key_arg<K>& key, size_t hashval, F&& f) {
    size_t offset = _find_key(key, hashval);
    if (offset == (size_t)-1) {      // (size_t)-1 == "not found"
        offset = prepare_insert(hashval);
        lazy_emplace_at(offset, std::forward<F>(f));
        this->set_ctrl(offset, H2(hashval));
    }
    return iterator_at(offset);
}
template <class K = key_type, class F>
......@@ -1547,11 +1563,13 @@ public:
// If `key` is absent, construct it via `f`; if present, erase it.
// (Toggle-style primitive used by the parallel containers.)
template <class K = key_type, class F>
void emplace_single_with_hash(const key_arg<K>& key, size_t hashval, F&& f) {
    size_t offset = _find_key(key, hashval);
    if (offset == (size_t)-1) {      // not found: insert
        offset = prepare_insert(hashval);
        lazy_emplace_at(offset, std::forward<F>(f));
        this->set_ctrl(offset, H2(hashval));
    } else                           // found: remove
        _erase(iterator_at(offset));
}
......@@ -1647,7 +1665,7 @@ public:
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
(!AllocTraits::propagate_on_container_swap::value ||
IsNoThrowSwappable<allocator_type>())) {
IsNoThrowSwappable<allocator_type>(typename AllocTraits::propagate_on_container_swap{}))) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
......@@ -1657,12 +1675,7 @@ public:
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
swap(infoz_, that.infoz_);
if (AllocTraits::propagate_on_container_swap::value) {
swap(alloc_ref(), that.alloc_ref());
} else {
// If the allocators do not compare equal it is officially undefined
// behavior. We choose to do nothing.
}
SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{});
}
#if !defined(PHMAP_NON_DETERMINISTIC)
......@@ -1793,7 +1806,7 @@ public:
size_t bucket_count() const { return capacity_; }
// Current load factor (size / bucket_count), 0 for an empty table.
// Computed in double then narrowed explicitly to avoid a narrowing warning.
float load_factor() const {
    return capacity_ ? static_cast<float>(static_cast<double>(size()) / capacity_) : 0.0f;
}
float max_load_factor() const { return 1.0f; }
void max_load_factor(float) {
......@@ -1884,11 +1897,14 @@ private:
std::pair<iterator, bool> emplace_decomposable(const K& key, size_t hashval,
                                               Args&&... args)
{
    // Look the key up first; only construct the value when absent.
    size_t offset = _find_key(key, hashval);
    if (offset == (size_t)-1) {
        offset = prepare_insert(hashval);
        emplace_at(offset, std::forward<Args>(args)...);
        // Publish the slot after construction so readers never see a
        // full control byte with an unconstructed value.
        this->set_ctrl(offset, H2(hashval));
        return {iterator_at(offset), true};
    }
    return {iterator_at(offset), false};
}
struct EmplaceDecomposable
......@@ -1914,9 +1930,11 @@ private:
{
template <class K, class... Args>
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
size_t hashval = s.hash(key);
auto res = s.find_or_prepare_insert(key, hashval);
if (res.second) {
PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
s.set_ctrl(res.first, H2(hashval));
} else if (do_destroy) {
PolicyTraits::destroy(&s.alloc_ref(), &slot);
}
......@@ -1935,6 +1953,7 @@ private:
auto res = s.find_or_prepare_insert(key, hashval);
if (res.second) {
PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
s.set_ctrl(res.first, H2(hashval));
} else if (do_destroy) {
PolicyTraits::destroy(&s.alloc_ref(), &slot);
}
......@@ -1989,12 +2008,19 @@ private:
}
void destroy_slots() {
if (!capacity_) return;
if (!capacity_)
return;
PHMAP_IF_CONSTEXPR((!std::is_trivially_destructible<typename PolicyTraits::value_type>::value ||
std::is_same<typename Policy::is_flat, std::false_type>::value)) {
// node map, or not trivially destructible... we need to iterate and destroy values one by one
// std::cout << "either this is a node map or " << type_name<typename PolicyTraits::value_type>() << " is not trivially_destructible\n";
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
}
auto layout = MakeLayout(capacity_);
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
......@@ -2053,7 +2079,7 @@ private:
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
typename phmap::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type
raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
......@@ -2170,7 +2196,7 @@ private:
protected:
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key, size_t hashval) {
size_t _find_key(const K& key, size_t hashval) {
auto seq = probe(hashval);
while (true) {
Group g{ctrl_ + seq.offset()};
......@@ -2178,17 +2204,20 @@ protected:
if (PHMAP_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset((size_t)i)))))
return {seq.offset((size_t)i), false};
return seq.offset((size_t)i);
}
if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
}
return {prepare_insert(hashval), true};
return (size_t)-1;
}
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
return find_or_prepare_insert(key, this->hash(key));
std::pair<size_t, bool> find_or_prepare_insert(const K& key, size_t hashval) {
size_t offset = _find_key(key, hashval);
if (offset == (size_t)-1)
return {prepare_insert(hashval), true};
return {offset, false};
}
size_t prepare_insert(size_t hashval) PHMAP_ATTRIBUTE_NOINLINE {
......@@ -2200,7 +2229,7 @@ protected:
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
set_ctrl(target.offset, H2(hashval));
// set_ctrl(target.offset, H2(hashval));
infoz_.RecordInsert(hashval, target.probe_length);
return target.offset;
}
......@@ -2218,32 +2247,18 @@ protected:
PolicyTraits::construct(&alloc_ref(), slots_ + i,
std::forward<Args>(args)...);
#ifdef PHMAP_CHECK_CONSTRUCTED_VALUE
// this check can be costly, so do it only when requested
assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
iterator_at(i) &&
"constructed value does not match the lookup key");
#endif
}
// Build an iterator over bucket `i` from its control byte and slot; no bounds check.
iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
private:
friend struct RawHashSetTestOnlyAccess;
probe_seq<Group::kWidth> probe(size_t hashval) const {
return probe_seq<Group::kWidth>(H1(hashval, ctrl_), capacity_);
}
// Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl(size_t capacity) {
std::memset(ctrl_, kEmpty, capacity + Group::kWidth);
ctrl_[capacity] = kSentinel;
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity);
}
void reset_growth_left(size_t capacity) {
growth_left() = CapacityToGrowth(capacity) - size_;
}
protected:
// Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
// the end too.
void set_ctrl(size_t i, ctrl_t h) {
......@@ -2260,7 +2275,27 @@ private:
((Group::kWidth - 1) & capacity_)] = h;
}
size_t& growth_left() { return settings_.template get<0>(); }
private:
friend struct RawHashSetTestOnlyAccess;
probe_seq<Group::kWidth> probe(size_t hashval) const {
return probe_seq<Group::kWidth>(H1(hashval, ctrl_), capacity_);
}
// Reset all ctrl bytes back to kEmpty, except the sentinel.
void reset_ctrl(size_t new_capacity) {
std::memset(ctrl_, kEmpty, new_capacity + Group::kWidth);
ctrl_[new_capacity] = kSentinel;
SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * new_capacity);
}
void reset_growth_left(size_t new_capacity) {
growth_left() = CapacityToGrowth(new_capacity) - size_;
}
size_t& growth_left() { return std::get<0>(settings_); }
const size_t& growth_left() const { return std::get<0>(settings_); }
template <size_t N,
template <class, class, class, class> class RefSet,
......@@ -2288,13 +2323,13 @@ private:
// small tables.
bool is_small() const { return capacity_ < Group::kWidth - 1; }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
key_equal& eq_ref() { return settings_.template get<2>(); }
const key_equal& eq_ref() const { return settings_.template get<2>(); }
allocator_type& alloc_ref() { return settings_.template get<3>(); }
hasher& hash_ref() { return std::get<1>(settings_); }
const hasher& hash_ref() const { return std::get<1>(settings_); }
key_equal& eq_ref() { return std::get<2>(settings_); }
const key_equal& eq_ref() const { return std::get<2>(settings_); }
allocator_type& alloc_ref() { return std::get<3>(settings_); }
const allocator_type& alloc_ref() const {
return settings_.template get<3>();
return std::get<3>(settings_);
}
// TODO(alkis): Investigate removing some of these fields:
......@@ -2305,8 +2340,7 @@ private:
size_t size_ = 0; // number of full slots
size_t capacity_ = 0; // total number of slots
HashtablezInfoHandle infoz_;
phmap::priv::CompressedTuple<size_t /* growth_left */, hasher,
key_equal, allocator_type>
std::tuple<size_t /* growth_left */, hasher, key_equal, allocator_type>
settings_{0, hasher{}, key_equal{}, allocator_type{}};
};
......@@ -2454,22 +2488,31 @@ public:
private:
// insert_or_assign: insert {k, v} when absent, otherwise overwrite the mapped value.
// Returns {iterator, true} on insert, {iterator, false} on assign.
template <class K, class V>
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
    size_t hashval = this->hash(k);
    size_t offset = this->_find_key(k, hashval);
    if (offset == (size_t)-1) {
        offset = this->prepare_insert(hashval);
        this->emplace_at(offset, std::forward<K>(k), std::forward<V>(v));
        this->set_ctrl(offset, H2(hashval));     // publish after construction
        return {this->iterator_at(offset), true};
    }
    Policy::value(&*this->iterator_at(offset)) = std::forward<V>(v);
    return {this->iterator_at(offset), false};
}
// try_emplace: construct the mapped value from `args` only when `k` is absent;
// an existing entry is left untouched (and `args` are not consumed).
template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
    size_t hashval = this->hash(k);
    size_t offset = this->_find_key(k, hashval);
    if (offset == (size_t)-1) {
        offset = this->prepare_insert(hashval);
        this->emplace_at(offset, std::piecewise_construct,
                         std::forward_as_tuple(std::forward<K>(k)),
                         std::forward_as_tuple(std::forward<Args>(args)...));
        this->set_ctrl(offset, H2(hashval));     // publish after construction
        return {this->iterator_at(offset), true};
    }
    return {this->iterator_at(offset), false};
}
};
......@@ -2536,6 +2579,10 @@ public:
protected:
using Lockable = phmap::LockableImpl<Mtx_>;
using UniqueLock = typename Lockable::UniqueLock;
using SharedLock = typename Lockable::SharedLock;
using ReadWriteLock = typename Lockable::ReadWriteLock;
// --------------------------------------------------------------------
struct Inner : public Lockable
......@@ -2586,9 +2633,7 @@ private:
// --------------------------------------------------------------------
template <class T>
using RequiresInsertable = typename std::enable_if<
phmap::disjunction<std::is_convertible<T, init_type>,
SameAsElementReference<T>>::value,
int>::type;
phmap::disjunction<std::is_convertible<T, init_type>, SameAsElementReference<T>>::value, int>::type;
// RequiresNotInit is a workaround for gcc prior to 7.1.
// See https://godbolt.org/g/Y4xsUh.
......@@ -2915,7 +2960,7 @@ public:
// Clears every submap, taking each submap's write lock in turn.
// (The merged diff declared the lock twice — once with the old spelled-out
// type and once with the class-level alias; only the alias form remains.)
PHMAP_ATTRIBUTE_REINITIALIZES void clear() {
    for (auto& inner : sets_)
    {
        UniqueLock m(inner);
        inner.set_.clear();
    }
}
......@@ -2924,7 +2969,7 @@ public:
// ----------------------------------------
// Clears a single submap by index, under that submap's write lock.
void clear(std::size_t submap_index) {
    Inner& inner = sets_[submap_index];
    UniqueLock m(inner);
    inner.set_.clear();
}
......@@ -3017,7 +3062,7 @@ public:
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::UniqueLock m(inner);
UniqueLock m(inner);
auto res = set.insert(std::move(node), hashval);
return { make_iterator(&inner, res.position),
res.inserted,
......@@ -3041,15 +3086,6 @@ public:
// ----------------------------------
// same as emplace, but hashval is provided
// --------------------------------------------------------------------
template <class K, class... Args>
std::pair<iterator, bool> emplace_decomposable_with_hash(const K& key, size_t hashval, Args&&... args)
{
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::UniqueLock m(inner);
return make_rv(&inner, set.emplace_decomposable(key, hashval, std::forward<Args>(args)...));
}
struct EmplaceDecomposableHashval
{
template <class K, class... Args>
......@@ -3070,8 +3106,7 @@ public:
// // Creates no std::string copies and makes no heap allocations.
// m.emplace("abc", "xyz");
// --------------------------------------------------------------------
template <class... Args, typename std::enable_if<
IsDecomposable<Args...>::value, int>::type = 0>
template <class... Args, typename std::enable_if<IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace_with_hash(size_t hashval, Args&&... args) {
return PolicyTraits::apply(EmplaceDecomposableHashval{*this, hashval},
std::forward<Args>(args)...);
......@@ -3081,19 +3116,17 @@ public:
// value_type unconditionally and then either moves it into the table or
// destroys.
// --------------------------------------------------------------------
// Non-decomposable overload: the key cannot be extracted from `args`, so the
// value is constructed unconditionally into stack storage, then either moved
// into the table or destroyed by InsertSlotWithHash.
template <class... Args, typename std::enable_if<!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace_with_hash(size_t hashval, Args&&... args) {
    typename phmap::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type raw;
    slot_type* slot = reinterpret_cast<slot_type*>(&raw);

    PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
    const auto& elem = PolicyTraits::element(slot);
    Inner& inner = sets_[subidx(hashval)];
    auto& set = inner.set_;
    UniqueLock m(inner);
    typename EmbeddedSet::template InsertSlotWithHash<true> f { inner, std::move(*slot), hashval };
    return make_rv(PolicyTraits::apply(f, elem));
}
......@@ -3102,26 +3135,36 @@ public:
return emplace_with_hash(hashval, std::forward<Args>(args)...).first;
}
template <class K = key_type, class F>
iterator lazy_emplace_with_hash(const key_arg<K>& key, size_t hashval, F&& f) {
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::UniqueLock m(inner);
return make_iterator(&inner, set.lazy_emplace_with_hash(key, hashval, std::forward<F>(f)));
}
// --------------------------------------------------------------------
// end of phmap extension
// --------------------------------------------------------------------
template <class K, class... Args>
std::pair<iterator, bool> emplace_decomposable(const K& key, Args&&... args)
std::pair<iterator, bool> emplace_decomposable_with_hash(const K& key, size_t hashval, Args&&... args)
{
size_t hashval = this->hash(key);
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::UniqueLock m(inner);
return make_rv(&inner, set.emplace_decomposable(key, hashval, std::forward<Args>(args)...));
ReadWriteLock m(inner);
size_t offset = set._find_key(key, hashval);
if (offset == (size_t)-1 && m.switch_to_unique()) {
// we did an unlock/lock, and another thread could have inserted the same key, so we need to
// do a find() again.
offset = set._find_key(key, hashval);
}
if (offset == (size_t)-1) {
offset = set.prepare_insert(hashval);
set.emplace_at(offset, std::forward<Args>(args)...);
set.set_ctrl(offset, H2(hashval));
return make_rv(&inner, {set.iterator_at(offset), true});
}
return make_rv(&inner, {set.iterator_at(offset), false});
}
// Convenience overload: hashes `key` and forwards to the hash-accepting version.
template <class K, class... Args>
std::pair<iterator, bool> emplace_decomposable(const K& key, Args&&... args)
{
    return emplace_decomposable_with_hash(key, this->hash(key), std::forward<Args>(args)...);
}
struct EmplaceDecomposable
......@@ -3143,21 +3186,18 @@ public:
// // Creates no std::string copies and makes no heap allocations.
// m.emplace("abc", "xyz");
// --------------------------------------------------------------------
// Decomposable overload: the key can be extracted from `args`, so no value is
// constructed unless the insert actually happens.
template <class... Args, typename std::enable_if<IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
    return PolicyTraits::apply(EmplaceDecomposable{*this}, std::forward<Args>(args)...);
}
// This overload kicks in if we cannot deduce the key from args. It constructs
// value_type unconditionally and then either moves it into the table or
// destroys.
// --------------------------------------------------------------------
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
template <class... Args, typename std::enable_if<!IsDecomposable<Args...>::value, int>::type = 0>
std::pair<iterator, bool> emplace(Args&&... args) {
typename std::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type raw;
typename phmap::aligned_storage<sizeof(slot_type), alignof(slot_type)>::type raw;
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
size_t hashval = this->hash(PolicyTraits::key(slot));
......@@ -3165,9 +3205,8 @@ public:
const auto& elem = PolicyTraits::element(slot);
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::UniqueLock m(inner);
typename EmbeddedSet::template InsertSlotWithHash<true> f {
inner, std::move(*slot), hashval};
UniqueLock m(inner);
typename EmbeddedSet::template InsertSlotWithHash<true> f { inner, std::move(*slot), hashval };
return make_rv(PolicyTraits::apply(f, elem));
}
......@@ -3192,12 +3231,27 @@ public:
// lazy_emplace
// ------------
// Parallel lazy_emplace with a caller-supplied hash: read-lock for the lookup,
// upgrade to write only when `key` is absent and must be constructed via `f`.
template <class K = key_type, class F>
iterator lazy_emplace_with_hash(const key_arg<K>& key, size_t hashval, F&& f) {
    Inner& inner = sets_[subidx(hashval)];
    auto& set = inner.set_;
    ReadWriteLock m(inner);

    size_t offset = set._find_key(key, hashval);
    if (offset == (size_t)-1 && m.switch_to_unique()) {
        // we did an unlock/lock, and another thread could have inserted the same
        // key, so we need to do a find() again.
        offset = set._find_key(key, hashval);
    }
    if (offset == (size_t)-1) {
        offset = set.prepare_insert(hashval);
        set.lazy_emplace_at(offset, std::forward<F>(f));
        set.set_ctrl(offset, H2(hashval));
    }
    return make_iterator(&inner, set.iterator_at(offset));
}
// Convenience overload: hashes `key` and forwards to the hash-accepting version.
template <class K = key_type, class F>
iterator lazy_emplace(const key_arg<K>& key, F&& f) {
    return lazy_emplace_with_hash(key, this->hash(key), std::forward<F>(f));
}
// emplace_single
......@@ -3206,14 +3260,13 @@ public:
void emplace_single_with_hash(const key_arg<K>& key, size_t hashval, F&& f) {
    // Delegate the insert-or-erase toggle to the owning submap, under its write lock.
    Inner& inner = sets_[subidx(hashval)];
    auto& set = inner.set_;
    UniqueLock m(inner);
    set.emplace_single_with_hash(key, hashval, std::forward<F>(f));
}
// Convenience overload: hashes `key` and forwards to the hash-accepting version.
template <class K = key_type, class F>
void emplace_single(const key_arg<K>& key, F&& f) {
    emplace_single_with_hash<K, F>(key, this->hash(key), std::forward<F>(f));
}
// if set contains key, lambda is called with the value_type (under read lock protection),
......@@ -3222,7 +3275,7 @@ public:
// If the set contains `key`, call `f` with the value_type under read-lock
// protection and return true; otherwise return false.
template <class K = key_type, class F>
bool if_contains(const key_arg<K>& key, F&& f) const {
    return const_cast<parallel_hash_set*>(this)->template
        modify_if_impl<K, F, SharedLock>(key, std::forward<F>(f));
}
// if set contains key, lambda is called with the value_type without read lock protection,
......@@ -3240,7 +3293,7 @@ public:
// ----------------------------------------------------------------------------------------------------
// If the set contains `key`, call `f` with the value_type under write-lock
// protection (the value may be modified) and return true; else return false.
template <class K = key_type, class F>
bool modify_if(const key_arg<K>& key, F&& f) {
    return modify_if_impl<K, F, UniqueLock>(key, std::forward<F>(f));
}
// -----------------------------------------------------------------------------------------
......@@ -3264,23 +3317,33 @@ public:
// ----------------------------------------------------------------------------------------------------
// Erase `key` only if `f(value)` returns true; returns whether an erase happened.
// erase_if_impl returns a count (0/1); `!!` collapses it to bool.
template <class K = key_type, class F>
bool erase_if(const key_arg<K>& key, F&& f) {
    return !!erase_if_impl<K, F, ReadWriteLock>(key, std::forward<F>(f));
}
template <class K = key_type, class F, class L>
bool erase_if_impl(const key_arg<K>& key, F&& f) {
size_type erase_if_impl(const key_arg<K>& key, F&& f) {
#if __cplusplus >= 201703L
static_assert(std::is_invocable<F, value_type&>::value);
#endif
L m;
auto it = this->template find<K, L>(key, this->hash(key), m);
if (it == this->end()) return false;
auto hashval = this->hash(key);
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
L m(inner);
auto it = set.find(key, hashval);
if (it == set.end())
return 0;
if (m.switch_to_unique()) {
// we did an unlock/lock, need to call `find()` again
it = set.find(key, hashval);
if (it == set.end())
return 0;
}
if (std::forward<F>(f)(const_cast<value_type &>(*it)))
{
this->erase(it);
return true;
set._erase(it);
return 1;
}
return false;
return 0;
}
// if map already contains key, the first lambda is called with the mapped value (under
......@@ -3291,14 +3354,18 @@ public:
// ---------------------------------------------------------------------------------------
// If the map already contains `key`, call `fExists` with the value (under the
// lock held by find_or_prepare_insert_with_hash); otherwise call `fEmplace`,
// which must invoke the constructor, then publish the slot via set_ctrl.
// Returns true when a new element was created.
template <class K = key_type, class FExists, class FEmplace>
bool lazy_emplace_l(const key_arg<K>& key, FExists&& fExists, FEmplace&& fEmplace) {
    size_t hashval = this->hash(key);
    ReadWriteLock m;
    auto res = this->find_or_prepare_insert_with_hash(hashval, key, m);
    Inner* inner = std::get<0>(res);
    if (std::get<2>(res)) {
        // key not found. call fEmplace lambda which should invoke passed constructor
        inner->set_.lazy_emplace_at(std::get<1>(res), std::forward<FEmplace>(fEmplace));
        inner->set_.set_ctrl(std::get<1>(res), H2(hashval));
    } else {
        // key found. Call fExists lambda. In case of the set, non "key" part of value_type can be changed
        auto it = this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res)));
        std::forward<FExists>(fExists)(const_cast<value_type &>(*it));
    }
    return std::get<2>(res);
}
......@@ -3313,17 +3380,68 @@ public:
// Visit every element read-only, locking one submap at a time (read lock).
template <class F>
void for_each(F&& fCallback) const {
    for (auto const& inner : sets_) {
        SharedLock m(const_cast<Inner&>(inner));
        std::for_each(inner.set_.begin(), inner.set_.end(), fCallback);
    }
}
// this version allows to modify the values
void for_each_m(std::function<void (value_type&)> && fCallback) {
template <class F>
void for_each_m(F&& fCallback) {
for (auto& inner : sets_) {
typename Lockable::UniqueLock m(const_cast<Inner&>(inner));
UniqueLock m(inner);
std::for_each(inner.set_.begin(), inner.set_.end(), fCallback);
}
}
#if __cplusplus >= 201703L
    // C++17 parallel-algorithm variant: submaps are visited according to
    // `policy` (e.g. std::execution::par); each submap is read-locked while
    // its elements are visited.
    template <class ExecutionPolicy, class F>
    void for_each(ExecutionPolicy&& policy, F&& fCallback) const {
        std::for_each(
            std::forward<ExecutionPolicy>(policy), sets_.begin(), sets_.end(),
            [&](auto const& inner) {
                SharedLock m(const_cast<Inner&>(inner));
                std::for_each(inner.set_.begin(), inner.set_.end(), fCallback);
            }
        );
    }

    // Mutating variant: write-locks each submap while visiting its elements.
    template <class ExecutionPolicy, class F>
    void for_each_m(ExecutionPolicy&& policy, F&& fCallback) {
        std::for_each(
            std::forward<ExecutionPolicy>(policy), sets_.begin(), sets_.end(),
            [&](auto& inner) {
                UniqueLock m(inner);
                std::for_each(inner.set_.begin(), inner.set_.end(), fCallback);
            }
        );
    }
#endif
// Extension API: access internal submaps by index
// under lock protection
// ex: m.with_submap(i, [&](const Map::EmbeddedSet& set) {
// for (auto& p : set) { ...; }});
// -------------------------------------------------
// Invoke `fCallback` with a const reference to submap `idx`'s EmbeddedSet,
// holding that submap's read lock for the duration of the call.
template <class F>
void with_submap(size_t idx, F&& fCallback) const {
    const Inner& inner = sets_[idx];
    const auto& set = inner.set_;
    SharedLock m(const_cast<Inner&>(inner));
    fCallback(set);
}
// Mutable variant of with_submap: write-locks submap `idx` around the callback.
template <class F>
void with_submap_m(size_t idx, F&& fCallback) {
    Inner& inner = sets_[idx];
    auto& set = inner.set_;
    UniqueLock m(inner);
    fCallback(set);
}
// unsafe, for internal use only
// Returns submap `idx` with NO locking; callers must synchronize themselves.
Inner& get_inner(size_t idx) {
    return sets_[idx];
}
// Extension API: support for heterogeneous keys.
......@@ -3339,17 +3457,8 @@ public:
// --------------------------------------------------------------------
// Erase `key` unconditionally; returns the number of elements removed (0 or 1).
// Implemented via erase_if_impl with an always-true predicate so the
// read-then-upgrade locking logic lives in exactly one place.
template <class K = key_type>
size_type erase(const key_arg<K>& key) {
    auto always_erase = [](const value_type&){ return true; };
    return erase_if_impl<K, decltype(always_erase), ReadWriteLock>(key, std::move(always_erase));
}
// --------------------------------------------------------------------
......@@ -3370,11 +3479,11 @@ public:
//
// Do not use erase APIs taking iterators when accessing the map concurrently
// --------------------------------------------------------------------
// Erase by iterator WITHOUT locking — the caller must already hold the
// submap's lock (or guarantee exclusive access). Unsafe for concurrent use.
void _erase(iterator it) {
    Inner* inner = it.inner_;
    assert(inner != nullptr);
    auto& set = inner->set_;
    // UniqueLock m(*inner); // don't lock here
    set._erase(it.it_);
}
......@@ -3427,15 +3536,20 @@ public:
return it == end() ? node_type() : extract(const_iterator{it});
}
void swap(parallel_hash_set& that) noexcept(
IsNoThrowSwappable<EmbeddedSet>() &&
template<class Mtx2_>
void swap(parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc>& that)
noexcept(IsNoThrowSwappable<EmbeddedSet>() &&
(!AllocTraits::propagate_on_container_swap::value ||
IsNoThrowSwappable<allocator_type>())) {
IsNoThrowSwappable<allocator_type>(typename AllocTraits::propagate_on_container_swap{})))
{
using std::swap;
using Lockable2 = phmap::LockableImpl<Mtx2_>;
for (size_t i=0; i<num_tables; ++i)
{
typename Lockable::UniqueLocks l(sets_[i], that.sets_[i]);
swap(sets_[i].set_, that.sets_[i].set_);
typename Lockable::UniqueLock l(sets_[i]);
typename Lockable2::UniqueLock l2(that.get_inner(i));
swap(sets_[i].set_, that.get_inner(i).set_);
}
}
......@@ -3443,7 +3557,7 @@ public:
size_t nn = n / num_tables;
for (auto& inner : sets_)
{
typename Lockable::UniqueLock m(inner);
UniqueLock m(inner);
inner.set_.rehash(nn);
}
}
......@@ -3451,7 +3565,7 @@ public:
// Reserve capacity for at least `n` elements across all submaps.
// Normalizes the per-submap capacity, then scales by the real submap count
// (the old code hard-coded 16, wrong for any other num_tables).
void reserve(size_t n)
{
    size_t target = GrowthToLowerboundCapacity(n);
    size_t normalized = num_tables * NormalizeCapacity(n / num_tables);
    rehash(normalized > target ? normalized : target);
}
......@@ -3479,7 +3593,7 @@ public:
// Prefetch the cache lines for `hashval`'s bucket in the owning submap,
// under that submap's read lock.
void prefetch_hash(size_t hashval) const {
    const Inner& inner = sets_[subidx(hashval)];
    const auto& set = inner.set_;
    SharedLock m(const_cast<Inner&>(inner));
    set.prefetch_hash(hashval);
}
......@@ -3498,7 +3612,7 @@ public:
// --------------------------------------------------------------------
// Find with a precomputed hash; a default-constructed read lock is handed to
// the locking overload, which binds it to the correct submap.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hashval) {
    SharedLock m;
    return find(key, hashval, m);
}
......@@ -3546,7 +3660,7 @@ public:
size_t sz = 0;
for (const auto& inner : sets_)
{
typename Lockable::SharedLock m(const_cast<Inner&>(inner));
SharedLock m(const_cast<Inner&>(inner));
sz += inner.set_.bucket_count();
}
return sz;
......@@ -3574,8 +3688,11 @@ public:
return !(a == b);
}
template<class Mtx2_>
friend void swap(parallel_hash_set& a,
parallel_hash_set& b) noexcept(noexcept(a.swap(b))) {
parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc>& b)
noexcept(noexcept(a.swap(b)))
{
a.swap(b);
}
......@@ -3639,7 +3756,7 @@ private:
// Rehash every submap in place to reclaim tombstones, one write lock at a time.
void drop_deletes_without_resize() PHMAP_ATTRIBUTE_NOINLINE {
    for (auto& inner : sets_)
    {
        UniqueLock m(inner);
        inner.set_.drop_deletes_without_resize();
    }
}
......@@ -3648,26 +3765,28 @@ private:
size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, elem);
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
typename Lockable::SharedLock m(const_cast<Inner&>(inner));
SharedLock m(const_cast<Inner&>(inner));
return set.has_element(elem, hashval);
}
// TODO(alkis): Optimize this assuming *this and that don't overlap.
// --------------------------------------------------------------------
// Move-assign when the allocator propagates: move-construct a temporary
// (taking `that`'s allocator) and swap with it. Accepts a source with a
// different mutex type (Mtx2_).
template<class Mtx2_>
parallel_hash_set& move_assign(parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc>&& that, std::true_type) {
    parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc> tmp(std::move(that));
    swap(tmp);
    return *this;
}
// Move-assign when the allocator does NOT propagate: the temporary is built
// with our own allocator, forcing element-wise moves where allocators differ.
template<class Mtx2_>
parallel_hash_set& move_assign(parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc>&& that, std::false_type) {
    parallel_hash_set<N, RefSet, Mtx2_, Policy, Hash, Eq, Alloc> tmp(std::move(that), alloc_ref());
    swap(tmp);
    return *this;
}
protected:
template <class K = key_type, class L = typename Lockable::SharedLock>
template <class K = key_type, class L = SharedLock>
pointer find_ptr(const key_arg<K>& key, size_t hashval, L& mutexlock)
{
Inner& inner = sets_[subidx(hashval)];
......@@ -3676,7 +3795,7 @@ protected:
return set.find_ptr(key, hashval);
}
template <class K = key_type, class L = typename Lockable::SharedLock>
template <class K = key_type, class L = SharedLock>
iterator find(const key_arg<K>& key, size_t hashval, L& mutexlock) {
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
......@@ -3686,17 +3805,26 @@ protected:
template <class K>
std::tuple<Inner*, size_t, bool>
find_or_prepare_insert_with_hash(size_t hashval, const K& key, typename Lockable::UniqueLock &mutexlock) {
find_or_prepare_insert_with_hash(size_t hashval, const K& key, ReadWriteLock &mutexlock) {
Inner& inner = sets_[subidx(hashval)];
auto& set = inner.set_;
mutexlock = std::move(typename Lockable::UniqueLock(inner));
auto p = set.find_or_prepare_insert(key, hashval); // std::pair<size_t, bool>
return std::make_tuple(&inner, p.first, p.second);
mutexlock = std::move(ReadWriteLock(inner));
size_t offset = set._find_key(key, hashval);
if (offset == (size_t)-1 && mutexlock.switch_to_unique()) {
// we did an unlock/lock, and another thread could have inserted the same key, so we need to
// do a find() again.
offset = set._find_key(key, hashval);
}
if (offset == (size_t)-1) {
offset = set.prepare_insert(hashval);
return std::make_tuple(&inner, offset, true);
}
return std::make_tuple(&inner, offset, false);
}
template <class K>
std::tuple<Inner*, size_t, bool>
find_or_prepare_insert(const K& key, typename Lockable::UniqueLock &mutexlock) {
find_or_prepare_insert(const K& key, ReadWriteLock &mutexlock) {
return find_or_prepare_insert_with_hash<K>(this->hash(key), key, mutexlock);
}
......@@ -3765,6 +3893,9 @@ class parallel_hash_map : public parallel_hash_set<N, RefSet, Mtx_, Policy, Hash
using Base = typename parallel_hash_map::parallel_hash_set;
using Lockable = phmap::LockableImpl<Mtx_>;
using UniqueLock = typename Lockable::UniqueLock;
using SharedLock = typename Lockable::SharedLock;
using ReadWriteLock = typename Lockable::ReadWriteLock;
public:
using key_type = typename Policy::key_type;
......@@ -3914,20 +4045,41 @@ public:
// ---------------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------------
// Emplace `k` (constructed from args...) if absent, otherwise invoke `f` on
// the existing element — all under the submap lock. Returns true iff a new
// element was inserted.
// (fix: removed interleaved pre-refactor lines — old UniqueLock setup, old
// braceless if/else — keeping the ReadWriteLock version that publishes the
// ctrl byte after construction)
template <class K = key_type, class F, class... Args>
bool try_emplace_l(K&& k, F&& f, Args&&... args) {
    size_t hashval = this->hash(k);
    ReadWriteLock m;
    auto res = this->find_or_prepare_insert_with_hash(hashval, k, m);
    typename Base::Inner *inner = std::get<0>(res);
    if (std::get<2>(res)) {
        // Fresh slot: construct in place, then publish its ctrl byte (H2).
        inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct,
                               std::forward_as_tuple(std::forward<K>(k)),
                               std::forward_as_tuple(std::forward<Args>(args)...));
        inner->set_.set_ctrl(std::get<1>(res), H2(hashval));
    } else {
        auto it = this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res)));
        // call lambda. in case of the set, non "key" part of value_type can be changed
        std::forward<F>(f)(const_cast<value_type &>(*it));
    }
    return std::get<2>(res);
}
// returns {pointer, bool} instead of {iterator, bool} per try_emplace.
// useful for node-based containers, since the pointer is not invalidated by concurrent insert etc.
template <class K = key_type, class... Args>
std::pair<typename parallel_hash_map::parallel_hash_set::pointer, bool> try_emplace_p(K&& k, Args&&... args) {
size_t hashval = this->hash(k);
ReadWriteLock m;
auto res = this->find_or_prepare_insert_with_hash(hashval, k, m);
typename Base::Inner *inner = std::get<0>(res);
if (std::get<2>(res)) {
inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
inner->set_.set_ctrl(std::get<1>(res), H2(hashval));
}
auto it = this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res)));
return {&*it, std::get<2>(res)};
}
// ----------- end of phmap extensions --------------------------
template <class K = key_type, class P = Policy, K* = nullptr>
......@@ -3944,12 +4096,14 @@ private:
template <class K, class V>
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
typename Lockable::UniqueLock m;
auto res = this->find_or_prepare_insert(k, m);
size_t hashval = this->hash(k);
ReadWriteLock m;
auto res = this->find_or_prepare_insert_with_hash(hashval, k, m);
typename Base::Inner *inner = std::get<0>(res);
if (std::get<2>(res))
if (std::get<2>(res)) {
inner->set_.emplace_at(std::get<1>(res), std::forward<K>(k), std::forward<V>(v));
else
inner->set_.set_ctrl(std::get<1>(res), H2(hashval));
} else
Policy::value(&*inner->set_.iterator_at(std::get<1>(res))) = std::forward<V>(v);
return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))),
std::get<2>(res)};
......@@ -3957,26 +4111,21 @@ private:
// Hash the key once and delegate to try_emplace_impl_with_hash().
// (fix: removed the stale pre-refactor body — UniqueLock + inline emplace —
// that was left interleaved with the new delegating implementation)
template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
    return try_emplace_impl_with_hash(this->hash(k), std::forward<K>(k),
                                      std::forward<Args>(args)...);
}
// Emplace `k` if absent (using the precomputed hash), under a ReadWriteLock
// that is promoted to exclusive only when an insert is actually needed.
// Returns {iterator to element, inserted?}.
// (fix: removed interleaved pre-refactor lines — old UniqueLock declaration
// and braceless `if` — keeping the version that sets the ctrl byte)
template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl_with_hash(size_t hashval, K&& k, Args&&... args) {
    ReadWriteLock m;
    auto res = this->find_or_prepare_insert_with_hash(hashval, k, m);
    typename Base::Inner *inner = std::get<0>(res);
    if (std::get<2>(res)) {
        inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct,
                               std::forward_as_tuple(std::forward<K>(k)),
                               std::forward_as_tuple(std::forward<Args>(args)...));
        // Publish the slot's ctrl byte only after the value is constructed.
        inner->set_.set_ctrl(std::get<1>(res), H2(hashval));
    }
    return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))),
            std::get<2>(res)};
}
......@@ -4095,6 +4244,7 @@ struct FlatHashSetPolicy
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
using is_flat = std::true_type;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
......@@ -4137,6 +4287,7 @@ struct FlatHashMapPolicy
using key_type = K;
using mapped_type = V;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
using is_flat = std::true_type;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
......@@ -4219,6 +4370,7 @@ struct NodeHashSetPolicy
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
using is_flat = std::false_type;
template <class Allocator, class... Args>
static T* new_element(Allocator* alloc, Args&&... args) {
......@@ -4264,6 +4416,7 @@ public:
using key_type = Key;
using mapped_type = Value;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
using is_flat = std::false_type;
template <class Allocator, class... Args>
static value_type* new_element(Allocator* alloc, Args&&... args) {
......@@ -4309,28 +4462,26 @@ public:
#if PHMAP_HAVE_STD_STRING_VIEW
// support char16_t wchar_t ....
// Supports heterogeneous lookup for basic_string<T>-like elements.
// Transparent hash/equality pair for basic_string_view<CharT> keys (char16_t,
// wchar_t, ...): the hash reinterprets the character data as raw bytes and
// feeds it to std::hash<std::string_view>, so basic_string<CharT>,
// basic_string_view<CharT> and CharT* all hash identically.
// (fix: removed the stale pre-refactor `StringHashT` struct and the old
// wrapper `StringHashEqT { using Hash = StringHashT<CharT>; }` that were
// left interleaved with the merged version)
template<class CharT>
struct StringHashEqT
{
    struct Hash
    {
        using is_transparent = void;
        size_t operator()(std::basic_string_view<CharT> v) const {
            std::string_view bv{
                reinterpret_cast<const char*>(v.data()), v.size() * sizeof(CharT)};
            return std::hash<std::string_view>()(bv);
        }
    };

    struct Eq {
        using is_transparent = void;
        bool operator()(std::basic_string_view<CharT> lhs,
                        std::basic_string_view<CharT> rhs) const {
            return lhs == rhs;
        }
    };
......@@ -4367,7 +4518,9 @@ struct HashEq<T*>
using is_transparent = void;
template <class U>
size_t operator()(const U& ptr) const {
    // we want phmap::Hash<T*> and not phmap::Hash<const T*>
    // so "struct std::hash<T*>" override works; the uintptr_t round-trip
    // strips constness from the normalized pointer before hashing.
    // (fix: removed stale pre-refactor `return phmap::Hash<const T*>...` line)
    return phmap::Hash<T*>{}((T*)(uintptr_t)HashEq::ToPtr(ptr));
}
};
......@@ -4424,7 +4577,7 @@ struct HashtableDebugAccess<Set, typename std::enable_if<has_member_type_raw_has
auto seq = set.probe(hashval);
while (true) {
priv::Group g{set.ctrl_ + seq.offset()};
for (uint32_t i : g.Match(priv::H2(hashval))) {
for (uint32_t i : g.Match((h2_t)priv::H2(hashval))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
......@@ -4636,7 +4789,7 @@ public:
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
// * Returns `void` from the `_erase(iterator)` overload.
// -----------------------------------------------------------------------------
template <class T, class Hash, class Eq, class Alloc> // default values in phmap_fwd_decl.h
class node_hash_set
......@@ -4701,7 +4854,7 @@ public:
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash map.
// * Returns `void` from the `erase(iterator)` overload.
// * Returns `void` from the `_erase(iterator)` overload.
// -----------------------------------------------------------------------------
template <class Key, class Value, class Hash, class Eq, class Alloc> // default values in phmap_fwd_decl.h
class node_hash_map
......
......@@ -96,19 +96,6 @@ struct VoidTImpl {
using type = void;
};
// This trick to retrieve a default alignment is necessary for our
// implementation of aligned_storage_t to be consistent with any implementation
// of std::aligned_storage.
// ---------------------------------------------------------------------------
template <size_t Len, typename T = std::aligned_storage<Len>>
struct default_alignment_of_aligned_storage;
template <size_t Len, size_t Align>
struct default_alignment_of_aligned_storage<Len,
std::aligned_storage<Len, Align>> {
static constexpr size_t value = Align;
};
// NOTE: The `is_detected` family of templates here differ from the library
// fundamentals specification in that for library fundamentals, `Op<Args...>` is
// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
......@@ -236,29 +223,36 @@ struct disjunction<> : std::false_type {};
template <typename T>
struct negation : std::integral_constant<bool, !T::value> {};
// Detect very old gcc (< 5), which lacks the std trivially-* traits: fall
// back to the compiler intrinsics there, otherwise alias the std versions.
// (fix: removed the stale pre-refactor intrinsic-based traits —
// is_trivially_destructible, is_trivially_default_constructible and the
// unconditional copy-ctor/assign traits — that were interleaved with the
// new PHMAP_OLD_GCC-gated block)
#if defined(__GNUC__) && __GNUC__ < 5 && !defined(__clang__) && !defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #define PHMAP_OLD_GCC 1
#else
    #define PHMAP_OLD_GCC 0
#endif

#if PHMAP_OLD_GCC

template <typename T>
struct is_trivially_copy_constructible
    : std::integral_constant<bool,
          __has_trivial_copy(typename std::remove_reference<T>::type) &&
          std::is_copy_constructible<T>::value &&
          std::is_trivially_destructible<T>::value> {};

template <typename T>
struct is_trivially_copy_assignable
    : std::integral_constant<bool,
          __has_trivial_assign(typename std::remove_reference<T>::type) &&
          phmap::is_copy_assignable<T>::value> {};

template <typename T>
struct is_trivially_copyable
    : std::integral_constant<bool, __has_trivial_copy(typename std::remove_reference<T>::type)> {};

#else

template <typename T> using is_trivially_copy_constructible = std::is_trivially_copy_constructible<T>;
template <typename T> using is_trivially_copy_assignable    = std::is_trivially_copy_assignable<T>;
template <typename T> using is_trivially_copyable           = std::is_trivially_copyable<T>;

#endif
// -----------------------------------------------------------------------------
// C++14 "_t" trait aliases
// -----------------------------------------------------------------------------
......@@ -308,9 +302,15 @@ using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
// Self-contained replacement for std::aligned_storage (deprecated in C++23):
// `type` is a POD buffer of Len bytes with the requested alignment.
// (fix: removed the stale pre-refactor alias that still referenced the
// deleted type_traits_internal::default_alignment_of_aligned_storage)
template<std::size_t Len, std::size_t Align>
struct aligned_storage {
    struct type {
        alignas(Align) unsigned char data[Len];
    };
};

template<std::size_t Len, std::size_t Align>
using aligned_storage_t = typename aligned_storage<Len, Align>::type;
template <typename T>
using decay_t = typename std::decay<T>::type;
......@@ -652,83 +652,87 @@ namespace phmap {
namespace base_internal {
namespace {
// Centralized raise-or-abort switch: with exceptions enabled the macros
// throw the named exception; otherwise they consume the message and call
// std::abort().
// (fix: removed the stale pre-refactor `template <typename T> Throw(...)`
// functions that were left interleaved with the new macros and bodies)
#ifdef PHMAP_HAVE_EXCEPTIONS
    #define PHMAP_THROW_IMPL_MSG(e, message) throw e(message)
    #define PHMAP_THROW_IMPL(e) throw e()
#else
    #define PHMAP_THROW_IMPL_MSG(e, message) do { (void)(message); std::abort(); } while(0)
    #define PHMAP_THROW_IMPL(e) std::abort()
#endif
}  // namespace

// Out-of-line helpers that raise the named std:: exception (or abort when
// exceptions are disabled); keeping the throw here keeps callers' code small.
static inline void ThrowStdLogicError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::logic_error, what_arg);
}
static inline void ThrowStdLogicError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::logic_error, what_arg);
}
static inline void ThrowStdInvalidArgument(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::invalid_argument, what_arg);
}
static inline void ThrowStdInvalidArgument(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::invalid_argument, what_arg);
}
static inline void ThrowStdDomainError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::domain_error, what_arg);
}
static inline void ThrowStdDomainError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::domain_error, what_arg);
}
static inline void ThrowStdLengthError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::length_error, what_arg);
}
static inline void ThrowStdLengthError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::length_error, what_arg);
}
static inline void ThrowStdOutOfRange(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::out_of_range, what_arg);
}
static inline void ThrowStdOutOfRange(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::out_of_range, what_arg);
}
static inline void ThrowStdRuntimeError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::runtime_error, what_arg);
}
static inline void ThrowStdRuntimeError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::runtime_error, what_arg);
}
static inline void ThrowStdRangeError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::range_error, what_arg);
}
static inline void ThrowStdRangeError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::range_error, what_arg);
}
static inline void ThrowStdOverflowError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::overflow_error, what_arg);
}
static inline void ThrowStdOverflowError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::overflow_error, what_arg);
}
static inline void ThrowStdUnderflowError(const std::string& what_arg) {
    PHMAP_THROW_IMPL_MSG(std::underflow_error, what_arg);
}
static inline void ThrowStdUnderflowError(const char* what_arg) {
    PHMAP_THROW_IMPL_MSG(std::underflow_error, what_arg);
}

static inline void ThrowStdBadFunctionCall() {
    PHMAP_THROW_IMPL(std::bad_function_call);
}

static inline void ThrowStdBadAlloc() {
    PHMAP_THROW_IMPL(std::bad_alloc);
}
} // namespace base_internal
} // namespace phmap
......@@ -1815,9 +1819,10 @@ protected:
// Also, we should be checking is_trivially_copyable here, which is not
// supported now, so we use is_trivially_* traits instead.
// Forward declaration: `unused` selects the trivially-copyable specialization
// of optional_data when T can be copied/destroyed trivially.
// (fix: removed the stale pre-refactor default-argument continuation lines
// that were left interleaved with the reformatted version)
template <typename T,
          bool unused =
              phmap::is_trivially_copy_constructible<T>::value &&
              phmap::is_trivially_copy_assignable<typename std::remove_cv<T>::type>::value &&
              std::is_trivially_destructible<T>::value>
class optional_data;
// Trivially copyable types
......@@ -2048,6 +2053,11 @@ struct optional_hash_base<T, decltype(std::hash<phmap::remove_const_t<T> >()(
// -----------------------------------------------------------------------------
// phmap::optional class definition
// -----------------------------------------------------------------------------
// Very old gcc (< 5, see PHMAP_OLD_GCC) rejects `noexcept` on defaulted
// special members, so the annotation is compiled out there and applied on
// every other compiler.
#if PHMAP_OLD_GCC
#define PHMAP_OPTIONAL_NOEXCEPT
#else
#define PHMAP_OPTIONAL_NOEXCEPT noexcept
#endif
template <typename T>
class optional : private optional_internal::optional_data<T>,
......@@ -2074,7 +2084,7 @@ public:
optional(const optional& src) = default;
// Move constructor, standard semantics
// (fix: removed stale pre-refactor `optional(optional&& src) = default;`
// left next to the PHMAP_OPTIONAL_NOEXCEPT-annotated version)
optional(optional&& src) PHMAP_OPTIONAL_NOEXCEPT = default;
// Constructs a non-empty `optional` direct-initialized value of type `T` from
// the arguments `std::forward<Args>(args)...` within the `optional`.
......@@ -2214,7 +2224,7 @@ public:
optional& operator=(const optional& src) = default;
// Move assignment operator, standard semantics
// (fix: removed stale pre-refactor move-assignment line left next to the
// PHMAP_OPTIONAL_NOEXCEPT-annotated version)
optional& operator=(optional&& src) PHMAP_OPTIONAL_NOEXCEPT = default;
// Value assignment operators
template <
......@@ -4154,180 +4164,6 @@ public:
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
};
} // namespace priv
} // namespace phmap
// ---------------------------------------------------------------------------
// compressed_tuple.h
// ---------------------------------------------------------------------------
#ifdef _MSC_VER
// We need to mark these classes with this declspec to ensure that
// CompressedTuple happens.
#define PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
#else // _MSC_VER
#define PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif // _MSC_VER
namespace phmap {
namespace priv {
template <typename... Ts>
class CompressedTuple;
namespace internal_compressed_tuple {
template <typename D, size_t I>
struct Elem;
template <typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I>
: std::tuple_element<I, std::tuple<B...>> {};
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
// ---------------------------------------------------------------------------
// Use the __is_final intrinsic if available. Where it's not available, classes
// declared with the 'final' specifier cannot be used as CompressedTuple
// elements.
// TODO(sbenza): Replace this with std::is_final in C++14.
// ---------------------------------------------------------------------------
template <typename T>
constexpr bool IsFinal() {
#if defined(__clang__) || defined(__GNUC__)
return __is_final(T);
#else
return false;
#endif
}
template <typename T>
constexpr bool ShouldUseBase() {
#ifdef __INTEL_COMPILER
// avoid crash in Intel compiler
// assertion failed at: "shared/cfe/edgcpfe/lower_init.c", line 7013
return false;
#else
return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
#endif
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
// ------------------------------------------------
template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
struct Storage
{
using T = ElemT<D, I>;
T value;
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : value(phmap::forward<T>(v)) {}
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return phmap::move(*this).value; }
T&& get() && { return std::move(*this).value; }
};
template <typename D, size_t I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
: ElemT<D, I>
{
using T = internal_compressed_tuple::ElemT<D, I>;
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : T(phmap::forward<T>(v)) {}
constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return phmap::move(*this); }
T&& get() && { return std::move(*this); }
};
template <typename D, typename I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
template <typename... Ts, size_t... I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
CompressedTupleImpl<CompressedTuple<Ts...>, phmap::index_sequence<I...>>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
: Storage<CompressedTuple<Ts...>,
std::integral_constant<size_t, I>::value>...
{
constexpr CompressedTupleImpl() = default;
explicit constexpr CompressedTupleImpl(Ts&&... args)
: Storage<CompressedTuple<Ts...>, I>(phmap::forward<Ts>(args))... {}
};
} // namespace internal_compressed_tuple
// ---------------------------------------------------------------------------
// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
// empty classes, then CompressedTuple<Ts...> is itself an empty class.
//
// To access the members, use member .get<N>() function.
//
// Eg:
// phmap::priv::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
// t3);
// assert(value.get<0>() == 7);
// T1& t1 = value.get<1>();
// const T2& t2 = value.get<2>();
// ...
//
// https://en.cppreference.com/w/cpp/language/ebo
// ---------------------------------------------------------------------------
template <typename... Ts>
class PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
CompressedTuple<Ts...>, phmap::index_sequence_for<Ts...>>
{
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
public:
constexpr CompressedTuple() = default;
explicit constexpr CompressedTuple(Ts... base)
: CompressedTuple::CompressedTupleImpl(phmap::forward<Ts>(base)...) {}
template <int I>
ElemT<I>& get() & {
return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
}
template <int I>
constexpr const ElemT<I>& get() const& {
return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
}
template <int I>
ElemT<I>&& get() && {
return std::move(*this)
.internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return phmap::move(*this)
.internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
}
};
// Explicit specialization for a zero-element tuple
// (needed to avoid ambiguous overloads for the default constructor).
// ---------------------------------------------------------------------------
template <>
class PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
} // namespace priv
} // namespace phmap
namespace phmap {
namespace priv {
#ifdef _MSC_VER
#pragma warning(push)
......@@ -4541,8 +4377,8 @@ namespace memory_internal {
// ----------------------------------------------------------------------------
// Fallback OffsetOf: for pair-like types that are not standard layout, the
// member offsets are unknowable, signalled by the (size_t)-1 sentinel.
// (fix: removed duplicated kFirst/kSecond definitions — the C-style-cast
// originals were left next to the static_cast replacements)
template <class Pair, class = std::true_type>
struct OffsetOf {
    static constexpr size_t kFirst  = static_cast<size_t>(-1);
    static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
......@@ -4792,9 +4628,14 @@ public:
DoNothing(mutex_type&, phmap::try_to_lock_t) {}
template<class T> explicit DoNothing(T&&) {}
DoNothing& operator=(const DoNothing&) { return *this; }
// No-op lock interface so NullMutex-backed maps compile with zero locking
// cost. owns_lock() reports true and switch_to_unique() reports false (no
// unlock/relock gap ever happens), so callers skip their re-check path.
// (fix: removed stale non-noexcept operator= and swap left next to the new
// noexcept versions)
DoNothing& operator=(DoNothing&&) noexcept { return *this; }
void swap(DoNothing &) noexcept {}
bool owns_lock() const noexcept { return true; }
void lock() {}
void unlock() {}
void lock_shared() {}
void unlock_shared() {}
bool switch_to_unique() { return false; }
};
// ----------------------------------------------------
......@@ -4823,13 +4664,13 @@ public:
m_->try_lock();
}
// Move construction/assignment are noexcept so standard containers and
// algorithms move these guards instead of copying. The moved-from guard is
// left empty (null mutex, unlocked) so its destructor is a no-op.
// (fix: removed stale non-noexcept ctor/operator= signatures left next to
// the new noexcept versions)
WriteLock(WriteLock &&o) noexcept :
    m_(std::move(o.m_)), locked_(std::move(o.locked_)) {
    o.locked_ = false;
    o.m_ = nullptr;
}

WriteLock& operator=(WriteLock&& other) noexcept {
    WriteLock temp(std::move(other));
    swap(temp);
    return *this;
......@@ -4870,6 +4711,8 @@ public:
mutex_type *mutex() const noexcept { return m_; }
bool switch_to_unique() { return false; }
private:
mutex_type *m_;
bool locked_;
......@@ -4901,13 +4744,13 @@ public:
m_->try_lock_shared();
}
// Move construction/assignment are noexcept (see WriteLock for rationale);
// the moved-from guard is emptied so its destructor is a no-op.
// (fix: removed stale non-noexcept ctor/operator= signatures left next to
// the new noexcept versions)
ReadLock(ReadLock &&o) noexcept :
    m_(std::move(o.m_)), locked_(std::move(o.locked_)) {
    o.locked_ = false;
    o.m_ = nullptr;
}

ReadLock& operator=(ReadLock&& other) noexcept {
    ReadLock temp(std::move(other));
    swap(temp);
    return *this;
......@@ -4948,11 +4791,103 @@ public:
mutex_type *mutex() const noexcept { return m_; }
bool switch_to_unique() { return false; }
private:
mutex_type *m_;
bool locked_;
};
// ----------------------------------------------------
// A lock guard that normally holds its mutex in *shared* (read) mode and can
// be promoted to exclusive (write) mode via switch_to_unique().
//
// NOTE(review): promotion is NOT atomic — switch_to_unique() fully releases
// the shared lock before acquiring the exclusive one, so anything observed
// under the shared lock must be re-validated afterwards (callers such as
// find_or_prepare_insert_with_hash re-run their lookup for exactly this
// reason). At most one of locked_ / locked_shared_ is true at any time.
class ReadWriteLock
{
public:
    using mutex_type = MutexType;

    // Empty guard: owns no mutex (m_ == nullptr) until move-assigned.
    ReadWriteLock() : m_(nullptr), locked_(false), locked_shared_(false) {}

    // Immediately acquires `m` in shared (read) mode.
    explicit ReadWriteLock(mutex_type &m) : m_(&m), locked_(false), locked_shared_(true) {
        m_->lock_shared();
    }

    // Adopts `m` without locking it (std::defer_lock-style).
    ReadWriteLock(mutex_type& m, defer_lock_t) noexcept :
        m_(&m), locked_(false), locked_shared_(false)
    {}

    // Transfers ownership; the source is left empty and unlocked.
    ReadWriteLock(ReadWriteLock &&o) noexcept :
        m_(std::move(o.m_)), locked_(o.locked_), locked_shared_(o.locked_shared_) {
        o.locked_ = false;
        o.locked_shared_ = false;
        o.m_ = nullptr;
    }

    ReadWriteLock& operator=(ReadWriteLock&& other) noexcept {
        ReadWriteLock temp(std::move(other));
        swap(temp);
        return *this;
    }

    // Releases whichever mode is currently held (at most one flag is set).
    ~ReadWriteLock() {
        if (locked_shared_)
            m_->unlock_shared();
        else if (locked_)
            m_->unlock();
    }

    // Shared-mode acquire; must not be called while holding exclusive mode.
    void lock_shared() {
        assert(!locked_);
        if (!locked_shared_) {
            m_->lock_shared();
            locked_shared_ = true;
        }
    }

    void unlock_shared() {
        if (locked_shared_) {
            m_->unlock_shared();
            locked_shared_ = false;
        }
    }

    // Exclusive-mode acquire; must not be called while holding shared mode —
    // use switch_to_unique() to promote instead.
    void lock() {
        assert(!locked_shared_);
        if (!locked_) {
            m_->lock();
            locked_ = true;
        }
    }

    void unlock() {
        if (locked_) {
            m_->unlock();
            locked_ = false;
        }
    }

    bool owns_lock() const noexcept { return locked_; }
    bool owns_shared_lock() const noexcept { return locked_shared_; }

    void swap(ReadWriteLock &o) noexcept {
        std::swap(m_, o.m_);
        std::swap(locked_, o.locked_);
        std::swap(locked_shared_, o.locked_shared_);
    }

    mutex_type *mutex() const noexcept { return m_; }

    // Promote shared -> exclusive. Always returns true, signalling to the
    // caller that the lock was dropped and re-taken (its view may be stale).
    bool switch_to_unique() {
        assert(locked_shared_);
        unlock_shared();
        lock();
        return true;
    }

private:
    mutex_type *m_;             // guarded mutex; nullptr for an empty guard
    bool locked_;               // true while holding exclusive (write) mode
    bool locked_shared_;        // true while holding shared (read) mode
};
// ----------------------------------------------------
class WriteLocks
{
......@@ -5022,12 +4957,11 @@ public:
// using Lockable = phmap::LockableImpl<mutex_type>;
// Lockable m;
//
// Lockable::ReadWriteLock read_lock(m); // take a lock (read if supported, otherwise write)
// ... do something
//
// {
//     m.switch_to_unique(); // returns true if we had a read lock and switched to write
//     // now locked for write
// }
//
// ---------------------------------------------------------------------------
// Generic mutex support (always write locks)
......@@ -5039,11 +4973,10 @@ public:
using mutex_type    = Mtx_;
using Base          = LockableBaseImpl<Mtx_>;
// A generic mutex has no shared mode: every lock flavor maps to WriteLock.
// (fix: removed stale pre-refactor UpgradeLock/UpgradeToUnique aliases,
// replaced in this version by ReadWriteLock)
using SharedLock    = typename Base::WriteLock;
using UniqueLock    = typename Base::WriteLock;
using ReadWriteLock = typename Base::WriteLock;
using SharedLocks   = typename Base::WriteLocks;
using UniqueLocks   = typename Base::WriteLocks;
};
// ---------------------------------------------------------------------------
......@@ -5056,26 +4989,26 @@ public:
using mutex_type    = phmap::NullMutex;
using Base          = LockableBaseImpl<phmap::NullMutex>;
// NullMutex: every lock flavor is the DoNothing stub — zero cost, intended
// for single-threaded use.
// (fix: removed stale pre-refactor UpgradeLock/UpgradeToUnique aliases,
// replaced in this version by ReadWriteLock)
using SharedLock    = typename Base::DoNothing;
using ReadWriteLock = typename Base::DoNothing;
using UniqueLock    = typename Base::DoNothing;
using SharedLocks   = typename Base::DoNothing;
using UniqueLocks   = typename Base::DoNothing;
};
// --------------------------------------------------------------------------
// Abseil Mutex support (read and write lock support)
// use: `phmap::AbslMutex` instead of `std::mutex`
// --------------------------------------------------------------------------
#ifdef ABSL_SYNCHRONIZATION_MUTEX_H_
struct AbslMutex : protected absl::Mutex
{
    // Adapts absl::Mutex to the std-style Lockable interface, with ABSL
    // thread-safety annotations so clang -Wthread-safety understands the
    // wrappers.
    // NOTE(review): try_lock()/try_lock_shared() discard the TryLock result
    // (mirrors upstream) — callers cannot observe a failed try-lock.
    // (fix: removed the stale unannotated method set left interleaved with
    // the new ABSL_*-annotated versions)
    void lock()            ABSL_EXCLUSIVE_LOCK_FUNCTION()        { this->Lock(); }
    void unlock()          ABSL_UNLOCK_FUNCTION()                { this->Unlock(); }
    void try_lock()        ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { this->TryLock(); }
    void lock_shared()     ABSL_SHARED_LOCK_FUNCTION()           { this->ReaderLock(); }
    void unlock_shared()   ABSL_UNLOCK_FUNCTION()                { this->ReaderUnlock(); }
    void try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true)    { this->ReaderTryLock(); }
};
template <>
......@@ -5085,50 +5018,66 @@ public:
using mutex_type    = phmap::AbslMutex;
using Base          = LockableBaseImpl<phmap::AbslMutex>;
// absl::Mutex supports reader/writer modes, so shared and read-write locks
// map onto real shared locking.
// (fix: removed stale pre-refactor UpgradeLock/UpgradeToUnique aliases,
// replaced in this version by ReadWriteLock)
using SharedLock    = typename Base::ReadLock;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock    = typename Base::WriteLock;
using SharedLocks   = typename Base::ReadLocks;
using UniqueLocks   = typename Base::WriteLocks;
};
#endif
// --------------------------------------------------------------------------
// Boost shared_mutex support (read and write lock support)
// Microsoft SRWLOCK support (read and write lock support)
// use: `phmap::srwlock` instead of `std::mutex`
// --------------------------------------------------------------------------
#ifdef BOOST_THREAD_SHARED_MUTEX_HPP
#if defined(_MSC_VER) && defined(SRWLOCK_INIT)
#if 1
// ---------------------------------------------------------------------------
template <>
class LockableImpl<boost::shared_mutex> : public boost::shared_mutex
// Thin wrapper giving the Windows SRWLOCK a std::shared_mutex-style
// interface: lock()/unlock() take the lock exclusively (writer),
// lock_shared()/unlock_shared() take it in shared mode (reader).
// NOTE(review): SRWLOCK must not be copied or moved while in use; this
// wrapper does not delete the implicitly-generated copy operations —
// presumably callers never copy it. TODO confirm upstream intent.
class srwlock {
SRWLOCK _lock;
public:
// SRWLOCKs need no teardown call, so no destructor is required.
srwlock() { InitializeSRWLock(&_lock); }
// Exclusive (writer) lock.
void lock() { AcquireSRWLockExclusive(&_lock); }
void unlock() { ReleaseSRWLockExclusive(&_lock); }
// TryAcquire* returns a nonzero BOOLEAN on success; !! normalizes to bool.
bool try_lock() { return !!TryAcquireSRWLockExclusive(&_lock); }
// Shared (reader) lock.
void lock_shared() { AcquireSRWLockShared(&_lock); }
void unlock_shared() { ReleaseSRWLockShared(&_lock); }
bool try_lock_shared() { return !!TryAcquireSRWLockShared(&_lock); }
};
template<>
class LockableImpl<srwlock> : public srwlock
{
public:
using mutex_type = boost::shared_mutex;
using Base = LockableBaseImpl<boost::shared_mutex>;
using SharedLock = boost::shared_lock<mutex_type>;
using UpgradeLock = boost::unique_lock<mutex_type>; // assume can't upgrade
using UniqueLock = boost::unique_lock<mutex_type>;
using mutex_type = srwlock;
using Base = LockableBaseImpl<srwlock>;
using SharedLock = typename Base::ReadLock;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = typename Base::WriteLock;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
#else
#endif
// --------------------------------------------------------------------------
// Boost shared_mutex support (read and write lock support)
// --------------------------------------------------------------------------
#ifdef BOOST_THREAD_SHARED_MUTEX_HPP
// ---------------------------------------------------------------------------
template <>
class LockableImpl<boost::upgrade_mutex> : public boost::upgrade_mutex
class LockableImpl<boost::shared_mutex> : public boost::shared_mutex
{
public:
using mutex_type = boost::upgrade_mutex;
using mutex_type = boost::shared_mutex;
using Base = LockableBaseImpl<boost::shared_mutex>;
using SharedLock = boost::shared_lock<mutex_type>;
using UpgradeLock = boost::upgrade_lock<mutex_type>;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = boost::unique_lock<mutex_type>;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = boost::upgrade_to_unique_lock<mutex_type>;
};
#endif
#endif // BOOST_THREAD_SHARED_MUTEX_HPP
......@@ -5145,11 +5094,10 @@ public:
using mutex_type = std::shared_mutex;
using Base = LockableBaseImpl<std::shared_mutex>;
using SharedLock = std::shared_lock<mutex_type>;
using UpgradeLock = std::unique_lock<mutex_type>; // assume can't upgrade
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = std::unique_lock<mutex_type>;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
#endif // PHMAP_HAVE_SHARED_MUTEX
......
......@@ -98,19 +98,6 @@ struct VoidTImpl {
using type = void;
};
// This trick to retrieve a default alignment is necessary for our
// implementation of aligned_storage_t to be consistent with any implementation
// of std::aligned_storage.
// ---------------------------------------------------------------------------
template <size_t Len, typename T = std::aligned_storage<Len>>
struct default_alignment_of_aligned_storage;
template <size_t Len, size_t Align>
struct default_alignment_of_aligned_storage<Len,
std::aligned_storage<Len, Align>> {
static constexpr size_t value = Align;
};
// NOTE: The `is_detected` family of templates here differ from the library
// fundamentals specification in that for library fundamentals, `Op<Args...>` is
// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
......@@ -238,29 +225,36 @@ struct disjunction<> : std::false_type {};
template <typename T>
struct negation : std::integral_constant<bool, !T::value> {};
template <typename T>
struct is_trivially_destructible
: std::integral_constant<bool, __has_trivial_destructor(T) &&
std::is_destructible<T>::value> {};
template <typename T>
struct is_trivially_default_constructible
: std::integral_constant<bool, __has_trivial_constructor(T) &&
std::is_default_constructible<T>::value &&
is_trivially_destructible<T>::value> {};
#if defined(__GNUC__) && __GNUC__ < 5 && !defined(__clang__) && !defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define PHMAP_OLD_GCC 1
#else
#define PHMAP_OLD_GCC 0
#endif
template <typename T>
struct is_trivially_copy_constructible
: std::integral_constant<bool, __has_trivial_copy(T) &&
#if PHMAP_OLD_GCC
template <typename T>
struct is_trivially_copy_constructible
: std::integral_constant<bool,
__has_trivial_copy(typename std::remove_reference<T>::type) &&
std::is_copy_constructible<T>::value &&
is_trivially_destructible<T>::value> {};
std::is_trivially_destructible<T>::value> {};
template <typename T>
struct is_trivially_copy_assignable
: std::integral_constant<
bool, __has_trivial_assign(typename std::remove_reference<T>::type) &&
template <typename T>
struct is_trivially_copy_assignable :
std::integral_constant<bool,
__has_trivial_assign(typename std::remove_reference<T>::type) &&
phmap::is_copy_assignable<T>::value> {};
template <typename T>
struct is_trivially_copyable :
std::integral_constant<bool, __has_trivial_copy(typename std::remove_reference<T>::type)> {};
#else
template <typename T> using is_trivially_copy_constructible = std::is_trivially_copy_constructible<T>;
template <typename T> using is_trivially_copy_assignable = std::is_trivially_copy_assignable<T>;
template <typename T> using is_trivially_copyable = std::is_trivially_copyable<T>;
#endif
// -----------------------------------------------------------------------------
// C++14 "_t" trait aliases
// -----------------------------------------------------------------------------
......@@ -310,9 +304,15 @@ using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
template <size_t Len, size_t Align = type_traits_internal::
default_alignment_of_aligned_storage<Len>::value>
using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
// ---------------------------------------------------------------------------
// Minimal replacement for std::aligned_storage (which is deprecated in
// C++23): `type` is a trivially-copyable buffer of at least `Len` bytes
// whose alignment is `Align`. sizeof(type) is `Len` rounded up to a
// multiple of `Align` (a consequence of alignas on the member).
// Unlike std::aligned_storage, `Align` has no default — callers must
// always pass it explicitly.
// ---------------------------------------------------------------------------
template<std::size_t Len, std::size_t Align>
struct aligned_storage {
    // Guard against ill-formed instantiations (zero-sized array /
    // invalid alignment) with a clear diagnostic instead of a cryptic one.
    static_assert(Len > 0, "aligned_storage requires Len > 0");
    static_assert(Align > 0 && (Align & (Align - 1)) == 0,
                  "aligned_storage requires Align to be a power of two");
    struct type {
        alignas(Align) unsigned char data[Len];
    };
};

// C++14-style alias, mirroring std::aligned_storage_t.
template< std::size_t Len, std::size_t Align>
using aligned_storage_t = typename aligned_storage<Len, Align>::type;
template <typename T>
using decay_t = typename std::decay<T>::type;
......@@ -654,83 +654,87 @@ namespace phmap {
namespace base_internal {
namespace {
template <typename T>
#ifdef PHMAP_HAVE_EXCEPTIONS
[[noreturn]] void Throw(const T& error) {
throw error;
}
#define PHMAP_THROW_IMPL_MSG(e, message) throw e(message)
#define PHMAP_THROW_IMPL(e) throw e()
#else
[[noreturn]] void Throw(const T&) {
std::abort();
}
#define PHMAP_THROW_IMPL_MSG(e, message) do { (void)(message); std::abort(); } while(0)
#define PHMAP_THROW_IMPL(e) std::abort()
#endif
} // namespace
static inline void ThrowStdLogicError(const std::string& what_arg) {
Throw(std::logic_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::logic_error, what_arg);
}
static inline void ThrowStdLogicError(const char* what_arg) {
Throw(std::logic_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::logic_error, what_arg);
}
static inline void ThrowStdInvalidArgument(const std::string& what_arg) {
Throw(std::invalid_argument(what_arg));
PHMAP_THROW_IMPL_MSG(std::invalid_argument, what_arg);
}
static inline void ThrowStdInvalidArgument(const char* what_arg) {
Throw(std::invalid_argument(what_arg));
PHMAP_THROW_IMPL_MSG(std::invalid_argument, what_arg);
}
static inline void ThrowStdDomainError(const std::string& what_arg) {
Throw(std::domain_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::domain_error, what_arg);
}
static inline void ThrowStdDomainError(const char* what_arg) {
Throw(std::domain_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::domain_error, what_arg);
}
static inline void ThrowStdLengthError(const std::string& what_arg) {
Throw(std::length_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::length_error, what_arg);
}
static inline void ThrowStdLengthError(const char* what_arg) {
Throw(std::length_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::length_error, what_arg);
}
static inline void ThrowStdOutOfRange(const std::string& what_arg) {
Throw(std::out_of_range(what_arg));
PHMAP_THROW_IMPL_MSG(std::out_of_range, what_arg);
}
static inline void ThrowStdOutOfRange(const char* what_arg) {
Throw(std::out_of_range(what_arg));
PHMAP_THROW_IMPL_MSG(std::out_of_range, what_arg);
}
static inline void ThrowStdRuntimeError(const std::string& what_arg) {
Throw(std::runtime_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::runtime_error, what_arg);
}
static inline void ThrowStdRuntimeError(const char* what_arg) {
Throw(std::runtime_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::runtime_error, what_arg);
}
static inline void ThrowStdRangeError(const std::string& what_arg) {
Throw(std::range_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::range_error, what_arg);
}
static inline void ThrowStdRangeError(const char* what_arg) {
Throw(std::range_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::range_error, what_arg);
}
static inline void ThrowStdOverflowError(const std::string& what_arg) {
Throw(std::overflow_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::overflow_error, what_arg);
}
static inline void ThrowStdOverflowError(const char* what_arg) {
Throw(std::overflow_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::overflow_error, what_arg);
}
static inline void ThrowStdUnderflowError(const std::string& what_arg) {
Throw(std::underflow_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::underflow_error, what_arg);
}
static inline void ThrowStdUnderflowError(const char* what_arg) {
Throw(std::underflow_error(what_arg));
PHMAP_THROW_IMPL_MSG(std::underflow_error, what_arg);
}
static inline void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
static inline void ThrowStdBadFunctionCall() {
PHMAP_THROW_IMPL(std::bad_function_call);
}
static inline void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
static inline void ThrowStdBadAlloc() {
PHMAP_THROW_IMPL(std::bad_alloc);
}
} // namespace base_internal
} // namespace phmap
......@@ -1817,9 +1821,10 @@ protected:
// Also, we should be checking is_trivially_copyable here, which is not
// supported now, so we use is_trivially_* traits instead.
template <typename T,
bool unused = phmap::is_trivially_copy_constructible<T>::value&&
phmap::is_trivially_copy_assignable<typename std::remove_cv<
T>::type>::value&& std::is_trivially_destructible<T>::value>
bool unused =
phmap::is_trivially_copy_constructible<T>::value &&
phmap::is_trivially_copy_assignable<typename std::remove_cv<T>::type>::value &&
std::is_trivially_destructible<T>::value>
class optional_data;
// Trivially copyable types
......@@ -2050,6 +2055,11 @@ struct optional_hash_base<T, decltype(std::hash<phmap::remove_const_t<T> >()(
// -----------------------------------------------------------------------------
// phmap::optional class definition
// -----------------------------------------------------------------------------
#if PHMAP_OLD_GCC
#define PHMAP_OPTIONAL_NOEXCEPT
#else
#define PHMAP_OPTIONAL_NOEXCEPT noexcept
#endif
template <typename T>
class optional : private optional_internal::optional_data<T>,
......@@ -2076,7 +2086,7 @@ public:
optional(const optional& src) = default;
// Move constructor, standard semantics
optional(optional&& src) = default;
optional(optional&& src) PHMAP_OPTIONAL_NOEXCEPT = default;
// Constructs a non-empty `optional` direct-initialized value of type `T` from
// the arguments `std::forward<Args>(args)...` within the `optional`.
......@@ -2216,7 +2226,7 @@ public:
optional& operator=(const optional& src) = default;
// Move assignment operator, standard semantics
optional& operator=(optional&& src) = default;
optional& operator=(optional&& src) PHMAP_OPTIONAL_NOEXCEPT = default;
// Value assignment operators
template <
......@@ -4156,180 +4166,6 @@ public:
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
};
} // namespace priv
} // namespace phmap
// ---------------------------------------------------------------------------
// compressed_tuple.h
// ---------------------------------------------------------------------------
#ifdef _MSC_VER
// We need to mark these classes with this declspec to ensure that the
// empty base class optimization inside CompressedTuple happens.
#define PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
#else // _MSC_VER
#define PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif // _MSC_VER
namespace phmap {
namespace priv {
template <typename... Ts>
class CompressedTuple;
namespace internal_compressed_tuple {
template <typename D, size_t I>
struct Elem;
template <typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I>
: std::tuple_element<I, std::tuple<B...>> {};
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
// ---------------------------------------------------------------------------
// Use the __is_final intrinsic if available. Where it's not available, classes
// declared with the 'final' specifier cannot be used as CompressedTuple
// elements.
// TODO(sbenza): Replace this with std::is_final in C++14.
// ---------------------------------------------------------------------------
template <typename T>
constexpr bool IsFinal() {
#if defined(__clang__) || defined(__GNUC__)
// Compiler intrinsic, available on clang/gcc even before C++14.
return __is_final(T);
#else
// Conservative fallback: we cannot detect `final` here, so report false.
// On such compilers a `final` empty class would then be selected as a
// base and fail to compile (see the note above this function).
return false;
#endif
}
// Decide whether CompressedTuple should inherit from T (empty base class
// optimization) rather than store it as a member: only non-final, empty
// class types qualify.
template <typename T>
constexpr bool ShouldUseBase() {
#ifdef __INTEL_COMPILER
// The Intel compiler crashes on the EBO path
// (assertion failed at: "shared/cfe/edgcpfe/lower_init.c", line 7013),
// so always fall back to member storage there.
return false;
#else
return !IsFinal<T>() && std::is_class<T>::value && std::is_empty<T>::value;
#endif
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
// ------------------------------------------------
// Primary Storage template: holds the I-th element of tuple D as a plain
// data member. Selected when ShouldUseBase<ElemT<D, I>>() is false
// (non-empty, non-class, or final element types).
template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
struct Storage
{
using T = ElemT<D, I>;
T value;
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : value(phmap::forward<T>(v)) {}
// Ref-qualified accessors propagate the value category of *this
// (const&/&/const&&/&&) to the stored element.
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return phmap::move(*this).value; }
T&& get() && { return std::move(*this).value; }
};
// EBO Storage specialization: stores the I-th element as a *base class*
// instead of a member so that empty element types add no size to the
// tuple (empty base class optimization). Selected when
// ShouldUseBase<ElemT<D, I>>() is true.
template <typename D, size_t I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
: ElemT<D, I>
{
using T = internal_compressed_tuple::ElemT<D, I>;
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : T(phmap::forward<T>(v)) {}
// get() returns the base-class subobject itself, with the value
// category of *this preserved via the ref-qualifiers.
constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return phmap::move(*this); }
T&& get() && { return std::move(*this); }
};
// Implementation backbone of CompressedTuple: inherits from one Storage
// per element (index pack I...), so each element independently picks
// member storage or EBO base storage.
template <typename D, typename I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
template <typename... Ts, size_t... I>
struct PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
CompressedTupleImpl<CompressedTuple<Ts...>, phmap::index_sequence<I...>>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
: Storage<CompressedTuple<Ts...>,
std::integral_constant<size_t, I>::value>...
{
constexpr CompressedTupleImpl() = default;
// Forwards each argument into the matching per-element Storage base.
explicit constexpr CompressedTupleImpl(Ts&&... args)
: Storage<CompressedTuple<Ts...>, I>(phmap::forward<Ts>(args))... {}
};
} // namespace internal_compressed_tuple
// ---------------------------------------------------------------------------
// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
// empty classes, then CompressedTuple<Ts...> is itself an empty class.
//
// To access the members, use member .get<N>() function.
//
// Eg:
// phmap::priv::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
// t3);
// assert(value.get<0>() == 7);
// T1& t1 = value.get<1>();
// const T2& t2 = value.get<2>();
// ...
//
// https://en.cppreference.com/w/cpp/language/ebo
// ---------------------------------------------------------------------------
template <typename... Ts>
class PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
CompressedTuple<Ts...>, phmap::index_sequence_for<Ts...>>
{
private:
// Type of the I-th element, resolved through the impl machinery.
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
public:
constexpr CompressedTuple() = default;
explicit constexpr CompressedTuple(Ts... base)
: CompressedTuple::CompressedTupleImpl(phmap::forward<Ts>(base)...) {}
// get<I>() overloads: each dispatches to the Storage base for element I,
// preserving the value category of *this (&, const&, &&, const&&).
template <int I>
ElemT<I>& get() & {
return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
}
template <int I>
constexpr const ElemT<I>& get() const& {
return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
}
// Rvalue overloads: the explicit `.internal_compressed_tuple::template
// Storage<...>::get()` qualification is needed because *this is cast to
// an rvalue before selecting the base-class member function.
template <int I>
ElemT<I>&& get() && {
return std::move(*this)
.internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return phmap::move(*this)
.internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
}
};
// Explicit specialization for a zero-element tuple
// (needed to avoid ambiguous overloads for the default constructor).
// A zero-element CompressedTuple is itself an empty class.
// ---------------------------------------------------------------------------
template <>
class PHMAP_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
} // namespace priv
} // namespace phmap
namespace phmap {
namespace priv {
#ifdef _MSC_VER
#pragma warning(push)
......@@ -4543,8 +4379,8 @@ namespace memory_internal {
// ----------------------------------------------------------------------------
template <class Pair, class = std::true_type>
struct OffsetOf {
static constexpr size_t kFirst = (size_t)-1;
static constexpr size_t kSecond = (size_t)-1;
static constexpr size_t kFirst = static_cast<size_t>(-1);
static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
......@@ -4794,9 +4630,14 @@ public:
DoNothing(mutex_type&, phmap::try_to_lock_t) {}
template<class T> explicit DoNothing(T&&) {}
DoNothing& operator=(const DoNothing&) { return *this; }
DoNothing& operator=(DoNothing&&) { return *this; }
void swap(DoNothing &) {}
DoNothing& operator=(DoNothing&&) noexcept { return *this; }
void swap(DoNothing &) noexcept {}
bool owns_lock() const noexcept { return true; }
void lock() {}
void unlock() {}
void lock_shared() {}
void unlock_shared() {}
bool switch_to_unique() { return false; }
};
// ----------------------------------------------------
......@@ -4825,13 +4666,13 @@ public:
m_->try_lock();
}
WriteLock(WriteLock &&o) :
WriteLock(WriteLock &&o) noexcept :
m_(std::move(o.m_)), locked_(std::move(o.locked_)) {
o.locked_ = false;
o.m_ = nullptr;
}
WriteLock& operator=(WriteLock&& other) {
WriteLock& operator=(WriteLock&& other) noexcept {
WriteLock temp(std::move(other));
swap(temp);
return *this;
......@@ -4872,6 +4713,8 @@ public:
mutex_type *mutex() const noexcept { return m_; }
bool switch_to_unique() { return false; }
private:
mutex_type *m_;
bool locked_;
......@@ -4903,13 +4746,13 @@ public:
m_->try_lock_shared();
}
ReadLock(ReadLock &&o) :
ReadLock(ReadLock &&o) noexcept :
m_(std::move(o.m_)), locked_(std::move(o.locked_)) {
o.locked_ = false;
o.m_ = nullptr;
}
ReadLock& operator=(ReadLock&& other) {
ReadLock& operator=(ReadLock&& other) noexcept {
ReadLock temp(std::move(other));
swap(temp);
return *this;
......@@ -4950,11 +4793,103 @@ public:
mutex_type *mutex() const noexcept { return m_; }
bool switch_to_unique() { return false; }
private:
mutex_type *m_;
bool locked_;
};
// ----------------------------------------------------
// A lock guard that initially holds the mutex in *shared* (read) mode and
// can later be switched to *exclusive* (write) mode via switch_to_unique().
// Invariant: at most one of locked_ / locked_shared_ is true at any time.
class ReadWriteLock
{
public:
using mutex_type = MutexType;
// Default construction: not associated with any mutex.
// NOTE(review): lock()/lock_shared() on a default-constructed instance
// dereference a null m_ — callers must move-assign a real lock first.
ReadWriteLock() : m_(nullptr), locked_(false), locked_shared_(false) {}
// Immediately acquires the mutex in shared (read) mode.
explicit ReadWriteLock(mutex_type &m) : m_(&m), locked_(false), locked_shared_(true) {
m_->lock_shared();
}
// Associates with the mutex but acquires nothing (deferred locking).
ReadWriteLock(mutex_type& m, defer_lock_t) noexcept :
m_(&m), locked_(false), locked_shared_(false)
{}
// Move: transfers ownership; the source is left detached (null mutex,
// no lock flags) so its destructor releases nothing.
ReadWriteLock(ReadWriteLock &&o) noexcept :
m_(std::move(o.m_)), locked_(o.locked_), locked_shared_(o.locked_shared_) {
o.locked_ = false;
o.locked_shared_ = false;
o.m_ = nullptr;
}
// Move-and-swap: any lock previously held by *this is released when
// `temp` is destroyed at the end of this function.
ReadWriteLock& operator=(ReadWriteLock&& other) noexcept {
ReadWriteLock temp(std::move(other));
swap(temp);
return *this;
}
// Releases whichever mode (shared or exclusive) is currently held.
// Safe on a moved-from instance: both flags are false, m_ untouched.
~ReadWriteLock() {
if (locked_shared_)
m_->unlock_shared();
else if (locked_)
m_->unlock();
}
// Acquire shared mode; no-op if already held. Asserts we do not already
// hold the exclusive lock (that would self-deadlock on most mutexes).
void lock_shared() {
assert(!locked_);
if (!locked_shared_) {
m_->lock_shared();
locked_shared_ = true;
}
}
void unlock_shared() {
if (locked_shared_) {
m_->unlock_shared();
locked_shared_ = false;
}
}
// Acquire exclusive mode; no-op if already held. Asserts the shared
// lock is not held (use switch_to_unique() for that transition).
void lock() {
assert(!locked_shared_);
if (!locked_) {
m_->lock();
locked_ = true;
}
}
void unlock() {
if (locked_) {
m_->unlock();
locked_ = false;
}
}
bool owns_lock() const noexcept { return locked_; }
bool owns_shared_lock() const noexcept { return locked_shared_; }
void swap(ReadWriteLock &o) noexcept {
std::swap(m_, o.m_);
std::swap(locked_, o.locked_);
std::swap(locked_shared_, o.locked_shared_);
}
mutex_type *mutex() const noexcept { return m_; }
// Upgrade from shared to exclusive mode. NOTE: this is NOT atomic — the
// shared lock is released before the exclusive lock is acquired, so
// another writer may run in between; callers must re-validate any state
// read under the shared lock after this returns. Returns true to signal
// that the lock was dropped and re-taken.
bool switch_to_unique() {
assert(locked_shared_);
unlock_shared();
lock();
return true;
}
private:
mutex_type *m_;        // associated mutex (null if detached)
bool locked_;          // true while exclusive mode is held
bool locked_shared_;   // true while shared mode is held
};
// ----------------------------------------------------
class WriteLocks
{
......@@ -5024,12 +4959,11 @@ public:
// using Lockable = phmap::LockableImpl<mutex_type>;
// Lockable m;
//
// Lockable::UpgradeLock read_lock(m); // take a upgradable lock
// Lockable::ReadWriteLock read_lock(m); // take a lock (read if supported, otherwise write)
// ... do something
//
// {
// Lockable::UpgradeToUnique unique_lock(read_lock);
// m.switch_to_unique(); // returns true if we had a read lock and switched to write
// // now locked for write
// }
//
// ---------------------------------------------------------------------------
// Generic mutex support (always write locks)
......@@ -5041,11 +4975,10 @@ public:
using mutex_type = Mtx_;
using Base = LockableBaseImpl<Mtx_>;
using SharedLock = typename Base::WriteLock;
using UpgradeLock = typename Base::WriteLock;
using UniqueLock = typename Base::WriteLock;
using ReadWriteLock = typename Base::WriteLock;
using SharedLocks = typename Base::WriteLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
// ---------------------------------------------------------------------------
......@@ -5058,26 +4991,26 @@ public:
using mutex_type = phmap::NullMutex;
using Base = LockableBaseImpl<phmap::NullMutex>;
using SharedLock = typename Base::DoNothing;
using UpgradeLock = typename Base::DoNothing;
using ReadWriteLock = typename Base::DoNothing;
using UniqueLock = typename Base::DoNothing;
using UpgradeToUnique = typename Base::DoNothing;
using SharedLocks = typename Base::DoNothing;
using UniqueLocks = typename Base::DoNothing;
};
// --------------------------------------------------------------------------
// Abseil Mutex support (read and write lock support)
// use: `phmap::AbslMutex` instead of `std::mutex`
// --------------------------------------------------------------------------
#ifdef ABSL_SYNCHRONIZATION_MUTEX_H_
struct AbslMutex : protected absl::Mutex
{
void lock() { this->Lock(); }
void unlock() { this->Unlock(); }
void try_lock() { this->TryLock(); }
void lock_shared() { this->ReaderLock(); }
void unlock_shared() { this->ReaderUnlock(); }
void try_lock_shared() { this->ReaderTryLock(); }
void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
void unlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
void try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { this->TryLock(); }
void lock_shared() ABSL_SHARED_LOCK_FUNCTION() { this->ReaderLock(); }
void unlock_shared() ABSL_UNLOCK_FUNCTION() { this->ReaderUnlock(); }
void try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true) { this->ReaderTryLock(); }
};
template <>
......@@ -5087,50 +5020,66 @@ public:
using mutex_type = phmap::AbslMutex;
using Base = LockableBaseImpl<phmap::AbslMutex>;
using SharedLock = typename Base::ReadLock;
using UpgradeLock = typename Base::WriteLock;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = typename Base::WriteLock;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
#endif
// --------------------------------------------------------------------------
// Boost shared_mutex support (read and write lock support)
// Microsoft SRWLOCK support (read and write lock support)
// use: `phmap::srwlock` instead of `std::mutex`
// --------------------------------------------------------------------------
#ifdef BOOST_THREAD_SHARED_MUTEX_HPP
#if defined(_MSC_VER) && defined(SRWLOCK_INIT)
#if 1
// ---------------------------------------------------------------------------
template <>
class LockableImpl<boost::shared_mutex> : public boost::shared_mutex
// Thin wrapper giving the Windows SRWLOCK a std::shared_mutex-style
// interface: lock()/unlock() take the lock exclusively (writer),
// lock_shared()/unlock_shared() take it in shared mode (reader).
// NOTE(review): SRWLOCK must not be copied or moved while in use; this
// wrapper does not delete the implicitly-generated copy operations —
// presumably callers never copy it. TODO confirm upstream intent.
class srwlock {
SRWLOCK _lock;
public:
// SRWLOCKs need no teardown call, so no destructor is required.
srwlock() { InitializeSRWLock(&_lock); }
// Exclusive (writer) lock.
void lock() { AcquireSRWLockExclusive(&_lock); }
void unlock() { ReleaseSRWLockExclusive(&_lock); }
// TryAcquire* returns a nonzero BOOLEAN on success; !! normalizes to bool.
bool try_lock() { return !!TryAcquireSRWLockExclusive(&_lock); }
// Shared (reader) lock.
void lock_shared() { AcquireSRWLockShared(&_lock); }
void unlock_shared() { ReleaseSRWLockShared(&_lock); }
bool try_lock_shared() { return !!TryAcquireSRWLockShared(&_lock); }
};
template<>
class LockableImpl<srwlock> : public srwlock
{
public:
using mutex_type = boost::shared_mutex;
using Base = LockableBaseImpl<boost::shared_mutex>;
using SharedLock = boost::shared_lock<mutex_type>;
using UpgradeLock = boost::unique_lock<mutex_type>; // assume can't upgrade
using UniqueLock = boost::unique_lock<mutex_type>;
using mutex_type = srwlock;
using Base = LockableBaseImpl<srwlock>;
using SharedLock = typename Base::ReadLock;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = typename Base::WriteLock;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
#else
#endif
// --------------------------------------------------------------------------
// Boost shared_mutex support (read and write lock support)
// --------------------------------------------------------------------------
#ifdef BOOST_THREAD_SHARED_MUTEX_HPP
// ---------------------------------------------------------------------------
template <>
class LockableImpl<boost::upgrade_mutex> : public boost::upgrade_mutex
class LockableImpl<boost::shared_mutex> : public boost::shared_mutex
{
public:
using mutex_type = boost::upgrade_mutex;
using mutex_type = boost::shared_mutex;
using Base = LockableBaseImpl<boost::shared_mutex>;
using SharedLock = boost::shared_lock<mutex_type>;
using UpgradeLock = boost::upgrade_lock<mutex_type>;
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = boost::unique_lock<mutex_type>;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = boost::upgrade_to_unique_lock<mutex_type>;
};
#endif
#endif // BOOST_THREAD_SHARED_MUTEX_HPP
......@@ -5147,11 +5096,10 @@ public:
using mutex_type = std::shared_mutex;
using Base = LockableBaseImpl<std::shared_mutex>;
using SharedLock = std::shared_lock<mutex_type>;
using UpgradeLock = std::unique_lock<mutex_type>; // assume can't upgrade
using ReadWriteLock = typename Base::ReadWriteLock;
using UniqueLock = std::unique_lock<mutex_type>;
using SharedLocks = typename Base::ReadLocks;
using UniqueLocks = typename Base::WriteLocks;
using UpgradeToUnique = typename Base::DoNothing; // we already have unique ownership
};
#endif // PHMAP_HAVE_SHARED_MUTEX
......
......@@ -270,31 +270,31 @@ inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
namespace phmap {
namespace base_internal {
PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
PHMAP_BASE_INTERNAL_FORCEINLINE uint32_t CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
return (uint32_t)("\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes);
}
PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
PHMAP_BASE_INTERNAL_FORCEINLINE uint32_t CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return (int)(63 - result);
return (uint32_t)(63 - result);
}
return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) && _BitScanReverse(&result, (unsigned long)(n >> 32))) {
return 31 - result;
return (uint32_t)(31 - result);
}
if (_BitScanReverse(&result, (unsigned long)n)) {
return 63 - result;
return (uint32_t)(63 - result);
}
return 64;
#elif defined(__GNUC__) || defined(__clang__)
......@@ -309,7 +309,7 @@ PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
return (uint32_t)__builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
......
......@@ -272,31 +272,31 @@ inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
namespace phmap {
namespace base_internal {
PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
PHMAP_BASE_INTERNAL_FORCEINLINE uint32_t CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
return (uint32_t)("\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes);
}
PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
PHMAP_BASE_INTERNAL_FORCEINLINE uint32_t CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return (int)(63 - result);
return (uint32_t)(63 - result);
}
return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) && _BitScanReverse(&result, (unsigned long)(n >> 32))) {
return 31 - result;
return (uint32_t)(31 - result);
}
if (_BitScanReverse(&result, (unsigned long)n)) {
return 63 - result;
return (uint32_t)(63 - result);
}
return 64;
#elif defined(__GNUC__) || defined(__clang__)
......@@ -311,7 +311,7 @@ PHMAP_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
return (uint32_t)__builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
......
......@@ -35,8 +35,8 @@
// ---------------------------------------------------------------------------
#define PHMAP_VERSION_MAJOR 1
#define PHMAP_VERSION_MINOR 0
#define PHMAP_VERSION_PATCH 0
#define PHMAP_VERSION_MINOR 3
#define PHMAP_VERSION_PATCH 12
// Included for the __GLIBC__ macro (or similar macros on other systems).
#include <limits.h>
......@@ -100,7 +100,7 @@
#endif
#if CHAR_BIT != 8
#error "phmap assumes CHAR_BIT == 8."
#warning "phmap assumes CHAR_BIT == 8."
#endif
// phmap currently assumes that an int is 4 bytes.
......@@ -120,7 +120,8 @@
#define PHMAP_HAVE_BUILTIN(x) 0
#endif
#if (defined(_MSVC_LANG) && _MSVC_LANG >= 201703) || __cplusplus >= 201703
#if (!defined(__GNUC__) || defined(__clang__) || __GNUC__ >= 5) && \
((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
#define PHMAP_HAVE_CC17 1
#else
#define PHMAP_HAVE_CC17 0
......@@ -148,40 +149,13 @@
#define PHMAP_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
#endif
// ----------------------------------------------------------------
// Checks whether `std::is_trivially_destructible<T>` is supported.
// ----------------------------------------------------------------
#ifdef PHMAP_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#error PHMAP_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \
(!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && PHMAP_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8))
#define PHMAP_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#endif
// --------------------------------------------------------------
// Checks whether `std::is_trivially_default_constructible<T>` is
// supported.
// --------------------------------------------------------------
#if defined(PHMAP_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
#error PHMAP_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
#elif defined(PHMAP_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
#error PHMAP_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set
#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
(!defined(__clang__) && defined(__GNUC__) && \
PHMAP_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 1) && \
(defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \
(defined(_MSC_VER) && !defined(__NVCC__))
#define PHMAP_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
#define PHMAP_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
// -------------------------------------------------------------------
// Checks whether C++11's `thread_local` storage duration specifier is
// supported.
// -------------------------------------------------------------------
#ifdef PHMAP_HAVE_THREAD_LOCAL
#error PHMAP_HAVE_THREAD_LOCAL cannot be directly set
#elif defined(__APPLE__)
#elif defined(__APPLE__) && defined(__clang__)
#if __has_feature(cxx_thread_local) && \
!(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
#define PHMAP_HAVE_THREAD_LOCAL 1
......@@ -341,7 +315,11 @@
#endif
#if PHMAP_HAVE_CC17
#ifdef __has_include
#if __has_include(<shared_mutex>)
#define PHMAP_HAVE_SHARED_MUTEX 1
#endif
#endif
#endif
#ifndef PHMAP_HAVE_STD_STRING_VIEW
......@@ -672,6 +650,15 @@
#define PHMAP_IF_CONSTEXPR(expr) if ((expr))
#endif
// ----------------------------------------------------------------------
// builtin unreachable
// ----------------------------------------------------------------------
#if PHMAP_HAVE_BUILTIN(__builtin_unreachable)
#define PHMAP_BUILTIN_UNREACHABLE() __builtin_unreachable()
#else
#define PHMAP_BUILTIN_UNREACHABLE() (void)0
#endif
// ----------------------------------------------------------------------
// base/macros.h
// ----------------------------------------------------------------------
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment