mikk_atomic_hash_set.hh

/* SPDX-FileCopyrightText: 2012-2021 Meta Platforms, Inc. and affiliates.
 * SPDX-FileCopyrightText: 2022 Blender Authors
 *
 * SPDX-License-Identifier: Apache-2.0 */

/* Simplified version of Folly's AtomicHashArray
 * (https://github.com/facebook/folly/blob/main/folly/AtomicHashArray.h).
 *
 * Notable changes:
 * - Standalone and header-only.
 * - Behaves like a set, not like a map: There's no value type anymore, only keys.
 * - Capacity check logic has been removed; the code assumes you know the required size in
 *   advance.
 * - Custom allocator support has been removed.
 * - Erase has been removed.
 * - Find has been removed.
 */
/** \file
 * \ingroup mikktspace
 */

#pragma once

#ifdef _MSC_VER
#  include <intrin.h>
#endif

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <type_traits>
#include <utility>
#include <vector>

namespace mikk {
struct AtomicHashSetLinearProbeFcn {
  inline size_t operator()(size_t idx, size_t /* numProbes */, size_t capacity) const
  {
    idx += 1;  // linear probing

    // Avoid modulus because it's slow
    return (idx < capacity) ? idx : (idx - capacity);
  }
};

struct AtomicHashSetQuadraticProbeFcn {
  inline size_t operator()(size_t idx, size_t numProbes, size_t capacity) const
  {
    idx += numProbes;  // quadratic probing

    // Avoid modulus because it's slow
    return (idx < capacity) ? idx : (idx - capacity);
  }
};
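
/* Probe sequence illustration (descriptive note): emplace() below passes an increasing
 * numProbes (1, 2, 3, ...), so from an anchor index i linear probing visits
 * i + 1, i + 2, i + 3, ... while quadratic probing visits i + 1, i + 3, i + 6, i + 10, ...
 * (triangular offsets). Both wrap around at `capacity`. */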

template<class KeyT,
         bool isAtomic,
         class KeyHash = std::hash<KeyT>,
         class KeyEqual = std::equal_to<KeyT>,
         class ProbeFcn = AtomicHashSetLinearProbeFcn>
class AtomicHashSet {
  static_assert((std::is_convertible<KeyT, int32_t>::value ||
                 std::is_convertible<KeyT, int64_t>::value ||
                 std::is_convertible<KeyT, const void *>::value),
                "You are trying to use AtomicHashSet with disallowed key "
                "types. You must use atomically compare-and-swappable integer "
                "keys, or a different container class.");

 public:
  const size_t capacity_;
  const KeyT kEmptyKey_;

  KeyHash hasher_;
  KeyEqual equalityChecker_;

 private:
  size_t kAnchorMask_;

  /* When using a single thread, we can avoid overhead by not bothering with atomic cells. */
  typedef typename std::conditional<isAtomic, std::atomic<KeyT>, KeyT>::type cell_type;
  std::vector<cell_type> cells_;

 public:
  struct Config {
    KeyT emptyKey;
    double maxLoadFactor;
    double growthFactor;
    size_t capacity;  // if positive, overrides maxLoadFactor

    // Cannot have constexpr ctor because some compilers rightly complain.
    Config() : emptyKey((KeyT)-1), maxLoadFactor(0.8), growthFactor(-1), capacity(0) {}
  };

  /* Instead of a mess of arguments, we take a max size and a Config struct to
   * simulate named ctor parameters. The Config struct has sensible defaults
   * for everything, but is overloaded - if you specify a positive capacity,
   * that will be used directly instead of computing it based on maxLoadFactor.
   * (Note: in this simplified version the constructor below always derives the
   * capacity from maxSize and maxLoadFactor; Config::capacity and
   * Config::growthFactor are not used.) */
  AtomicHashSet(size_t maxSize,
                KeyHash hasher = KeyHash(),
                KeyEqual equalityChecker = KeyEqual(),
                const Config &c = Config())
      : capacity_(size_t(double(maxSize) / c.maxLoadFactor) + 1),
        kEmptyKey_(c.emptyKey),
        hasher_(hasher),
        equalityChecker_(equalityChecker),
        cells_(capacity_)
  {
    /* Get next power of two. Could be done more efficiently with builtin_clz, but this is not
     * performance-critical. */
    kAnchorMask_ = 1;
    while (kAnchorMask_ < capacity_) {
      kAnchorMask_ *= 2;
    }
    /* Get mask for lower bits. */
    kAnchorMask_ -= 1;

    /* Not great, but the best we can do to support both atomic and non-atomic cells
     * since std::atomic doesn't have a copy constructor so cells_(capacity_, kEmptyKey_)
     * in the initializer list won't work. */
    std::fill((KeyT *)cells_.data(), (KeyT *)cells_.data() + capacity_, kEmptyKey_);
  }
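
  /* Sizing illustration (descriptive note): with maxSize = 1000 and the default
   * maxLoadFactor of 0.8, capacity_ = size_t(1000 / 0.8) + 1 = 1251 cells, and
   * kAnchorMask_ ends up as 2048 - 1 = 2047 (the next power of two >= 1251, minus one). */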

  AtomicHashSet(const AtomicHashSet &) = delete;
  AtomicHashSet &operator=(const AtomicHashSet &) = delete;

  ~AtomicHashSet() = default;

  /* Sequential specialization. */
  bool tryUpdateCell(KeyT *cell, KeyT &existingKey, KeyT newKey)
  {
    if (*cell == existingKey) {
      *cell = newKey;
      return true;
    }
    existingKey = *cell;
    return false;
  }

  /* Atomic specialization. */
  bool tryUpdateCell(std::atomic<KeyT> *cell, KeyT &existingKey, KeyT newKey)
  {
    return cell->compare_exchange_strong(existingKey, newKey, std::memory_order_acq_rel);
  }
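
  /* Note: like the sequential overload above, compare_exchange_strong() writes the key that is
   * currently stored in the cell back into `existingKey` when the exchange fails, so callers
   * can inspect the cell's occupant either way. */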

  std::pair<KeyT, bool> emplace(KeyT key)
  {
    size_t idx = keyToAnchorIdx(key);
    size_t numProbes = 0;
    for (;;) {
      cell_type *cell = &cells_[idx];
      KeyT existingKey = kEmptyKey_;

      /* Try to replace empty cell with our key. */
      if (tryUpdateCell(cell, existingKey, key)) {
        /* Cell was empty, we're done. */
        return std::make_pair(key, true);
      }

      /* Cell was not empty, check if the existing key is equal. */
      if (equalityChecker_(existingKey, key)) {
        /* Found equal element, we're done. */
        return std::make_pair(existingKey, false);
      }

      /* Continue to next cell according to probe strategy. */
      ++numProbes;
      if (numProbes >= capacity_) {
        // probed every cell...fail
        assert(false);
        return std::make_pair(kEmptyKey_, false);
      }

      idx = ProbeFcn()(idx, numProbes, capacity_);
    }
  }

 private:
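  /* Maps a key's hash to its anchor (initial probe) index. kAnchorMask_ + 1 is the smallest
   * power of two >= capacity_, so the masked hash can still land past the last cell; in that
   * case, fall back to a full modulus. */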
  inline size_t keyToAnchorIdx(const KeyT k) const
  {
    const size_t hashVal = hasher_(k);
    const size_t probe = hashVal & kAnchorMask_;
    return (probe < capacity_) ? probe : hashVal % capacity_;
  }
};  // AtomicHashSet

}  // namespace mikk
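
/* Example usage (illustrative sketch; the function name and key values here are hypothetical).
 * With the default Config, (KeyT)-1 is reserved as the empty-cell marker and must never be
 * inserted as a real key. */
inline void mikk_atomic_hash_set_example()
{
  /* Sized for up to 4 keys; isAtomic = false since this example is single-threaded.
   * Pass `true` instead when several threads call emplace() concurrently. */
  mikk::AtomicHashSet<uint32_t, false> set(4);

  std::pair<uint32_t, bool> a = set.emplace(7); /* a.second == true: newly inserted. */
  std::pair<uint32_t, bool> b = set.emplace(9); /* b.second == true: newly inserted. */
  std::pair<uint32_t, bool> c = set.emplace(7); /* c.second == false: 7 was already present. */
  (void)a;
  (void)b;
  (void)c;

  /* A Config can override the defaults, e.g. reserving 0 as the empty key instead of (KeyT)-1;
   * the hasher and equality checker still have to be passed to reach the Config parameter. */
  mikk::AtomicHashSet<uint32_t, false>::Config config;
  config.emptyKey = 0;
  mikk::AtomicHashSet<uint32_t, false> with_config(
      4, std::hash<uint32_t>(), std::equal_to<uint32_t>(), config);
  with_config.emplace(42);
}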