Mirror of https://github.com/LadybirdBrowser/ladybird.git
AK: Merge HashTable bucket state into one enum
The hash table buckets had three different state booleans that are in fact mutually exclusive. In preparation for further states, this commit consolidates them into one enum. This has the added benefit of not relying on the compiler's boolean packing anymore; the bucket state now definitely needs only one byte.
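A rough, self-contained illustration of the layout change described above (the OldBucketHeader and NewBucketHeader names are invented for this sketch; only BucketState and its enumerators come from the actual change):

#include <cstdint>

using u8 = std::uint8_t;

// Old layout (simplified): three mutually exclusive flags. Each bool occupies
// at least one byte, so the header typically takes three bytes, and the
// exclusivity of the flags is only implicit.
struct OldBucketHeader {
    bool used;
    bool deleted;
    bool end;
};

// New layout (simplified): one explicit state whose underlying type is u8,
// so a single byte is guaranteed to be enough.
enum class BucketState : u8 {
    Free = 0,
    Used,
    Deleted,
    Rehashed,
    End,
};

struct NewBucketHeader {
    BucketState state;
};

static_assert(sizeof(OldBucketHeader) >= 3);
static_assert(sizeof(BucketState) == 1);
static_assert(sizeof(NewBucketHeader) == 1);

With three bool members the header occupies at least three bytes unless bit-fields are used; with the u8-backed enum it is exactly one byte.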
This commit is contained in:
parent 7d667b9f69
commit bcb8937898
Notes: sideshowbarker, 2024-07-17 16:26:22 +09:00
Author: https://github.com/kleinesfilmroellchen
Commit: bcb8937898
Pull-request: https://github.com/SerenityOS/serenity/pull/12937
Reviewed-by: https://github.com/Hendiadyoin1
Reviewed-by: https://github.com/davidot
1 changed file with 31 additions and 32 deletions
@@ -28,6 +28,15 @@ enum class HashSetExistingEntryBehavior {
     Replace
 };
 
+// FIXME: Choose specific values so that we can do bit-based checks for various classes of state.
+enum class BucketState : u8 {
+    Free = 0,
+    Used,
+    Deleted,
+    Rehashed,
+    End,
+};
+
 template<typename HashTableType, typename T, typename BucketType>
 class HashTableIterator {
     friend HashTableType;
@@ -46,10 +55,10 @@ private:
             return;
         do {
             ++m_bucket;
-            if (m_bucket->used)
+            if (m_bucket->state == BucketState::Used)
                 return;
-        } while (!m_bucket->end);
-        if (m_bucket->end)
+        } while (m_bucket->state != BucketState::End);
+        if (m_bucket->state == BucketState::End)
             m_bucket = nullptr;
     }
 
@@ -87,9 +96,7 @@ class HashTable {
     static constexpr size_t load_factor_in_percent = 60;
 
     struct Bucket {
-        bool used;
-        bool deleted;
-        bool end;
+        BucketState state;
         alignas(T) u8 storage[sizeof(T)];
 
         T* slot() { return reinterpret_cast<T*>(storage); }
@@ -99,8 +106,7 @@ class HashTable {
     struct OrderedBucket {
         OrderedBucket* previous;
         OrderedBucket* next;
-        bool used;
-        bool deleted;
+        BucketState state;
         alignas(T) u8 storage[sizeof(T)];
         T* slot() { return reinterpret_cast<T*>(storage); }
         const T* slot() const { return reinterpret_cast<const T*>(storage); }
@@ -128,7 +134,7 @@ public:
             return;
 
         for (size_t i = 0; i < m_capacity; ++i) {
-            if (m_buckets[i].used)
+            if (m_buckets[i].state == BucketState::Used)
                 m_buckets[i].slot()->~T();
         }
 
@@ -232,7 +238,7 @@ public:
             return Iterator(m_collection_data.head);
 
         for (size_t i = 0; i < m_capacity; ++i) {
-            if (m_buckets[i].used)
+            if (m_buckets[i].state == BucketState::Used)
                 return Iterator(&m_buckets[i]);
         }
         return end();
@@ -253,7 +259,7 @@ public:
             return ConstIterator(m_collection_data.head);
 
         for (size_t i = 0; i < m_capacity; ++i) {
-            if (m_buckets[i].used)
+            if (m_buckets[i].state == BucketState::Used)
                 return ConstIterator(&m_buckets[i]);
         }
         return end();
@@ -281,14 +287,14 @@ public:
         if constexpr (IsOrdered)
             m_collection_data = { nullptr, nullptr };
         else
-            m_buckets[m_capacity].end = true;
+            m_buckets[m_capacity].state = BucketState::End;
     }
 
     template<typename U = T>
     ErrorOr<HashSetResult> try_set(U&& value, HashSetExistingEntryBehavior existing_entry_behavior = HashSetExistingEntryBehavior::Replace)
     {
         auto* bucket = TRY(try_lookup_for_writing(value));
-        if (bucket->used) {
+        if (bucket->state == BucketState::Used) {
             if (existing_entry_behavior == HashSetExistingEntryBehavior::Keep)
                 return HashSetResult::KeptExistingEntry;
             (*bucket->slot()) = forward<U>(value);
@@ -296,11 +302,9 @@ public:
         }
 
         new (bucket->slot()) T(forward<U>(value));
-        bucket->used = true;
-        if (bucket->deleted) {
-            bucket->deleted = false;
+        if (bucket->state == BucketState::Deleted)
             --m_deleted_count;
-        }
+        bucket->state = BucketState::Used;
 
         if constexpr (IsOrdered) {
             if (!m_collection_data.head) [[unlikely]] {
@@ -393,11 +397,7 @@ public:
     {
         VERIFY(iterator.m_bucket);
         auto& bucket = *iterator.m_bucket;
-        VERIFY(bucket.used);
-        VERIFY(!bucket.deleted);
-
-        if constexpr (!IsOrdered)
-            VERIFY(!bucket.end);
+        VERIFY(bucket.state == BucketState::Used);
 
         delete_bucket(bucket);
         --m_size;
@@ -412,7 +412,7 @@ public:
         size_t removed_count = 0;
         for (size_t i = 0; i < m_capacity; ++i) {
             auto& bucket = m_buckets[i];
-            if (bucket.used && predicate(*bucket.slot())) {
+            if (bucket.state == BucketState::Used && predicate(*bucket.slot())) {
                 delete_bucket(bucket);
                 ++removed_count;
             }
@@ -430,7 +430,7 @@ private:
     {
         auto& bucket = lookup_for_writing(value);
         new (bucket.slot()) T(move(value));
-        bucket.used = true;
+        bucket.state = BucketState::Used;
 
         if constexpr (IsOrdered) {
             if (!m_collection_data.head) [[unlikely]] {
@@ -473,7 +473,7 @@ private:
         if constexpr (IsOrdered)
             m_collection_data = { nullptr, nullptr };
         else
-            m_buckets[m_capacity].end = true;
+            m_buckets[m_capacity].state = BucketState::End;
 
         if (!old_buckets)
             return {};
@@ -500,10 +500,10 @@ private:
         for (;;) {
             auto& bucket = m_buckets[hash % m_capacity];
 
-            if (bucket.used && predicate(*bucket.slot()))
+            if (bucket.state == BucketState::Used && predicate(*bucket.slot()))
                 return &bucket;
 
-            if (!bucket.used && !bucket.deleted)
+            if (bucket.state != BucketState::Used && bucket.state != BucketState::Deleted)
                 return nullptr;
 
             hash = double_hash(hash);
@@ -522,14 +522,14 @@ private:
         for (;;) {
             auto& bucket = m_buckets[hash % m_capacity];
 
-            if (bucket.used && TraitsForT::equals(*bucket.slot(), value))
+            if (bucket.state == BucketState::Used && TraitsForT::equals(*bucket.slot(), value))
                 return &bucket;
 
-            if (!bucket.used) {
+            if (bucket.state != BucketState::Used) {
                 if (!first_empty_bucket)
                     first_empty_bucket = &bucket;
 
-                if (!bucket.deleted)
+                if (bucket.state != BucketState::Deleted)
                     return const_cast<BucketType*>(first_empty_bucket);
             }
 
@@ -560,8 +560,7 @@ private:
     void delete_bucket(auto& bucket)
    {
         bucket.slot()->~T();
-        bucket.used = false;
-        bucket.deleted = true;
+        bucket.state = BucketState::Deleted;
 
         if constexpr (IsOrdered) {
             if (bucket.previous)
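The FIXME added alongside the enum asks for specific numeric values so that whole classes of states can be checked with a bit mask rather than chained comparisons. This commit leaves the values at their defaults, so the following standalone sketch shows only one hypothetical assignment (the constants and the is_used_bucket and is_free_bucket helpers are illustrative, not part of the change):

#include <cstdint>

using u8 = std::uint8_t;

// Hypothetical encoding: a set bit in the high nibble marks states that hold a live value.
enum class BucketState : u8 {
    Free = 0x00,     // never held a value
    Deleted = 0x01,  // tombstone; probing must continue past it
    End = 0x02,      // sentinel bucket terminating the table
    Used = 0x10,     // holds a live value
    Rehashed = 0x11, // added by this commit in preparation for further use
};

// "Does this bucket hold a value?" collapses to a single bit test.
constexpr bool is_used_bucket(BucketState state)
{
    return (static_cast<u8>(state) & 0x10) != 0;
}

// "Can a lookup stop probing here?" is true only for buckets that never held a value.
constexpr bool is_free_bucket(BucketState state)
{
    return (static_cast<u8>(state) & 0x11) == 0;
}

static_assert(is_used_bucket(BucketState::Used) && is_used_bucket(BucketState::Rehashed));
static_assert(is_free_bucket(BucketState::Free) && is_free_bucket(BucketState::End));
static_assert(!is_used_bucket(BucketState::Deleted) && !is_free_bucket(BucketState::Deleted));

Whatever the encoding, Deleted has to stay distinguishable from Free: in this open-addressing table a lookup may only stop probing at a bucket that never held a value, which is exactly the state != BucketState::Used && state != BucketState::Deleted check in the lookup hunk above.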