diff --git a/AK/HashTable.h b/AK/HashTable.h
index e01ab001fa9..79fc2320606 100644
--- a/AK/HashTable.h
+++ b/AK/HashTable.h
@@ -461,12 +461,11 @@ private:
         auto old_capacity = m_capacity;
         Iterator old_iter = begin();
 
-        auto* new_buckets = kmalloc(size_in_bytes(new_capacity));
+        auto* new_buckets = kcalloc(1, size_in_bytes(new_capacity));
         if (!new_buckets)
             return Error::from_errno(ENOMEM);
 
         m_buckets = (BucketType*)new_buckets;
-        __builtin_memset(m_buckets, 0, size_in_bytes(new_capacity));
 
         m_capacity = new_capacity;
         m_deleted_count = 0;
diff --git a/AK/kmalloc.h b/AK/kmalloc.h
index 183345c7538..481e2e8ce8c 100644
--- a/AK/kmalloc.h
+++ b/AK/kmalloc.h
@@ -15,6 +15,7 @@
 #    include <new>
 #    include <stdlib.h>
 
+#    define kcalloc calloc
 #    define kmalloc malloc
 #    define kmalloc_good_size malloc_good_size
 
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index d7b62e40f24..be0e9ab66af 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -449,6 +449,18 @@ void* kmalloc(size_t size)
     return ptr;
 }
 
+void* kcalloc(size_t count, size_t size)
+{
+    if (Checked<size_t>::multiplication_would_overflow(count, size))
+        return nullptr;
+    size_t new_size = count * size;
+    auto* ptr = kmalloc(new_size);
+    // FIXME: Avoid redundantly scrubbing the memory in kmalloc()
+    if (ptr)
+        memset(ptr, 0, new_size);
+    return ptr;
+}
+
 void kfree_sized(void* ptr, size_t size)
 {
     if (!ptr)
diff --git a/Kernel/Heap/kmalloc.h b/Kernel/Heap/kmalloc.h
index 5cff42f1463..f7c62fc046f 100644
--- a/Kernel/Heap/kmalloc.h
+++ b/Kernel/Heap/kmalloc.h
@@ -72,6 +72,7 @@ void operator delete[](void* ptrs) noexcept DISALLOW("All deletes in the kernel
 void operator delete[](void* ptr, size_t) noexcept;
 
 [[gnu::malloc, gnu::alloc_size(1)]] void* kmalloc(size_t);
+[[gnu::malloc, gnu::alloc_size(1, 2)]] void* kcalloc(size_t, size_t);
 [[gnu::malloc, gnu::alloc_size(1), gnu::alloc_align(2)]] void* kmalloc_aligned(size_t size, size_t alignment);