Jolt Physics
A multi core friendly Game Physics Engine
Loading...
Searching...
No Matches
LockFreeHashMap.inl
Go to the documentation of this file.
1// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
2// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
3// SPDX-License-Identifier: MIT
4
5#pragma once
6
8
10// LFHMAllocator
12
14{
15#if JPH_DEFAULT_ALLOCATE_ALIGNMENT < 16
16 AlignedFree(mObjectStore);
17#else
18 Free(mObjectStore);
19#endif
20}
21
/// Reserve the backing object store for this allocator.
/// @param inObjectStoreSizeBytes Total pool size in bytes; fixed for the lifetime of the allocator.
inline void LFHMAllocator::Init(uint inObjectStoreSizeBytes)
{
	// Init may only be called once; the store is released in the destructor only
	JPH_ASSERT(mObjectStore == nullptr);

	mObjectStoreSizeBytes = inObjectStoreSizeBytes;
#if JPH_DEFAULT_ALLOCATE_ALIGNMENT < 16
	// Request 16 byte alignment explicitly when the default allocator cannot guarantee it
	mObjectStore = reinterpret_cast<uint8 *>(JPH::AlignedAllocate(inObjectStoreSizeBytes, 16));
#else
	mObjectStore = reinterpret_cast<uint8 *>(JPH::Allocate(inObjectStoreSizeBytes));
#endif
}
33
35{
36 mWriteOffset = 0;
37}
38
/// Atomically carve a block of inBlockSize bytes out of the pool.
/// @param inBlockSize Number of bytes to reserve.
/// @param ioBegin On input the begin offset of the caller's previous block; on output the begin offset of the usable range.
/// @param ioEnd On input the end offset of the caller's previous block; on output the end offset of the usable range.
/// When the new block directly follows the caller's previous block the two ranges are merged.
/// When the pool is exhausted the returned range is empty or left unchanged (caller detects this as insufficient space).
inline void LFHMAllocator::Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
{
	// If we're already beyond the end of our buffer then don't do an atomic add.
	// It's possible that many keys are inserted after the allocator is full, making it possible
	// for mWriteOffset (uint32) to wrap around to zero. When this happens, there will be a memory corruption.
	// This way, we will be able to progress the write offset beyond the size of the buffer
	// worst case by max <CPU count> * inBlockSize.
	if (mWriteOffset.load(memory_order_relaxed) >= mObjectStoreSizeBytes)
		return;

	// Atomically fetch a block from the pool
	uint32 begin = mWriteOffset.fetch_add(inBlockSize, memory_order_relaxed);
	uint32 end = min(begin + inBlockSize, mObjectStoreSizeBytes);

	if (ioEnd == begin)
	{
		// Block is allocated straight after our previous block
		begin = ioBegin;
	}
	else
	{
		// Block is a new block
		begin = min(begin, mObjectStoreSizeBytes);
	}

	// Store the begin and end of the resulting block
	ioBegin = begin;
	ioEnd = end;
}
68
69template <class T>
70inline uint32 LFHMAllocator::ToOffset(const T *inData) const
71{
72 const uint8 *data = reinterpret_cast<const uint8 *>(inData);
73 JPH_ASSERT(data >= mObjectStore && data < mObjectStore + mObjectStoreSizeBytes);
74 return uint32(data - mObjectStore);
75}
76
77template <class T>
78inline T *LFHMAllocator::FromOffset(uint32 inOffset) const
79{
80 JPH_ASSERT(inOffset < mObjectStoreSizeBytes);
81 return reinterpret_cast<T *>(mObjectStore + inOffset);
83
85// LFHMAllocatorContext
87
89 mAllocator(inAllocator),
90 mBlockSize(inBlockSize)
91{
92}
93
94inline bool LFHMAllocatorContext::Allocate(uint32 inSize, uint32 inAlignment, uint32 &outWriteOffset)
95{
96 // Calculate needed bytes for alignment
97 JPH_ASSERT(IsPowerOf2(inAlignment));
98 uint32 alignment_mask = inAlignment - 1;
99 uint32 alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;
100
101 // Check if we have space
102 if (mEnd - mBegin < inSize + alignment)
103 {
104 // Allocate a new block
105 mAllocator.Allocate(mBlockSize, mBegin, mEnd);
106
107 // Update alignment
108 alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;
109
110 // Check if we have space again
111 if (mEnd - mBegin < inSize + alignment)
112 return false;
113 }
114
115 // Make the allocation
116 mBegin += alignment;
117 outWriteOffset = mBegin;
118 mBegin += inSize;
119 return true;
121
123// LockFreeHashMap
125
126template <class Key, class Value>
128{
129 JPH_ASSERT(inMaxBuckets >= 4 && IsPowerOf2(inMaxBuckets));
130 JPH_ASSERT(mBuckets == nullptr);
131
132 mNumBuckets = inMaxBuckets;
133 mMaxBuckets = inMaxBuckets;
134
135#if JPH_DEFAULT_ALLOCATE_ALIGNMENT < 16
136 mBuckets = reinterpret_cast<atomic<uint32> *>(AlignedAllocate(inMaxBuckets * sizeof(atomic<uint32>), 16));
137#else
138 mBuckets = reinterpret_cast<atomic<uint32> *>(Allocate(inMaxBuckets * sizeof(atomic<uint32>)));
139#endif
140
141 Clear();
142}
143
144template <class Key, class Value>
146{
147#if JPH_DEFAULT_ALLOCATE_ALIGNMENT < 16
148 AlignedFree(mBuckets);
149#else
150 Free(mBuckets);
151#endif
152}
153
154template <class Key, class Value>
156{
157#ifdef JPH_ENABLE_ASSERTS
158 // Reset number of key value pairs
159 mNumKeyValues = 0;
160#endif // JPH_ENABLE_ASSERTS
162 // Reset buckets 4 at a time
163 static_assert(sizeof(atomic<uint32>) == sizeof(uint32));
164 UVec4 invalid_handle = UVec4::sReplicate(cInvalidHandle);
165 uint32 *start = reinterpret_cast<uint32 *>(mBuckets);
166 const uint32 *end = start + mNumBuckets;
167 JPH_ASSERT(IsAligned(start, 16));
168 while (start < end)
169 {
170 invalid_handle.StoreInt4Aligned(start);
171 start += 4;
172 }
173}
174
175template <class Key, class Value>
177{
178 JPH_ASSERT(mNumKeyValues == 0);
179 JPH_ASSERT(inNumBuckets <= mMaxBuckets);
180 JPH_ASSERT(inNumBuckets >= 4 && IsPowerOf2(inNumBuckets));
181
182 mNumBuckets = inNumBuckets;
183}
184
/// Insert a new key / value pair into the map (lock free with respect to other Create/Find calls).
/// @param ioContext Per-thread allocation context used to reserve storage for the pair.
/// @param inKey Key to insert; must not already be present in the map (this is not a multi map).
/// @param inKeyHash Hash of inKey; caller computes it so it can be reused for Find.
/// @param inExtraBytes Extra bytes to reserve behind the KeyValue (for variable sized values).
/// @param inConstructorParams Forwarded to the Value constructor.
/// @return Pointer to the constructed pair, or nullptr when the allocator is out of memory.
template <class Key, class Value>
template <class... Params>
inline typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Create(LFHMAllocatorContext &ioContext, const Key &inKey, uint64 inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
{
	// This is not a multi map, test the key hasn't been inserted yet
	JPH_ASSERT(Find(inKey, inKeyHash) == nullptr);

	// Calculate total size
	uint size = sizeof(KeyValue) + inExtraBytes;

	// Get the write offset for this key value pair
	uint32 write_offset;
	if (!ioContext.Allocate(size, alignof(KeyValue), write_offset))
		return nullptr;

#ifdef JPH_ENABLE_ASSERTS
	// Increment amount of entries in map
	mNumKeyValues.fetch_add(1, memory_order_relaxed);
#endif // JPH_ENABLE_ASSERTS

	// Construct the key/value pair
	KeyValue *kv = mAllocator.template FromOffset<KeyValue>(write_offset);
	JPH_ASSERT(intptr_t(kv) % alignof(KeyValue) == 0);
#ifdef JPH_DEBUG
	// Fill with a recognizable pattern so use of uninitialized bytes is easier to spot
	memset(kv, 0xcd, size);
#endif
	kv->mKey = inKey;
	new (&kv->mValue) Value(std::forward<Params>(inConstructorParams)...);

	// Get the offset to the first object from the bucket with corresponding hash
	atomic<uint32> &offset = mBuckets[inKeyHash & (mNumBuckets - 1)];

	// Add this entry as the first element in the linked list.
	// The release on the successful CAS publishes the fully constructed pair to readers that load the bucket with acquire.
	uint32 old_offset = offset.load(memory_order_relaxed);
	for (;;)
	{
		kv->mNextOffset = old_offset;
		if (offset.compare_exchange_weak(old_offset, write_offset, memory_order_release))
			break;
	}

	return kv;
}
228
229template <class Key, class Value>
230inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Find(const Key &inKey, uint64 inKeyHash) const
231{
232 // Get the offset to the keyvalue object from the bucket with corresponding hash
233 uint32 offset = mBuckets[inKeyHash & (mNumBuckets - 1)].load(memory_order_acquire);
234 while (offset != cInvalidHandle)
235 {
236 // Loop through linked list of values until the right one is found
237 const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
238 if (kv->mKey == inKey)
239 return kv;
240 offset = kv->mNextOffset;
241 }
242
243 // Not found
244 return nullptr;
245}
246
247template <class Key, class Value>
249{
250 return mAllocator.ToOffset(inKeyValue);
251}
252
253template <class Key, class Value>
255{
256 return mAllocator.template FromOffset<const KeyValue>(inHandle);
257}
258
259template <class Key, class Value>
261{
262 for (const atomic<uint32> *bucket = mBuckets; bucket < mBuckets + mNumBuckets; ++bucket)
263 {
264 uint32 offset = *bucket;
265 while (offset != cInvalidHandle)
266 {
267 const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
268 outAll.push_back(kv);
269 offset = kv->mNextOffset;
270 }
271 }
272}
273
274template <class Key, class Value>
276{
277 // Start with the first bucket
278 Iterator it { this, 0, mBuckets[0] };
279
280 // If it doesn't contain a valid entry, use the ++ operator to find the first valid entry
281 if (it.mOffset == cInvalidHandle)
282 ++it;
283
284 return it;
285}
286
287template <class Key, class Value>
289{
290 return { this, mNumBuckets, cInvalidHandle };
291}
292
293template <class Key, class Value>
295{
297
298 return *mMap->mAllocator.template FromOffset<KeyValue>(mOffset);
299}
300
301template <class Key, class Value>
303{
304 JPH_ASSERT(mBucket < mMap->mNumBuckets);
305
306 // Find the next key value in this bucket
307 if (mOffset != cInvalidHandle)
308 {
309 const KeyValue *kv = mMap->mAllocator.template FromOffset<const KeyValue>(mOffset);
310 mOffset = kv->mNextOffset;
311 if (mOffset != cInvalidHandle)
312 return *this;
313 }
314
315 // Loop over next buckets
316 for (;;)
317 {
318 // Next bucket
319 ++mBucket;
320 if (mBucket >= mMap->mNumBuckets)
321 return *this;
322
323 // Fetch the first entry in the bucket
324 mOffset = mMap->mBuckets[mBucket];
325 if (mOffset != cInvalidHandle)
326 return *this;
327 }
328}
329
330#ifdef JPH_DEBUG
331
332template <class Key, class Value>
334{
335 const int cMaxPerBucket = 256;
336
337 int max_objects_per_bucket = 0;
338 int num_objects = 0;
339 int histogram[cMaxPerBucket];
340 for (int i = 0; i < cMaxPerBucket; ++i)
341 histogram[i] = 0;
342
343 for (atomic<uint32> *bucket = mBuckets, *bucket_end = mBuckets + mNumBuckets; bucket < bucket_end; ++bucket)
344 {
345 int objects_in_bucket = 0;
346 uint32 offset = *bucket;
347 while (offset != cInvalidHandle)
348 {
349 const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
350 offset = kv->mNextOffset;
351 ++objects_in_bucket;
352 ++num_objects;
353 }
354 max_objects_per_bucket = max(objects_in_bucket, max_objects_per_bucket);
355 histogram[min(objects_in_bucket, cMaxPerBucket - 1)]++;
356 }
357
358 Trace("max_objects_per_bucket = %d, num_buckets = %u, num_objects = %d", max_objects_per_bucket, mNumBuckets, num_objects);
359
360 for (int i = 0; i < cMaxPerBucket; ++i)
361 if (histogram[i] != 0)
362 Trace("%d: %d", i, histogram[i]);
363}
364
365#endif
366
std::uint8_t uint8
Definition Core.h:506
std::uint64_t uint64
Definition Core.h:510
unsigned int uint
Definition Core.h:505
#define JPH_NAMESPACE_END
Definition Core.h:428
std::uint32_t uint32
Definition Core.h:508
#define JPH_NAMESPACE_BEGIN
Definition Core.h:422
TraceFunction Trace
Definition IssueReporting.cpp:14
#define JPH_ASSERT(...)
Definition IssueReporting.h:33
constexpr bool IsPowerOf2(T inV)
Check if inV is a power of 2.
Definition Math.h:76
bool IsAligned(T inV, uint64 inAlignment)
Check if inV is inAlignment aligned.
Definition Math.h:91
AllocateFunction Allocate
Definition Memory.cpp:68
FreeFunction Free
Definition Memory.cpp:70
AlignedFreeFunction AlignedFree
Definition Memory.cpp:72
AlignedAllocateFunction AlignedAllocate
Definition Memory.cpp:71
Definition Array.h:36
void push_back(const T &inValue)
Add element to the back of the array.
Definition Array.h:354
Definition LockFreeHashMap.h:49
LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize)
Construct a new allocator context.
Definition LockFreeHashMap.inl:88
bool Allocate(uint32 inSize, uint32 inAlignment, uint32 &outWriteOffset)
Allocate data block.
Definition LockFreeHashMap.inl:94
Allocator for a lock free hash map.
Definition LockFreeHashMap.h:14
uint32 ToOffset(const T *inData) const
Convert a pointer to an offset.
Definition LockFreeHashMap.inl:70
~LFHMAllocator()
Destructor.
Definition LockFreeHashMap.inl:13
void Init(uint inObjectStoreSizeBytes)
Definition LockFreeHashMap.inl:22
T * FromOffset(uint32 inOffset) const
Convert an offset to a pointer.
Definition LockFreeHashMap.inl:78
void Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
Definition LockFreeHashMap.inl:39
void Clear()
Clear all allocations.
Definition LockFreeHashMap.inl:34
A key / value pair that is inserted in the map.
Definition LockFreeHashMap.h:100
Definition LockFreeHashMap.h:72
const KeyValue * FromHandle(uint32 inHandle) const
Convert uint32 handle back to key and value.
Definition LockFreeHashMap.inl:254
Iterator begin()
Definition LockFreeHashMap.inl:275
void GetAllKeyValues(Array< const KeyValue * > &outAll) const
Get all key/value pairs.
Definition LockFreeHashMap.inl:260
void Init(uint32 inMaxBuckets)
Definition LockFreeHashMap.inl:127
void SetNumBuckets(uint32 inNumBuckets)
Definition LockFreeHashMap.inl:176
KeyValue * Create(LFHMAllocatorContext &ioContext, const Key &inKey, uint64 inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
Definition LockFreeHashMap.inl:187
~LockFreeHashMap()
Definition LockFreeHashMap.inl:145
const KeyValue * Find(const Key &inKey, uint64 inKeyHash) const
Find an element, returns null if not found.
Definition LockFreeHashMap.inl:230
uint32 ToHandle(const KeyValue *inKeyValue) const
Convert a key value pair to a uint32 handle.
Definition LockFreeHashMap.inl:248
Iterator end()
Definition LockFreeHashMap.inl:288
static const uint32 cInvalidHandle
Value of an invalid handle.
Definition LockFreeHashMap.h:123
void Clear()
Definition LockFreeHashMap.inl:155
Definition UVec4.h:12
static JPH_INLINE UVec4 sReplicate(uint32 inV)
Replicate int inV across all components.
Definition UVec4.inl:75
JPH_INLINE void StoreInt4Aligned(uint32 *outV) const
Store 4 ints to memory, aligned to 16 bytes.
Definition UVec4.inl:599
Non-const iterator.
Definition LockFreeHashMap.h:142
uint32 mOffset
Definition LockFreeHashMap.h:155
Iterator & operator++()
Next item.
Definition LockFreeHashMap.inl:302
MapType * mMap
Definition LockFreeHashMap.h:153
KeyValue & operator*()
Convert to key value pair.
Definition LockFreeHashMap.inl:294