Krita Source Code Documentation
leapfrog.h
1/*------------------------------------------------------------------------
2 Junction: Concurrent data structures in C++
3 Copyright (c) 2016 Jeff Preshing
4 Distributed under the Simplified BSD License.
5 Original location: https://github.com/preshing/junction
6 This software is distributed WITHOUT ANY WARRANTY; without even the
7 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
8 See the LICENSE file for more information.
9------------------------------------------------------------------------*/
10
11#ifndef LEAPFROG_H
12#define LEAPFROG_H
13
14#include "map_traits.h"
15#include "simple_job_coordinator.h"
16#include "kis_assert.h"
17
18#define SANITY_CHECK
19
20template <class Map>
21struct Leapfrog {
22 typedef typename Map::Hash Hash;
23 typedef typename Map::Value Value;
24 typedef typename Map::KeyTraits KeyTraits;
25 typedef typename Map::ValueTraits ValueTraits;
26
27 static const quint64 InitialSize = 8;
28 static const quint64 TableMigrationUnitSize = 32;
29 static const quint64 LinearSearchLimit = 128;
30 static const quint64 CellsInUseSample = LinearSearchLimit;
31
32 Q_STATIC_ASSERT(LinearSearchLimit > 0 && LinearSearchLimit < 256); // Must fit in CellGroup::links
33 Q_STATIC_ASSERT(CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit); // Limit sample to failed search chain
34
35 struct Cell {
36 Atomic<Hash> hash;
37 Atomic<Value> value;
38 };
39
40 struct CellGroup {
41 // Every cell in the table actually represents a bucket of cells, all linked together in a probe chain.
42 // Each cell in the probe chain is located within the table itself.
43 // "deltas" determines the index of the next cell in the probe chain.
44 // The first cell in the chain is the one that was hashed. It may or may not actually belong in the bucket.
45 // The "second" cell in the chain is given by deltas 0 - 3. It's guaranteed to belong in the bucket.
46 All subsequent cells in the chain are given by deltas 4 - 7. They are also guaranteed to belong in the bucket.
47 Atomic<quint8> deltas[8];
48 Cell cells[4];
49 };
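// A minimal sketch (not part of the original header) of how a flat cell index maps onto
// groups and link slots, matching the arithmetic used in find() and insertOrFind() below:
//
//   quint64 idx = hash & table->sizeMask;                      // "home" cell of the bucket
//   CellGroup* group = table->getCellGroups() + (idx >> 2);    // 4 cells per group
//   Cell* cell = group->cells + (idx & 3);                     // cell within the group
//
//   quint8 first = group->deltas[idx & 3].load(Relaxed);       // home cell -> first bucket member
//   quint8 next  = group->deltas[(idx & 3) + 4].load(Relaxed); // bucket member -> next member
//
// A delta of 0 marks the end of the probe chain; a non-zero delta is added to idx
// (modulo sizeMask + 1) to reach the next cell of the same bucket.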
50
51 struct Table {
52 const quint64 sizeMask; // a power of two minus one
53 QMutex mutex; // used to double-checked-lock (DCLI) the TableMigration stored in the jobCoordinator
54 SimpleJobCoordinator jobCoordinator; // makes all blocked threads participate in the migration
55
56 Table(quint64 sizeMask) : sizeMask(sizeMask)
57 {
58 }
59
60 static Table* create(quint64 tableSize)
61 {
62#ifdef SANITY_CHECK
63 KIS_ASSERT_RECOVER_NOOP(isPowerOf2(tableSize));
64 KIS_ASSERT_RECOVER_NOOP(tableSize >= 4);
65#endif // SANITY_CHECK
66 quint64 numGroups = tableSize >> 2;
67 Table* table = (Table*) std::malloc(sizeof(Table) + sizeof(CellGroup) * numGroups);
68 new (table) Table(tableSize - 1);
69
70 for (quint64 i = 0; i < numGroups; i++) {
71 CellGroup* group = table->getCellGroups() + i;
72
73 for (quint64 j = 0; j < 4; j++) {
74 group->deltas[j].storeNonatomic(0);
75 group->deltas[j + 4].storeNonatomic(0);
76 group->cells[j].hash.storeNonatomic(KeyTraits::NullHash);
77 group->cells[j].value.storeNonatomic(Value(ValueTraits::NullValue));
78 }
79 }
80 return table;
81 }
82
83 void destroy()
84 {
85 this->Table::~Table();
86 std::free(this);
87 }
88
89 CellGroup* getCellGroups() const
90 {
91 return (CellGroup*)(this + 1);
92 }
93
94 quint64 getNumMigrationUnits() const
95 {
96 return sizeMask / TableMigrationUnitSize + 1;
97 }
98 };
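// A minimal sketch (not part of the original header) of how a Table is laid out and used.
// The Table header and its cell groups live in a single malloc'd block, which is why
// getCellGroups() simply returns (CellGroup*)(this + 1):
//
//   Table* t = Table::create(8);                // sizeMask == 7; 8 >> 2 == 2 CellGroups follow the header
//   quint64 units = t->getNumMigrationUnits();  // 7 / TableMigrationUnitSize + 1 == 1 migration unit
//   t->destroy();                               // runs the destructor in place, then std::free()
//
// tableSize must be a power of two (and at least 4) so that sizeMask can be used for
// cheap modulo arithmetic in the probing code.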
99
100 class TableMigration : public SimpleJobCoordinator::Job
101 {
102 public:
103 struct Source {
104 Table* table;
105 Atomic<quint64> sourceIndex;
106 };
107
108 Map& m_map;
109 Table* m_destination;
110 Atomic<quint64> m_workerStatus; // number of workers + end flag
111 Atomic<bool> m_overflowed;
112 Atomic<qint64> m_unitsRemaining;
113 quint64 m_numSources {0};
114
115 TableMigration(Map& map) : m_map(map)
116 {
117 }
118
119 static TableMigration* create(Map& map, quint64 numSources)
120 {
121 TableMigration* migration =
122 (TableMigration*) std::malloc(sizeof(TableMigration) + sizeof(TableMigration::Source) * numSources);
123 new (migration) TableMigration(map);
124
125 migration->m_workerStatus.storeNonatomic(0);
126 migration->m_overflowed.storeNonatomic(false);
127 migration->m_unitsRemaining.storeNonatomic(0);
128 migration->m_numSources = numSources;
129 // Caller is responsible for filling in sources & destination
130 return migration;
131 }
132
133 virtual ~TableMigration() override
134 {
135 }
136
137 void destroy()
138 {
139 // Destroy all source tables.
140 for (quint64 i = 0; i < m_numSources; i++)
141 if (getSources()[i].table)
142 getSources()[i].table->destroy();
143 // Delete the migration object itself.
144 this->TableMigration::~TableMigration();
145 std::free(this);
146 }
147
148 Source* getSources() const
149 {
150 return (Source*)(this + 1);
151 }
152
153 bool migrateRange(Table* srcTable, quint64 startIdx);
154 virtual void run() override;
155 };
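// A minimal sketch (not part of the original header) of the TableMigration allocation layout.
// Like Table, the object is followed in the same allocation by a trailing array, here of
// Source entries, which is why getSources() returns (Source*)(this + 1). 'map', 'oldTable'
// and 'newSize' below are placeholders; the real call sites are beginTableMigrationToSize()
// and the overflow path at the end of run():
//
//   TableMigration* m = TableMigration::create(map, 1);              // room for 1 Source entry
//   m->m_unitsRemaining.storeNonatomic(oldTable->getNumMigrationUnits());
//   m->getSources()[0].table = oldTable;                             // caller fills in the sources...
//   m->getSources()[0].sourceIndex.storeNonatomic(0);
//   m->m_destination = Table::create(newSize);                       // ...and the destination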
156
157 static Cell* find(Hash hash, Table* table)
158 {
159#ifdef SANITY_CHECK
160 KIS_ASSERT_RECOVER_NOOP(table);
161 KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
162#endif // SANITY_CHECK
163 quint64 sizeMask = table->sizeMask;
164 // Optimistically check hashed cell even though it might belong to another bucket
165 quint64 idx = hash & sizeMask;
166 CellGroup* group = table->getCellGroups() + (idx >> 2);
167 Cell* cell = group->cells + (idx & 3);
168 Hash probeHash = cell->hash.load(Relaxed);
169
170 if (probeHash == hash) {
171 return cell;
172 } else if (probeHash == KeyTraits::NullHash) {
173 return cell = NULL;
174 }
175 // Follow probe chain for our bucket
176 quint8 delta = group->deltas[idx & 3].load(Relaxed);
177 while (delta) {
178 idx = (idx + delta) & sizeMask;
179 group = table->getCellGroups() + (idx >> 2);
180 cell = group->cells + (idx & 3);
181 Hash probeHash = cell->hash.load(Relaxed);
182 // Note: probeHash might actually be NULL due to memory reordering of a concurrent insert,
183 // but we don't check for it. We just follow the probe chain.
184 if (probeHash == hash) {
185 return cell;
186 }
187 delta = group->deltas[(idx & 3) + 4].load(Relaxed);
188 }
189 // End of probe chain, not found
190 return NULL;
191 }
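// A minimal usage sketch (not part of the original header), assuming a caller such as the
// map class that instantiates this template:
//
//   Cell* cell = Leapfrog<Map>::find(hash, table);
//   Value v = cell ? cell->value.load(Relaxed) : Value(ValueTraits::NullValue);
//
// A non-null cell only means the hash was found; the caller still has to interpret the
// value, which may be NullValue (erased) or Redirect (the entry has moved to a newer table).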
192
193 // FIXME: Possible optimization: Dedicated insert for migration? It wouldn't check for InsertResult_AlreadyFound.
194 enum InsertResult { InsertResult_AlreadyFound, InsertResult_InsertedNew, InsertResult_Overflow };
195 static InsertResult insertOrFind(Hash hash, Table* table, Cell*& cell, quint64& overflowIdx)
196 {
197#ifdef SANITY_CHECK
198 KIS_ASSERT_RECOVER_NOOP(table);
199 KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
200#endif // SANITY_CHECK
201 quint64 sizeMask = table->sizeMask;
202 quint64 idx = quint64(hash);
203
204 // Check hashed cell first, though it may not even belong to the bucket.
205 CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
206 cell = group->cells + (idx & 3);
207 Hash probeHash = cell->hash.load(Relaxed);
208
209 if (probeHash == KeyTraits::NullHash) {
210 if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) {
211 // There are no links to set. We're done.
212 return InsertResult_InsertedNew;
213 } else {
214 // Fall through to check if it was the same hash...
215 }
216 }
217
218 if (probeHash == hash) {
219 return InsertResult_AlreadyFound; // Found in the table.
220 }
221
222 // Follow the link chain for this bucket.
223 quint64 maxIdx = idx + sizeMask;
224 quint64 linkLevel = 0;
225 Atomic<quint8>* prevLink;
226 for (;;) {
227 followLink:
228 prevLink = group->deltas + ((idx & 3) + linkLevel);
229 linkLevel = 4;
230 quint8 probeDelta = prevLink->load(Relaxed);
231
232 if (probeDelta) {
233 idx += probeDelta;
234 // Check the hash for this cell.
235 group = table->getCellGroups() + ((idx & sizeMask) >> 2);
236 cell = group->cells + (idx & 3);
237 probeHash = cell->hash.load(Relaxed);
238
239 if (probeHash == KeyTraits::NullHash) {
240 // Cell was linked, but hash is not visible yet.
241 // We could avoid this case (and guarantee it's visible) using acquire & release, but instead,
242 // just poll until it becomes visible.
243 do {
244 probeHash = cell->hash.load(Acquire);
245 } while (probeHash == KeyTraits::NullHash);
246 }
247
248#ifdef SANITY_CHECK
249 KIS_ASSERT_RECOVER_NOOP(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in same bucket can be linked
250#endif // SANITY_CHECK
251 if (probeHash == hash) {
252 return InsertResult_AlreadyFound; // Found in the probe chain.
253 }
254 } else {
255 // Reached the end of the link chain for this bucket.
256 // Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket.
257 quint64 prevLinkIdx = idx;
258#ifdef SANITY_CHECK
259 KIS_ASSERT_RECOVER_NOOP(qint64(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range.
260#endif // SANITY_CHECK
261 quint64 linearProbesRemaining = qMin(maxIdx - idx, quint64(LinearSearchLimit));
262
263 while (linearProbesRemaining-- > 0) {
264 idx++;
265 group = table->getCellGroups() + ((idx & sizeMask) >> 2);
266 cell = group->cells + (idx & 3);
267 probeHash = cell->hash.load(Relaxed);
268
269 if (probeHash == KeyTraits::NullHash) {
270 // It's an empty cell. Try to reserve it.
271 if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) {
272 // Success. We've reserved the cell. Link it to previous cell in same bucket.
273#ifdef SANITY_CHECK
274 KIS_ASSERT_RECOVER_NOOP(probeDelta == 0);
275#endif // SANITY_CHECK
276 quint8 desiredDelta = idx - prevLinkIdx;
277 prevLink->store(desiredDelta, Relaxed);
278 return InsertResult_InsertedNew;
279 } else {
280 // Fall through to check if it's the same hash...
281 }
282 }
283 Hash x = (probeHash ^ hash);
284 // Check for same hash.
285 if (!x) {
286 return InsertResult_AlreadyFound; // Found during linear probing.
287 }
288 // Check for same bucket.
289 if ((x & sizeMask) == 0) {
290 // Attempt to set the link on behalf of the late-arriving cell.
291 // This is usually redundant, but if we don't attempt to set the late-arriving cell's link here,
292 // there's no guarantee that our own link chain will be well-formed by the time this function returns.
293 // (Indeed, subsequent lookups sometimes failed during testing, for this exact reason.)
294 quint8 desiredDelta = idx - prevLinkIdx;
295 prevLink->store(desiredDelta, Relaxed);
296 goto followLink; // Try to follow link chain for the bucket again.
297 }
298 // Continue linear search...
299 }
300 // Table is too full to insert.
301 overflowIdx = idx + 1;
302 return InsertResult_Overflow;
303 }
304 }
305 }
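// A minimal sketch (not part of the original header) of how a caller reacts to each
// InsertResult, modelled on how this file itself uses the function during migration.
// 'newValue' and 'map' are placeholders; the real insertion path lives in the map class
// that instantiates Leapfrog and must also cope with Redirect values:
//
//   Cell* cell;
//   quint64 overflowIdx;
//   switch (Leapfrog<Map>::insertOrFind(hash, table, cell, overflowIdx)) {
//   case InsertResult_InsertedNew:   // cell->hash is now reserved; store the value next
//   case InsertResult_AlreadyFound:  // cell already holds this hash; inspect/update cell->value
//       cell->value.store(newValue, Relaxed);   // illustrative only
//       break;
//   case InsertResult_Overflow:      // probe chain too long; grow the table
//       beginTableMigration(map, table, overflowIdx);
//       break;
//   }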
306
307 static void beginTableMigrationToSize(Map& map, Table* table, quint64 nextTableSize)
308 {
309 // Create the new migration using double-checked locking (DCLI).
310 SimpleJobCoordinator::Job* job = table->jobCoordinator.loadConsume();
311 if (job) {
312 // new migration already exists
313 } else {
314 QMutexLocker guard(&table->mutex);
315 job = table->jobCoordinator.loadConsume(); // Non-atomic would be sufficient, but that's OK.
316
317 if (job) {
318 // new migration already exists (double-checked)
319 } else {
320 // Create new migration.
321 TableMigration* migration = TableMigration::create(map, 1);
322 migration->m_unitsRemaining.storeNonatomic(table->getNumMigrationUnits());
323 migration->getSources()[0].table = table;
324 migration->getSources()[0].sourceIndex.storeNonatomic(0);
325 migration->m_destination = Table::create(nextTableSize);
326 // Publish the new migration.
327 table->jobCoordinator.storeRelease(migration);
328 }
329 }
330 }
331
332 static void beginTableMigration(Map& map, Table* table, quint64 overflowIdx)
333 {
334 // Estimate number of cells in use based on a small sample.
335 quint64 sizeMask = table->sizeMask;
336 quint64 idx = overflowIdx - CellsInUseSample;
337 quint64 inUseCells = 0;
338 for (quint64 linearProbesRemaining = CellsInUseSample; linearProbesRemaining > 0; linearProbesRemaining--) {
339 CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
340 Cell* cell = group->cells + (idx & 3);
341 Value value = cell->value.load(Relaxed);
342 if (value == Value(ValueTraits::Redirect)) {
343 // Another thread kicked off the jobCoordinator. The caller will participate upon return.
344 return;
345 }
346 if (value != Value(ValueTraits::NullValue))
347 inUseCells++;
348 idx++;
349 }
350 float inUseRatio = float(inUseCells) / CellsInUseSample;
351 float estimatedInUse = (sizeMask + 1) * inUseRatio;
352 quint64 nextTableSize = qMax(quint64(InitialSize), roundUpPowerOf2(quint64(estimatedInUse * 2)));
353 beginTableMigrationToSize(map, table, nextTableSize);
354 }
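// A worked example (not part of the original header) of the sizing estimate above:
// with CellsInUseSample == 128, suppose 96 of the sampled cells hold live values.
//
//   inUseRatio     = 96 / 128.0             = 0.75
//   estimatedInUse = (sizeMask + 1) * 0.75  // e.g. 1024 * 0.75 = 768 for a 1024-cell table
//   nextTableSize  = roundUpPowerOf2(768 * 2) = 2048
//
// Doubling the live-cell estimate keeps the new table roughly half full right after the
// migration, and the qMax() with InitialSize stops a nearly empty table from shrinking
// below 8 cells.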
355}; // Leapfrog
356
357template <class Map>
358bool Leapfrog<Map>::TableMigration::migrateRange(Table* srcTable, quint64 startIdx)
359{
360 quint64 srcSizeMask = srcTable->sizeMask;
361 quint64 endIdx = qMin(startIdx + TableMigrationUnitSize, srcSizeMask + 1);
362 // Iterate over source range.
363 for (quint64 srcIdx = startIdx; srcIdx < endIdx; srcIdx++) {
364 CellGroup* srcGroup = srcTable->getCellGroups() + ((srcIdx & srcSizeMask) >> 2);
365 Cell* srcCell = srcGroup->cells + (srcIdx & 3);
366 Hash srcHash;
367 Value srcValue;
368 // Fetch the srcHash and srcValue.
369 for (;;) {
370 srcHash = srcCell->hash.load(Relaxed);
371 if (srcHash == KeyTraits::NullHash) {
372 // An unused cell. Try to put a Redirect marker in its value.
373 srcValue =
374 srcCell->value.compareExchange(Value(ValueTraits::NullValue), Value(ValueTraits::Redirect), Relaxed);
375 if (srcValue == Value(ValueTraits::Redirect)) {
376 // srcValue is already marked Redirect due to previous incomplete migration.
377 break;
378 }
379 if (srcValue == Value(ValueTraits::NullValue)) {
380 break; // Redirect has been placed. Break inner loop, continue outer loop.
381 }
382 // Otherwise, somebody just claimed the cell. Read srcHash again...
383 } else {
384 // Check for deleted/uninitialized value.
385 srcValue = srcCell->value.load(Relaxed);
386 if (srcValue == Value(ValueTraits::NullValue)) {
387 // Try to put a Redirect marker.
388 if (srcCell->value.compareExchangeStrong(srcValue, Value(ValueTraits::Redirect), Relaxed)) {
389 break; // Redirect has been placed. Break inner loop, continue outer loop.
390 }
391
392 if (srcValue == Value(ValueTraits::Redirect)) {
393 // FIXME: I don't think this will happen. Investigate & change to assert
394 break;
395 }
396 } else if (srcValue == Value(ValueTraits::Redirect)) {
397 // srcValue is already marked Redirect due to previous incomplete migration.
398 break;
399 }
400
401 // We've got a key/value pair to migrate.
402 // Reserve a destination cell in the destination.
403#ifdef SANITY_CHECK
404 KIS_ASSERT_RECOVER_NOOP(srcHash != KeyTraits::NullHash);
405 KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::NullValue));
406 KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::Redirect));
407#endif // SANITY_CHECK
408 Cell* dstCell;
409 quint64 overflowIdx;
410 InsertResult result = insertOrFind(srcHash, m_destination, dstCell, overflowIdx);
411 // During migration, a hash can only exist in one place among all the source tables,
412 // and it is only migrated by one thread. Therefore, the hash will never already exist
413 // in the destination table:
414#ifdef SANITY_CHECK
415 KIS_ASSERT_RECOVER_NOOP(result != InsertResult_AlreadyFound);
416#endif // SANITY_CHECK
417 if (result == InsertResult_Overflow) {
418 // Destination overflow.
419 // This can happen for several reasons. For example, the source table could have
420 // consisted mostly of deleted cells when it overflowed, resulting in a small destination
421 // table size, but then another thread could re-insert all the same hashes
422 // before the migration completed.
423 // Caller will cancel the current migration and begin a new one.
424 return false;
425 }
426 // Migrate the old value to the new cell.
427 for (;;) {
428 // Copy srcValue to the destination.
429 dstCell->value.store(srcValue, Relaxed);
430 // Try to place a Redirect marker in srcValue.
431 Value doubleCheckedSrcValue = srcCell->value.compareExchange(srcValue, Value(ValueTraits::Redirect), Relaxed);
432#ifdef SANITY_CHECK
433 KIS_ASSERT_RECOVER_NOOP(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
434#endif // SANITY_CHECK
435 if (doubleCheckedSrcValue == srcValue) {
436 // No racing writes to the src. We've successfully placed the Redirect marker.
437 // srcValue was non-NULL when we decided to migrate it, but it may have changed to NULL
438 // by a late-arriving erase.
439 if (srcValue == Value(ValueTraits::NullValue)) {
440 // The racing update was an erase; the NullValue already copied to the destination is correct.
441 }
442
443 break;
444 }
445 // There was a late-arriving write (or erase) to the src. Migrate the new value and try again.
446 srcValue = doubleCheckedSrcValue;
447 }
448 // Cell successfully migrated. Proceed to next source cell.
449 break;
450 }
451 }
452 }
453 // Range has been migrated successfully.
454 return true;
455}
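// A compact summary (not part of the original header) of the per-cell protocol used above.
// Each source cell ends up holding Redirect exactly once, and its last real value is copied
// to the destination before that happens:
//
//   empty cell   : value NullValue -> Redirect (CAS), nothing to copy
//   deleted cell : hash set, value NullValue -> Redirect (CAS), nothing to copy
//   live cell    : copy value to dstCell, then CAS value -> Redirect; if a racing write or
//                  erase slips in first, re-copy the newer value and retry
//
// A value of Redirect tells readers that this table is stale and that the entry, if any,
// must be looked up in the destination table instead.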
456
457template <class Map>
458void Leapfrog<Map>::TableMigration::run()
459{
460#ifdef SANITY_CHECK
461 KIS_ASSERT_RECOVER_NOOP(m_map.getGC().sanityRawPointerAccessLocked());
462#endif // SANITY_CHECK
463
464
465 // Conditionally increment the shared # of workers.
466 quint64 probeStatus = m_workerStatus.load(Relaxed);
467 do {
468 if (probeStatus & 1) {
469 // End flag is already set, so do nothing.
470 return;
471 }
472 } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, Relaxed, Relaxed));
473 // # of workers has been incremented, and the end flag is clear.
474#ifdef SANITY_CHECK
475 KIS_ASSERT_RECOVER_NOOP((probeStatus & 1) == 0);
476#endif // SANITY_CHECK
477
478 // Iterate over all source tables.
479 for (quint64 s = 0; s < m_numSources; s++) {
480 Source& source = getSources()[s];
481 // Loop over all migration units in this source table.
482 for (;;) {
483 if (m_workerStatus.load(Relaxed) & 1) {
484 goto endMigration;
485 }
486 quint64 startIdx = source.sourceIndex.fetchAdd(TableMigrationUnitSize, Relaxed);
487 if (startIdx >= source.table->sizeMask + 1)
488 break; // No more migration units in this table. Try next source table.
489 bool overflowed = !migrateRange(source.table, startIdx);
490 if (overflowed) {
491 // *** FAILED MIGRATION ***
492 // TableMigration failed due to destination table overflow.
493 // No other thread can declare the migration successful at this point, because *this* unit will never complete,
494 // hence m_unitsRemaining won't reach zero.
495 // However, multiple threads can independently detect a failed migration at the same time.
496 // The reason we store overflowed in a shared variable is that we must flush all the worker threads before
497 // we can safely deal with the overflow. Therefore, the thread that detects the failure is often different from
498 // the thread
499 // that deals with it.
500 bool oldOverflowed = m_overflowed.exchange(overflowed, Relaxed);
501 if (oldOverflowed) {
502 // race to set m_overflowed
503 }
504
505 m_workerStatus.fetchOr(1, Relaxed);
506 goto endMigration;
507 }
508
509 qint64 prevRemaining = m_unitsRemaining.fetchSub(1, Relaxed);
510#ifdef SANITY_CHECK
511 KIS_ASSERT_RECOVER_NOOP(prevRemaining > 0);
512#endif // SANITY_CHECK
513 if (prevRemaining == 1) {
514 // *** SUCCESSFUL MIGRATION ***
515 // That was the last chunk to migrate.
516 m_workerStatus.fetchOr(1, Relaxed);
517 goto endMigration;
518 }
519 }
520 }
521
522endMigration:
523 // Decrement the shared # of workers.
524 probeStatus = m_workerStatus.fetchSub(2, AcquireRelease); // AcquireRelease makes all previous writes visible to the last worker thread.
525 if (probeStatus >= 4) {
526 // There are other workers remaining. Return here so that only the very last worker will proceed.
527 return;
528 }
529
530 // We're the very last worker thread.
531 // Perform the appropriate post-migration step depending on whether the migration succeeded or failed.
532#ifdef SANITY_CHECK
533 KIS_ASSERT_RECOVER_NOOP(probeStatus == 3);
534#endif // SANITY_CHECK
535 bool overflowed = m_overflowed.loadNonatomic(); // No racing writes at this point
536 if (!overflowed) {
537 // The migration succeeded. This is the most likely outcome. Publish the new table.
538 m_map.publishTableMigration(this);
539 // End the jobCoordinator.
540 getSources()[0].table->jobCoordinator.end();
541 } else {
542 // The migration failed due to the overflow of the destination table.
543 Table* origTable = getSources()[0].table;
544 QMutexLocker guard(&origTable->mutex);
545 SimpleJobCoordinator::Job* checkedJob = origTable->jobCoordinator.loadConsume();
546
547 if (checkedJob != this) {
548 // a new TableMigration was already started
549 } else {
550 TableMigration* migration = TableMigration::create(m_map, m_numSources + 1);
551 // Double the destination table size.
552 migration->m_destination = Table::create((m_destination->sizeMask + 1) * 2);
553 // Transfer source tables to the new migration.
554 for (quint64 i = 0; i < m_numSources; i++) {
555 migration->getSources()[i].table = getSources()[i].table;
556 getSources()[i].table = NULL;
557 migration->getSources()[i].sourceIndex.storeNonatomic(0);
558 }
559
560 migration->getSources()[m_numSources].table = m_destination;
561 migration->getSources()[m_numSources].sourceIndex.storeNonatomic(0);
562 // Calculate total number of migration units to move.
563 quint64 unitsRemaining = 0;
564 for (quint64 s = 0; s < migration->m_numSources; s++) {
565 unitsRemaining += migration->getSources()[s].table->getNumMigrationUnits();
566 }
567
568 migration->m_unitsRemaining.storeNonatomic(unitsRemaining);
569 // Publish the new migration.
570 origTable->jobCoordinator.storeRelease(migration);
571 }
572 }
573
574 // We're done with this TableMigration. Queue it for GC.
575 m_map.getGC().enqueue(&TableMigration::destroy, this, true);
576}
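// A minimal sketch (not part of the original header) of the m_workerStatus encoding used in
// run(): bit 0 is the "end" flag, and the remaining bits count active workers in steps of 2.
//
//   m_workerStatus.compareExchangeWeak(status, status + 2, Relaxed, Relaxed); // worker enters
//   m_workerStatus.fetchOr(1, Relaxed);                                       // someone signals "stop"
//   status = m_workerStatus.fetchSub(2, AcquireRelease);                      // worker leaves
//   // status >= 4 -> other workers still running, just return
//   // status == 3 -> we were the last worker and the end flag is set: do the post-migration step
//
// The AcquireRelease on the final fetchSub is what makes every worker's writes visible to
// the single thread that performs the post-migration step.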
577
578#endif // LEAPFROG_H