Krita Source Code Documentation
Leapfrog< Map > Struct Template Reference

#include <leapfrog.h>

Classes

struct  Cell
 
struct  CellGroup
 
struct  Table
 
class  TableMigration
 

Public Types

typedef Map::Hash Hash
 
enum  InsertResult { InsertResult_AlreadyFound , InsertResult_InsertedNew , InsertResult_Overflow }
 
typedef Map::KeyTraits KeyTraits
 
typedef Map::Value Value
 
typedef Map::ValueTraits ValueTraits
 

Public Member Functions

 Q_STATIC_ASSERT (CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit)
 
 Q_STATIC_ASSERT (LinearSearchLimit > 0 && LinearSearchLimit < 256)
 

Static Public Member Functions

static void beginTableMigration (Map &map, Table *table, quint64 overflowIdx)
 
static void beginTableMigrationToSize (Map &map, Table *table, quint64 nextTableSize)
 
static Cell * find (Hash hash, Table *table)
 
static InsertResult insertOrFind (Hash hash, Table *table, Cell *&cell, quint64 &overflowIdx)
 

Static Public Attributes

static const quint64 CellsInUseSample = LinearSearchLimit
 
static const quint64 InitialSize = 8
 
static const quint64 LinearSearchLimit = 128
 
static const quint64 TableMigrationUnitSize = 32
 

Detailed Description

template<class Map>
struct Leapfrog< Map >

Definition at line 21 of file leapfrog.h.
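
The listings below address the table in groups of four cells: the low two bits of a masked index select a cell inside a CellGroup and the remaining bits select the group, which is why every lookup computes (idx >> 2) and (idx & 3). A minimal sketch of that index split, assuming only the quint64 types used throughout (the CellCoords/cellCoords names are illustrative and not part of leapfrog.h):

#include <QtGlobal>

// Illustration only: split a masked hash the same way the member functions
// below do with (idx >> 2) and (idx & 3).
struct CellCoords {
    quint64 groupIdx; // which CellGroup (4 cells per group)
    quint64 slot;     // which cell inside that group (0..3)
};

inline CellCoords cellCoords(quint64 hash, quint64 sizeMask)
{
    const quint64 idx = hash & sizeMask; // position inside the table
    return { idx >> 2, idx & 3 };
}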

Member Typedef Documentation

◆ Hash

template<class Map >
typedef Map::Hash Leapfrog< Map >::Hash

Definition at line 22 of file leapfrog.h.

◆ KeyTraits

template<class Map >
typedef Map::KeyTraits Leapfrog< Map >::KeyTraits

Definition at line 24 of file leapfrog.h.

◆ Value

template<class Map >
typedef Map::Value Leapfrog< Map >::Value

Definition at line 23 of file leapfrog.h.

◆ ValueTraits

template<class Map >
typedef Map::ValueTraits Leapfrog< Map >::ValueTraits

Definition at line 25 of file leapfrog.h.

Member Enumeration Documentation

◆ InsertResult

template<class Map >
enum Leapfrog< Map >::InsertResult
Enumerator
InsertResult_AlreadyFound 
InsertResult_InsertedNew 
InsertResult_Overflow 

Definition at line 194 of file leapfrog.h.

Member Function Documentation

◆ beginTableMigration()

template<class Map >
static void Leapfrog< Map >::beginTableMigration ( Map & map, Table * table, quint64 overflowIdx )
inline static

Definition at line 332 of file leapfrog.h.

{
    // Estimate number of cells in use based on a small sample.
    quint64 sizeMask = table->sizeMask;
    quint64 idx = overflowIdx - CellsInUseSample;
    quint64 inUseCells = 0;
    for (quint64 linearProbesRemaining = CellsInUseSample; linearProbesRemaining > 0; linearProbesRemaining--) {
        CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
        Cell* cell = group->cells + (idx & 3);
        Value value = cell->value.load(Relaxed);
        if (value == Value(ValueTraits::Redirect)) {
            // Another thread kicked off the jobCoordinator. The caller will participate upon return.
            return;
        }
        if (value != Value(ValueTraits::NullValue))
            inUseCells++;
        idx++;
    }
    float inUseRatio = float(inUseCells) / CellsInUseSample;
    float estimatedInUse = (sizeMask + 1) * inUseRatio;
    quint64 nextTableSize = qMax(quint64(InitialSize), roundUpPowerOf2(quint64(estimatedInUse * 2)));
    beginTableMigrationToSize(map, table, nextTableSize);
}

References Leapfrog< Map >::beginTableMigrationToSize(), Leapfrog< Map >::CellGroup::cells, Leapfrog< Map >::CellsInUseSample, Leapfrog< Map >::Table::getCellGroups(), Leapfrog< Map >::InitialSize, Atomic< T >::load(), Relaxed, roundUpPowerOf2(), Leapfrog< Map >::Table::sizeMask, Leapfrog< Map >::Cell::value, and value().
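
beginTableMigration() sizes the replacement table from a small sample: it inspects the CellsInUseSample cells immediately preceding overflowIdx, extrapolates that occupancy ratio to the whole table, and doubles the estimate, never going below InitialSize. A worked sketch of that arithmetic with purely illustrative numbers (a 1024-cell table where 96 of the 128 sampled cells are occupied):

#include <QtGlobal>

quint64 roundUpPowerOf2(quint64 v); // declared in map_traits.h (see References above)

// Illustration only: the sizing arithmetic of beginTableMigration() with made-up sample numbers.
quint64 exampleNextTableSize()
{
    quint64 sizeMask = 1023;                              // table of 1024 cells
    quint64 inUseCells = 96;                              // of CellsInUseSample == 128 sampled cells
    float inUseRatio = float(inUseCells) / 128;           // 0.75
    float estimatedInUse = (sizeMask + 1) * inUseRatio;   // ~768 cells estimated in use
    // 768 * 2 == 1536, rounded up to the next power of two, but never below InitialSize == 8:
    return qMax(quint64(8), roundUpPowerOf2(quint64(estimatedInUse * 2)));  // == 2048
}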

◆ beginTableMigrationToSize()

template<class Map >
static void Leapfrog< Map >::beginTableMigrationToSize ( Map & map, Table * table, quint64 nextTableSize )
inline static

Definition at line 307 of file leapfrog.h.

{
    // Create new migration by DCLI.
    SimpleJobCoordinator::Job* job = table->jobCoordinator.loadConsume();
    if (job) {
        // new migration already exists
    } else {
        QMutexLocker guard(&table->mutex);
        job = table->jobCoordinator.loadConsume(); // Non-atomic would be sufficient, but that's OK.

        if (job) {
            // new migration already exists (double-checked)
        } else {
            // Create new migration.
            TableMigration* migration = TableMigration::create(map, 1);
            migration->m_unitsRemaining.storeNonatomic(table->getNumMigrationUnits());
            migration->getSources()[0].table = table;
            migration->getSources()[0].sourceIndex.storeNonatomic(0);
            migration->m_destination = Table::create(nextTableSize);
            // Publish the new migration.
            table->jobCoordinator.storeRelease(migration);
        }
    }
}

References Leapfrog< Map >::TableMigration::create(), Leapfrog< Map >::Table::create(), Leapfrog< Map >::Table::getNumMigrationUnits(), Leapfrog< Map >::TableMigration::getSources(), Leapfrog< Map >::Table::jobCoordinator, SimpleJobCoordinator::loadConsume(), Leapfrog< Map >::TableMigration::m_destination, Leapfrog< Map >::TableMigration::m_unitsRemaining, Leapfrog< Map >::Table::mutex, Leapfrog< Map >::TableMigration::Source::sourceIndex, Atomic< T >::storeNonatomic(), SimpleJobCoordinator::storeRelease(), and Leapfrog< Map >::TableMigration::Source::table.
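
beginTableMigrationToSize() publishes at most one TableMigration per table via double-checked locking (the "DCLI" in the listing): jobCoordinator is read once without the lock, and again under table->mutex before a new migration is created and release-published. A minimal standalone sketch of that publication pattern, restated with std::atomic and std::mutex purely for illustration (the real code goes through SimpleJobCoordinator and QMutexLocker):

#include <atomic>
#include <mutex>

struct Job { /* payload elided */ };

std::atomic<Job*> g_job{nullptr};
std::mutex g_mutex;

// Illustration only: double-checked creation and release-publication of a single
// shared job, mirroring the loadConsume/storeRelease sequence above.
Job* getOrCreateJob()
{
    Job* job = g_job.load(std::memory_order_acquire);    // cheap first check
    if (!job) {
        std::lock_guard<std::mutex> guard(g_mutex);
        job = g_job.load(std::memory_order_acquire);      // re-check under the lock
        if (!job) {
            job = new Job();                              // created exactly once
            g_job.store(job, std::memory_order_release);  // publish to other threads
        }
    }
    return job;
}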

◆ find()

template<class Map >
static Cell * Leapfrog< Map >::find ( Hash hash, Table * table )
inline static

Definition at line 157 of file leapfrog.h.

{
#ifdef SANITY_CHECK
    KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
#endif // SANITY_CHECK
    quint64 sizeMask = table->sizeMask;
    // Optimistically check hashed cell even though it might belong to another bucket
    quint64 idx = hash & sizeMask;
    CellGroup* group = table->getCellGroups() + (idx >> 2);
    Cell* cell = group->cells + (idx & 3);
    Hash probeHash = cell->hash.load(Relaxed);

    if (probeHash == hash) {
        return cell;
    } else if (probeHash == KeyTraits::NullHash) {
        return cell = NULL;
    }
    // Follow probe chain for our bucket
    quint8 delta = group->deltas[idx & 3].load(Relaxed);
    while (delta) {
        idx = (idx + delta) & sizeMask;
        group = table->getCellGroups() + (idx >> 2);
        cell = group->cells + (idx & 3);
        Hash probeHash = cell->hash.load(Relaxed);
        // Note: probeHash might actually be NULL due to memory reordering of a concurrent insert,
        // but we don't check for it. We just follow the probe chain.
        if (probeHash == hash) {
            return cell;
        }
        delta = group->deltas[(idx & 3) + 4].load(Relaxed);
    }
    // End of probe chain, not found
    return NULL;
}

References Leapfrog< Map >::CellGroup::cells, Leapfrog< Map >::CellGroup::deltas, Leapfrog< Map >::Table::getCellGroups(), Leapfrog< Map >::Cell::hash, KIS_ASSERT_RECOVER_NOOP, Atomic< T >::load(), Relaxed, and Leapfrog< Map >::Table::sizeMask.
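
A hypothetical lookup helper, shown only to illustrate how a caller might combine find() with the typedefs documented above; the lookupValue name is not part of leapfrog.h, and a reserved cell whose value is still NullValue is treated the same as a missing cell:

#include <leapfrog.h>

template<class Map>
typename Map::Value lookupValue(typename Map::Hash hash,
                                typename Leapfrog<Map>::Table* table)
{
    typedef typename Leapfrog<Map>::Cell Cell;
    typedef typename Map::Value Value;
    typedef typename Map::ValueTraits ValueTraits;

    // find() returns the cell holding this hash, or NULL when the probe chain ends first.
    Cell* cell = Leapfrog<Map>::find(hash, table);
    return cell ? cell->value.load(Relaxed) : Value(ValueTraits::NullValue);
}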

◆ insertOrFind()

template<class Map >
static InsertResult Leapfrog< Map >::insertOrFind ( Hash hash, Table * table, Cell *& cell, quint64 & overflowIdx )
inline static

Definition at line 195 of file leapfrog.h.

{
#ifdef SANITY_CHECK
    KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
#endif // SANITY_CHECK
    quint64 sizeMask = table->sizeMask;
    quint64 idx = quint64(hash);

    // Check hashed cell first, though it may not even belong to the bucket.
    CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
    cell = group->cells + (idx & 3);
    Hash probeHash = cell->hash.load(Relaxed);

    if (probeHash == KeyTraits::NullHash) {
        if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) {
            // There are no links to set. We're done.
            return InsertResult_InsertedNew;
        } else {
            // Fall through to check if it was the same hash...
        }
    }

    if (probeHash == hash) {
        return InsertResult_AlreadyFound;
    }

    // Follow the link chain for this bucket.
    quint64 maxIdx = idx + sizeMask;
    quint64 linkLevel = 0;
    Atomic<quint8>* prevLink;
    for (;;) {
followLink:
        prevLink = group->deltas + ((idx & 3) + linkLevel);
        linkLevel = 4;
        quint8 probeDelta = prevLink->load(Relaxed);

        if (probeDelta) {
            idx += probeDelta;
            // Check the hash for this cell.
            group = table->getCellGroups() + ((idx & sizeMask) >> 2);
            cell = group->cells + (idx & 3);
            probeHash = cell->hash.load(Relaxed);

            if (probeHash == KeyTraits::NullHash) {
                // Cell was linked, but hash is not visible yet.
                // We could avoid this case (and guarantee it's visible) using acquire & release, but instead,
                // just poll until it becomes visible.
                do {
                    probeHash = cell->hash.load(Acquire);
                } while (probeHash == KeyTraits::NullHash);
            }

#ifdef SANITY_CHECK
            KIS_ASSERT_RECOVER_NOOP(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in same bucket can be linked
#endif // SANITY_CHECK
            if (probeHash == hash) {
                return InsertResult_AlreadyFound;
            }
        } else {
            // Reached the end of the link chain for this bucket.
            // Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket.
            quint64 prevLinkIdx = idx;
#ifdef SANITY_CHECK
            KIS_ASSERT_RECOVER_NOOP(qint64(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range.
#endif // SANITY_CHECK
            quint64 linearProbesRemaining = qMin(maxIdx - idx, quint64(LinearSearchLimit));

            while (linearProbesRemaining-- > 0) {
                idx++;
                group = table->getCellGroups() + ((idx & sizeMask) >> 2);
                cell = group->cells + (idx & 3);
                probeHash = cell->hash.load(Relaxed);

                if (probeHash == KeyTraits::NullHash) {
                    // It's an empty cell. Try to reserve it.
                    if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) {
                        // Success. We've reserved the cell. Link it to previous cell in same bucket.
#ifdef SANITY_CHECK
                        KIS_ASSERT_RECOVER_NOOP(probeDelta == 0);
#endif // SANITY_CHECK
                        quint8 desiredDelta = idx - prevLinkIdx;
                        prevLink->store(desiredDelta, Relaxed);
                        return InsertResult_InsertedNew;
                    } else {
                        // Fall through to check if it's the same hash...
                    }
                }
                Hash x = (probeHash ^ hash);
                // Check for same hash.
                if (!x) {
                    return InsertResult_AlreadyFound;
                }
                // Check for same bucket.
                if ((x & sizeMask) == 0) {
                    // Attempt to set the link on behalf of the late-arriving cell.
                    // This is usually redundant, but if we don't attempt to set the late-arriving cell's link here,
                    // there's no guarantee that our own link chain will be well-formed by the time this function returns.
                    // (Indeed, subsequent lookups sometimes failed during testing, for this exact reason.)
                    quint8 desiredDelta = idx - prevLinkIdx;
                    prevLink->store(desiredDelta, Relaxed);
                    goto followLink; // Try to follow link chain for the bucket again.
                }
                // Continue linear search...
            }
            // Table is too full to insert.
            overflowIdx = idx + 1;
            return InsertResult_Overflow;
        }
    }
}

References Acquire, Leapfrog< Map >::CellGroup::cells, Atomic< T >::compareExchangeStrong(), Leapfrog< Map >::CellGroup::deltas, Leapfrog< Map >::Table::getCellGroups(), Leapfrog< Map >::Cell::hash, Leapfrog< Map >::InsertResult_AlreadyFound, Leapfrog< Map >::InsertResult_InsertedNew, Leapfrog< Map >::InsertResult_Overflow, KIS_ASSERT_RECOVER_NOOP, Leapfrog< Map >::LinearSearchLimit, Atomic< T >::load(), Relaxed, Leapfrog< Map >::Table::sizeMask, and Atomic< T >::store().
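
A hypothetical insertion path illustrating how the three InsertResult values might be handled by a caller; on overflow, the overflowIdx out-parameter is what beginTableMigration() expects. The tryInsert name and the retry policy are illustrative, not part of leapfrog.h:

#include <leapfrog.h>

template<class Map>
bool tryInsert(typename Map::Hash hash, typename Map::Value value,
               Map& map, typename Leapfrog<Map>::Table* table)
{
    typedef Leapfrog<Map> LF;
    typename LF::Cell* cell = NULL;
    quint64 overflowIdx = 0;

    switch (LF::insertOrFind(hash, table, cell, overflowIdx)) {
    case LF::InsertResult_InsertedNew:
    case LF::InsertResult_AlreadyFound:
        cell->value.store(value, Relaxed);  // write (or overwrite) the value in the located cell
        return true;
    case LF::InsertResult_Overflow:
        // Table is too full: start (or join) a migration, then let the caller retry on the new table.
        LF::beginTableMigration(map, table, overflowIdx);
        return false;
    }
    return false;
}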

◆ Q_STATIC_ASSERT() [1/2]

template<class Map >
Leapfrog< Map >::Q_STATIC_ASSERT ( CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit )

◆ Q_STATIC_ASSERT() [2/2]

template<class Map >
Leapfrog< Map >::Q_STATIC_ASSERT ( LinearSearchLimit > 0 && LinearSearchLimit < 256 )

Member Data Documentation

◆ CellsInUseSample

template<class Map >
const quint64 Leapfrog< Map >::CellsInUseSample = LinearSearchLimit
static

Definition at line 30 of file leapfrog.h.

◆ InitialSize

template<class Map >
const quint64 Leapfrog< Map >::InitialSize = 8
static

Definition at line 27 of file leapfrog.h.

◆ LinearSearchLimit

template<class Map >
const quint64 Leapfrog< Map >::LinearSearchLimit = 128
static

Definition at line 29 of file leapfrog.h.

◆ TableMigrationUnitSize

template<class Map >
const quint64 Leapfrog< Map >::TableMigrationUnitSize = 32
static

Definition at line 28 of file leapfrog.h.


The documentation for this struct was generated from the following file:

leapfrog.h