!highlight c
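/*
 * RCU cuckoo hash for the btree node cache: lookups are lock free under
 * rcu_read_lock(), each key has exactly two candidate slots, and the
 * table size (in bits) rides in the low bits of the page aligned pointer.
 */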
/* First of a key's two candidate slots */
static inline unsigned cuckoo_hash(unsigned val, unsigned bits)
{
        return hash_32(val, bits);
}

/* The other candidate slot: the two slots sum to
 * val * GOLDEN_RATIO_PRIME_32 (mod table size), so applying this twice
 * gets back to the slot we started from.
 */
static inline unsigned cuckoo_next(unsigned h, unsigned val, unsigned bits)
{
        return (val * GOLDEN_RATIO_PRIME_32 - h) & ~(~0U << bits);
}
static struct btree *btree_cache_find(struct cache_set *c, struct bkey *k)
{
        struct btree **hash;
        struct btree *b = NULL;
        unsigned bits, h, val = PTR_HASH(c, k);

        rcu_read_lock();
        hash = rcu_dereference(c->bucket_hash);

        /* The table size (in bits) is tagged into the low bits of the
         * page aligned table pointer.
         */
        bits = ((unsigned long) hash) & (PAGE_SIZE - 1);
        hash = ((void *) hash) - bits;

        /* A matching node can only be in one of its two slots */
        h = cuckoo_hash(val, bits);
        if (hash[h] && hash[h]->key.ptr[0] == k->ptr[0])
                b = hash[h];

        h = cuckoo_next(h, val, bits);
        if (hash[h] && hash[h]->key.ptr[0] == k->ptr[0])
                b = hash[h];

        rcu_read_unlock();
        return b;
}
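/* Insert side.  Nothing below locks the table, so presumably the caller
 * serializes insertions.  Evictions happen in the live table, so a reader
 * can transiently miss a node that is being bounced between its two
 * slots: a lookup miss means "maybe not cached", not "definitely not".
 */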
static void btree_cache_insert(struct cache_set *c, struct btree *b)
{
        bool __insert(struct btree **hash, unsigned bits)
        {
                unsigned h = cuckoo_hash(PTR_HASH(c, &b->key), bits);

                if (!hash[h])
                        goto found;

                /* Bounded cuckoo displacement: evict until a free slot
                 * turns up, or give up so the caller grows the table.
                 */
                for (unsigned i = 0; i < 16; i++) {
                        h = cuckoo_next(h, PTR_HASH(c, &b->key), bits);
                        if (!hash[h])
                                goto found;
                        swap(b, hash[h]);
                }
                return false;
found:
                hash[h] = b;
                return true;
        }

        struct btree **n, **hash = c->bucket_hash;
        struct btree *orphan;
        unsigned bits, old_bits;

        /* Publishing b needs the barrier rcu_assign_pointer() would give
         * us, but the cuckoo moves may store b more than once - so do
         * the barrier once, up front.
         */
        smp_wmb();

        old_bits = ((unsigned long) hash) & (PAGE_SIZE - 1);
        hash = ((void *) hash) - old_bits;

        if (__insert(hash, old_bits))
                return;

        /* Exactly one node (whatever the last eviction left in b) has no
         * slot; grow the table and rehash everything into a bigger one.
         */
        orphan = b;
        bits = old_bits;
rehash:
        bits++;

        /* The table holds 1 << bits pointers, not 1 << bits bytes */
        n = (void *) __get_free_pages(__GFP_ZERO|GFP_NOIO,
                        get_order(sizeof(struct btree *) << bits));
        if (!n)
                BUG();

        b = orphan;
        __insert(n, bits);

        for (unsigned i = 0; i < 1U << old_bits; i++) {
                b = hash[i];
                if (b && !__insert(n, bits)) {
                        free_pages((unsigned long) n,
                                get_order(sizeof(struct btree *) << bits));
                        goto rehash;
                }
        }

        /* Stash the size in the low bits, then publish the new table */
        n = ((void *) n) + bits;
        rcu_assign_pointer(c->bucket_hash, n);

        /* Readers may still be walking the old table - only free it
         * after a grace period.
         */
        synchronize_rcu();
        free_pages((unsigned long) hash,
                        get_order(sizeof(struct btree *) << old_bits));
}
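/* Hypothetical caller, only to show how the two halves might fit
 * together; fill_btree_node() is a made up placeholder for however the
 * node actually gets read in on a cache miss:
 */
static struct btree *get_btree_node(struct cache_set *c, struct bkey *k)
{
        struct btree *b = btree_cache_find(c, k);

        if (!b) {
                b = fill_btree_node(c, k);
                btree_cache_insert(c, b);
        }
        return b;
}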