bcache: Bkey indexing renaming
More refactoring:

    node() -> bset_bkey_idx()
    end()  -> bset_bkey_last()

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 085d2a3dd4
commit fafff81cea

7 changed files with 63 additions and 53 deletions
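For orientation, here is a minimal sketch of the helpers this commit renames, using trimmed stand-in definitions of struct bkey and struct bset rather than the real bcache headers (the struct layouts and field comments below are illustrative assumptions). The point the new names make explicit: bkeys are variable length, so the index argument counts u64 words, not keys. The three helpers mirror the definitions added to bset.h in the diff below.

#include <stdint.h>

/* Stand-in, simplified layouts (hypothetical; not the real bcache structs). */
struct bkey {
        uint64_t high;          /* header word; encodes the key's size in u64s */
        uint64_t low;
        uint64_t ptr[0];        /* variable number of pointers follows */
};

struct bset {
        uint64_t csum;
        uint64_t magic;
        uint64_t seq;
        uint32_t version;
        uint32_t keys;          /* total key data in this set, in u64s */
        union {
                struct bkey start[0];
                uint64_t d[0];
        };
};

/* Renamed from bkey_last(): step nr_keys u64 words past k. */
static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
{
        uint64_t *d = (void *) k;
        return (struct bkey *) (d + nr_keys);
}

/* node(i, j) becomes bset_bkey_idx(i, idx): the key idx u64s into the set. */
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
{
        return bkey_idx(i->start, idx);
}

/* end(i) becomes bset_bkey_last(i): one past the last key in the set. */
#define bset_bkey_last(i)       bkey_idx((struct bkey *) (i)->d, (i)->keys)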
@@ -724,9 +724,6 @@ struct bbio {
 #define __set_blocks(i, k, c)  DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
 #define set_blocks(i, c)       __set_blocks(i, (i)->keys, c)
 
-#define node(i, j)             ((struct bkey *) ((i)->d + (j)))
-#define end(i)                 node(i, (i)->keys)
-
 #define btree_data_space(b)    (PAGE_SIZE << (b)->page_order)
 
 #define prios_per_bucket(c)                            \
@@ -791,18 +788,14 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 
 /* Btree key macros */
 
-static inline void bkey_init(struct bkey *k)
-{
-       *k = ZERO_KEY;
-}
-
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)                                                    \
        bch_crc64(((void *) (i)) + sizeof(uint64_t),                    \
-                 ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+                 ((void *) bset_bkey_last(i)) -                        \
+                 (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
@@ -500,7 +500,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
                : tree_to_prev_bkey(t, j >> ffs(j));
 
        struct bkey *r = is_power_of_2(j + 1)
-               ? node(t->data, t->data->keys - bkey_u64s(&t->end))
+               ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));
 
        BUG_ON(m < l || m > r);
@@ -559,7 +559,7 @@ static void bset_build_written_tree(struct btree *b)
        bset_alloc_tree(b, t);
 
        t->size = min_t(unsigned,
-                       bkey_to_cacheline(t, end(t->data)),
+                       bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->sets->tree + bset_tree_space(b) - t->tree);
 
        if (t->size < 2) {
@@ -582,7 +582,7 @@ static void bset_build_written_tree(struct btree *b)
                        t->tree[j].m = bkey_to_cacheline_offset(k);
        }
 
-       while (bkey_next(k) != end(t->data))
+       while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);
 
        t->end = *k;
@@ -600,7 +600,7 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
        unsigned inorder, j = 1;
 
        for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-               if (k < end(t->data))
+               if (k < bset_bkey_last(t->data))
                        goto found_set;
 
        BUG();
@@ -613,7 +613,7 @@ found_set:
        if (k == t->data->start)
                goto fix_left;
 
-       if (bkey_next(k) == end(t->data)) {
+       if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }
@@ -679,7 +679,7 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
        /* Possibly add a new entry to the end of the lookup table */
 
        for (k = table_to_bkey(t, t->size - 1);
-            k != end(t->data);
+            k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] = bkey_to_cacheline_offset(k);
@@ -715,7 +715,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
        unsigned li = 0, ri = t->size;
 
        BUG_ON(!b->nsets &&
-              t->size < bkey_to_cacheline(t, end(t->data)));
+              t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;
@@ -728,7 +728,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 
        return (struct bset_search_iter) {
                table_to_bkey(t, li),
-               ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+               ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
 }
 
@@ -780,7 +780,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
-                       r = end(t->data);
+                       r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -816,7 +816,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
        if (unlikely(!t->size)) {
                i.l = t->data->start;
-               i.r = end(t->data);
+               i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
@@ -826,7 +826,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                 */
 
                if (unlikely(bkey_cmp(search, &t->end) >= 0))
-                       return end(t->data);
+                       return bset_bkey_last(t->data);
 
                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;
@@ -842,7 +842,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                                        inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                        search) > 0);
 
-       BUG_ON(i.r != end(t->data) &&
+       BUG_ON(i.r != bset_bkey_last(t->data) &&
               bkey_cmp(i.r, search) <= 0);
 }
 
@@ -897,7 +897,7 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 
        for (; start <= &b->sets[b->nsets]; start++) {
                ret = bch_bset_search(b, start, search);
-               bch_btree_iter_push(iter, ret, end(start->data));
+               bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }
 
        return ret;
@@ -1067,7 +1067,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
        } else {
                b->sets[start].data->keys = out->keys;
                memcpy(b->sets[start].data->start, out->start,
-                      (void *) end(out) - (void *) out->start);
+                      (void *) bset_bkey_last(out) - (void *) out->start);
        }
 
        if (used_mempool)
@@ -190,14 +190,6 @@ struct bset_tree {
        struct bset *data;
 };
 
-static __always_inline int64_t bkey_cmp(const struct bkey *l,
-                                       const struct bkey *r)
-{
-       return unlikely(KEY_INODE(l) != KEY_INODE(r))
-               ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
-               : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
-}
-
 /* Keylists */
 
 struct keylist {
@@ -261,6 +253,28 @@ struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int __bch_keylist_realloc(struct keylist *, unsigned);
 
+/* Bkey utility code */
+
+#define bset_bkey_last(i)      bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+       return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+       *k = ZERO_KEY;
+}
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+                                       const struct bkey *r)
+{
+       return unlikely(KEY_INODE(l) != KEY_INODE(r))
+               ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+               : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
 void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
                              unsigned);
 bool __bch_cut_front(const struct bkey *, struct bkey *);
@@ -197,7 +197,7 @@ void bkey_put(struct cache_set *c, struct bkey *k)
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
        uint64_t crc = b->key.ptr[0];
-       void *data = (void *) i + 8, *end = end(i);
+       void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
        crc = bch_crc64_update(crc, data, end - data);
        return crc ^ 0xffffffffffffffffULL;
@@ -251,7 +251,7 @@ void bch_btree_node_read_done(struct btree *b)
                if (i != b->sets[0].data && !i->keys)
                        goto err;
 
-               bch_btree_iter_push(iter, i->start, end(i));
+               bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
                b->written += set_blocks(i, b->c);
        }
@@ -1310,7 +1310,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 
                if (i > 1) {
                        for (k = n2->start;
-                            k < end(n2);
+                            k < bset_bkey_last(n2);
                             k = bkey_next(k)) {
                                if (__set_blocks(n1, n1->keys + keys +
                                                 bkey_u64s(k), b->c) > blocks)
@@ -1343,16 +1343,17 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                if (last)
                        bkey_copy_key(&new_nodes[i]->key, last);
 
-               memcpy(end(n1),
+               memcpy(bset_bkey_last(n1),
                       n2->start,
-                      (void *) node(n2, keys) - (void *) n2->start);
+                      (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
                n1->keys += keys;
                r[i].keys = n1->keys;
 
                memmove(n2->start,
-                       node(n2, keys),
-                       (void *) end(n2) - (void *) node(n2, keys));
+                       bset_bkey_idx(n2, keys),
+                       (void *) bset_bkey_last(n2) -
+                       (void *) bset_bkey_idx(n2, keys));
 
                n2->keys -= keys;
 
@@ -1830,7 +1831,7 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
 
        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(i) - (void *) where);
 
        i->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
@@ -2014,7 +2015,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
                        bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                                     KEY_START(k), KEY_SIZE(k));
 
-               while (m != end(i) &&
+               while (m != bset_bkey_last(i) &&
                       bkey_cmp(k, &START_KEY(m)) > 0)
                        prev = m, m = bkey_next(m);
 
@@ -2028,12 +2029,12 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
                        goto merged;
 
                status = BTREE_INSERT_STATUS_OVERWROTE;
-               if (m != end(i) &&
+               if (m != bset_bkey_last(i) &&
                    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                        goto copy;
 
                status = BTREE_INSERT_STATUS_FRONT_MERGE;
-               if (m != end(i) &&
+               if (m != bset_bkey_last(i) &&
                    bch_bkey_try_merge(b, k, m))
                        goto copy;
        } else {
@@ -2142,16 +2143,18 @@ static int btree_split(struct btree *b, struct btree_op *op,
                 */
 
                while (keys < (n1->sets[0].data->keys * 3) / 5)
-                       keys += bkey_u64s(node(n1->sets[0].data, keys));
+                       keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data,
+                                                       keys));
 
-               bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-               keys += bkey_u64s(node(n1->sets[0].data, keys));
+               bkey_copy_key(&n1->key,
+                             bset_bkey_idx(n1->sets[0].data, keys));
+               keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data, keys));
 
                n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
                n1->sets[0].data->keys = keys;
 
                memcpy(n2->sets[0].data->start,
-                      end(n1->sets[0].data),
+                      bset_bkey_last(n1->sets[0].data),
                       n2->sets[0].data->keys * sizeof(uint64_t));
 
                bkey_copy_key(&n2->key, &b->key);
@@ -84,7 +84,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
        unsigned j;
        char buf[80];
 
-       for (k = i->start; k < end(i); k = next) {
+       for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);
 
                bch_bkey_to_text(buf, sizeof(buf), k);
@@ -102,7 +102,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
 
                printk(" %s\n", bch_ptr_status(b->c, k));
 
-               if (next < end(i) &&
+               if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
                        printk(KERN_ERR "Key skipped backwards\n");
        }
@@ -162,7 +162,7 @@ void bch_btree_verify(struct btree *b)
        if (inmemory->keys != sorted->keys ||
            memcmp(inmemory->start,
                   sorted->start,
-                  (void *) end(inmemory) - (void *) inmemory->start)) {
+                  (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
                struct bset *i;
                unsigned j;
 
@@ -284,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
                }
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        unsigned j;
 
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
                        n, i->j.seq - 1, start, end);
 
                for (k = i->j.start;
-                    k < end(&i->j);
+                    k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);
 
@@ -751,7 +751,7 @@ atomic_t *bch_journal(struct cache_set *c,
 
        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-       memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+       memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);
 
        ret = &fifo_back(&c->journal.pin);
@@ -118,7 +118,7 @@ static inline struct bkey *bkey_next(const struct bkey *k)
        return (struct bkey *) (d + bkey_u64s(k));
 }
 
-static inline struct bkey *bkey_last(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
 {
        __u64 *d = (void *) k;
        return (struct bkey *) (d + nr_keys);
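The renamed helpers pair with bkey_next() (shown in the final hunk) to form the iteration idiom that most of the hunks above convert. A minimal usage sketch, reusing the stand-in types from the earlier example; the field extraction in bkey_u64s() is a hypothetical placeholder, though the real bcache macro is likewise header size plus the key's pointer count:

/* Hypothetical size helper: a key's total length in u64s is its two
 * header words plus its pointers; the bit position used here to pull
 * the pointer count out of the high word is illustrative only. */
static inline unsigned bkey_u64s(const struct bkey *k)
{
        return 2 + (unsigned) (k->high >> 60);
}

/* Matches the bkey_next() shown in the last hunk above. */
static inline struct bkey *bkey_next(const struct bkey *k)
{
        uint64_t *d = (void *) k;
        return (struct bkey *) (d + bkey_u64s(k));
}

/* Walk every key in a bset: the loop shape dump_bset() and the
 * journal replay code use after this commit. */
static void walk_bset(struct bset *i)
{
        struct bkey *k;

        for (k = i->start; k < bset_bkey_last(i); k = bkey_next(k)) {
                /* inspect k here */
        }
}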