dm writecache: don't split bios when overwriting contiguous cache content
If dm-writecache overwrites existing cached data, it splits the incoming bio into many block-sized bios. The I/O scheduler does merge these bios into one large request, but this needless splitting and merging causes performance degradation. Fix this by avoiding the bio splitting when the cache target area that is being overwritten is contiguous.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit ee50cc19d8
parent 6bcd658f2a
1 changed file with 30 additions and 8 deletions
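Before the diff itself, here is a minimal, self-contained sketch of the contiguity walk the commit introduces; it is an illustration under simplified assumptions, not the driver code. struct entry, contiguous_blocks() and BLOCK_SECTORS are hypothetical stand-ins for dm-writecache's wc_entry machinery, and plain array indexing stands in for the driver's rb-tree lookup plus its f == e + 1 adjacency test.

/* Sketch only: simplified stand-ins, not dm-writecache code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SECTORS 8		/* 4 KiB blocks, 512-byte sectors */

struct entry {
	uint64_t original_sector;	/* block's sector on the origin device */
	bool write_in_progress;
};

/*
 * Starting from the entry at index i, count how many blocks of an incoming
 * overwrite can be served by one bio: the walk stops as soon as the next
 * cached block is not physically adjacent in the cache (next array slot),
 * not logically adjacent on the origin device, or busy.
 */
static unsigned contiguous_blocks(const struct entry *entries, unsigned n,
				  unsigned i, unsigned blocks_wanted)
{
	unsigned count = 1;

	while (count < blocks_wanted && i + 1 < n) {
		const struct entry *e = &entries[i], *f = &entries[i + 1];

		if (f->original_sector != e->original_sector + BLOCK_SECTORS)
			break;		/* hole on the origin device */
		if (f->write_in_progress)
			break;		/* next block is busy, stop here */
		count++;
		i++;
	}
	return count;
}

int main(void)
{
	struct entry cache[4] = {
		{ .original_sector = 0 },
		{ .original_sector = 8 },
		{ .original_sector = 16 },
		{ .original_sector = 64 },	/* not contiguous: ends the run */
	};

	/* A 4-block overwrite starting at entry 0 covers 3 blocks in one bio. */
	printf("%u\n", contiguous_blocks(cache, 4, 0, 4));
	return 0;
}

Compiled with any C99 compiler this prints 3: the fourth cached block is not contiguous, so only the first three can be overwritten without splitting.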
drivers/md/dm-writecache.c
@@ -1360,14 +1360,18 @@ read_next_block:
 	} else {
 		do {
 			bool found_entry = false;
+			bool search_used = false;
 			if (writecache_has_error(wc))
 				goto unlock_error;
 			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
 			if (e) {
-				if (!writecache_entry_is_committed(wc, e))
+				if (!writecache_entry_is_committed(wc, e)) {
+					search_used = true;
 					goto bio_copy;
+				}
 				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
 					wc->overwrote_committed = true;
+					search_used = true;
 					goto bio_copy;
 				}
 				found_entry = true;
@@ -1404,13 +1408,31 @@ bio_copy:
 			sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
 
 			while (bio_size < bio->bi_iter.bi_size) {
-				struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
-				if (!f)
-					break;
-				write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
-								(bio_size >> SECTOR_SHIFT), wc->seq_count);
-				writecache_insert_entry(wc, f);
-				wc->uncommitted_blocks++;
+				if (!search_used) {
+					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+					if (!f)
+						break;
+					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+									(bio_size >> SECTOR_SHIFT), wc->seq_count);
+					writecache_insert_entry(wc, f);
+					wc->uncommitted_blocks++;
+				} else {
+					struct wc_entry *f;
+					struct rb_node *next = rb_next(&e->rb_node);
+					if (!next)
+						break;
+					f = container_of(next, struct wc_entry, rb_node);
+					if (f != e + 1)
+						break;
+					if (read_original_sector(wc, f) !=
+					    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+						break;
+					if (unlikely(f->write_in_progress))
+						break;
+					if (writecache_entry_is_committed(wc, f))
+						wc->overwrote_committed = true;
+					e = f;
+				}
+				bio_size += wc->block_size;
 				current_cache_sec += wc->block_size >> SECTOR_SHIFT;
 			}
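Two details of the new else branch are worth spelling out. The f != e + 1 test is a cheap physical-adjacency check: dm-writecache keeps its wc_entry structs in one flat array ordered by cache block (an entry's cache sector is derived from its array index), so the entry immediately following e in memory covers the cache block immediately following e's on the cache device. The read_original_sector() comparison then confirms the two blocks are also adjacent on the origin device. Only when both hold does the loop grow bio_size and keep the overwrite in a single bio; a hole, a busy block, or an exhausted freelist ends the run with break and falls back to the previous per-block behavior.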