mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
Btrfs: shift all end_io work to thread pools
bio_end_io for reads without checksumming on, and btree writes, were happening without using async thread pools. This meant the extent_io.c code had to use spin_lock_irq and friends on the rb tree locks for extent state. There were some irq-safe vs. irq-unsafe lock inversions between the delalloc lock and the extent state locks. This patch gets rid of them by moving all end_io code into the thread pools. To avoid contention and deadlocks between the data end_io processing and the metadata end_io processing, yet another thread pool is added to finish off metadata writes. Signed-off-by: Chris Mason <chris.mason@oracle.com>
This commit is contained in:
parent
87b29b208c
commit
cad321ad52
4 changed files with 57 additions and 46 deletions
|
@ -1282,8 +1282,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
|
|||
}
|
||||
|
||||
/*
|
||||
* extent_io.c submission hook. This does the right thing for csum calculation on write,
|
||||
* or reading the csums from the tree before a read
|
||||
* extent_io.c submission hook. This does the right thing for csum calculation
|
||||
* on write, or reading the csums from the tree before a read
|
||||
*/
|
||||
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
|
||||
int mirror_num, unsigned long bio_flags)
|
||||
|
@ -1292,11 +1292,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
|
|||
int ret = 0;
|
||||
int skip_sum;
|
||||
|
||||
skip_sum = btrfs_test_flag(inode, NODATASUM);
|
||||
|
||||
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
|
||||
BUG_ON(ret);
|
||||
|
||||
skip_sum = btrfs_test_flag(inode, NODATASUM);
|
||||
|
||||
if (!(rw & (1 << BIO_RW))) {
|
||||
if (bio_flags & EXTENT_BIO_COMPRESSED) {
|
||||
return btrfs_submit_compressed_read(inode, bio,
|
||||
|
@ -1648,13 +1648,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
|
|||
failrec->logical, failrec->len);
|
||||
failrec->last_mirror++;
|
||||
if (!state) {
|
||||
spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
|
||||
spin_lock(&BTRFS_I(inode)->io_tree.lock);
|
||||
state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
|
||||
failrec->start,
|
||||
EXTENT_LOCKED);
|
||||
if (state && state->start != failrec->start)
|
||||
state = NULL;
|
||||
spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
|
||||
spin_unlock(&BTRFS_I(inode)->io_tree.lock);
|
||||
}
|
||||
if (!state || failrec->last_mirror > num_copies) {
|
||||
set_state_private(failure_tree, failrec->start, 0);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue