mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-03-30 02:45:55 +00:00
md: avoid spinlock problem in blk_throtl_exit
blk_throtl_exit assumes that ->queue_lock still exists, so make sure that it does. To do this, we stop redirecting ->queue_lock to conf->device_lock and leave it pointing where it is initialised - __queue_lock. As the blk_plug functions check the ->queue_lock is held, we now take that spin_lock explicitly around the plug functions. We don't need the locking, just the warning removal. This is needed for any kernel with the blk_throtl code, which is 2.6.37 and later. Cc: stable@kernel.org Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
8f5f02c460
commit
da9cf5050a
6 changed files with 8 additions and 9 deletions
|
@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
|
||||||
|
|
||||||
if (md_check_no_bitmap(mddev))
|
if (md_check_no_bitmap(mddev))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
|
|
||||||
conf = linear_conf(mddev, mddev->raid_disks);
|
conf = linear_conf(mddev, mddev->raid_disks);
|
||||||
|
|
||||||
if (!conf)
|
if (!conf)
|
||||||
|
|
|
@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
|
||||||
* bookkeeping area. [whatever we allocate in multipath_run(),
|
* bookkeeping area. [whatever we allocate in multipath_run(),
|
||||||
* should be freed in multipath_stop()]
|
* should be freed in multipath_stop()]
|
||||||
*/
|
*/
|
||||||
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
|
|
||||||
|
|
||||||
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
|
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
|
||||||
mddev->private = conf;
|
mddev->private = conf;
|
||||||
|
|
|
@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
|
||||||
if (md_check_no_bitmap(mddev))
|
if (md_check_no_bitmap(mddev))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
|
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
|
||||||
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
|
|
||||||
|
|
||||||
/* if private is not null, we are here after takeover */
|
/* if private is not null, we are here after takeover */
|
||||||
if (mddev->private == NULL) {
|
if (mddev->private == NULL) {
|
||||||
|
|
|
@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
|
||||||
if (conf->pending_bio_list.head) {
|
if (conf->pending_bio_list.head) {
|
||||||
struct bio *bio;
|
struct bio *bio;
|
||||||
bio = bio_list_get(&conf->pending_bio_list);
|
bio = bio_list_get(&conf->pending_bio_list);
|
||||||
|
/* Only take the spinlock to quiet a warning */
|
||||||
|
spin_lock(conf->mddev->queue->queue_lock);
|
||||||
blk_remove_plug(conf->mddev->queue);
|
blk_remove_plug(conf->mddev->queue);
|
||||||
|
spin_unlock(conf->mddev->queue->queue_lock);
|
||||||
spin_unlock_irq(&conf->device_lock);
|
spin_unlock_irq(&conf->device_lock);
|
||||||
/* flush any pending bitmap writes to
|
/* flush any pending bitmap writes to
|
||||||
* disk before proceeding w/ I/O */
|
* disk before proceeding w/ I/O */
|
||||||
|
@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
|
||||||
atomic_inc(&r1_bio->remaining);
|
atomic_inc(&r1_bio->remaining);
|
||||||
spin_lock_irqsave(&conf->device_lock, flags);
|
spin_lock_irqsave(&conf->device_lock, flags);
|
||||||
bio_list_add(&conf->pending_bio_list, mbio);
|
bio_list_add(&conf->pending_bio_list, mbio);
|
||||||
blk_plug_device(mddev->queue);
|
blk_plug_device_unlocked(mddev->queue);
|
||||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||||
}
|
}
|
||||||
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
|
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
|
||||||
|
@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
|
||||||
if (IS_ERR(conf))
|
if (IS_ERR(conf))
|
||||||
return PTR_ERR(conf);
|
return PTR_ERR(conf);
|
||||||
|
|
||||||
mddev->queue->queue_lock = &conf->device_lock;
|
|
||||||
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
||||||
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
||||||
rdev->data_offset << 9);
|
rdev->data_offset << 9);
|
||||||
|
|
|
@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
|
||||||
if (conf->pending_bio_list.head) {
|
if (conf->pending_bio_list.head) {
|
||||||
struct bio *bio;
|
struct bio *bio;
|
||||||
bio = bio_list_get(&conf->pending_bio_list);
|
bio = bio_list_get(&conf->pending_bio_list);
|
||||||
|
/* Spinlock only taken to quiet a warning */
|
||||||
|
spin_lock(conf->mddev->queue->queue_lock);
|
||||||
blk_remove_plug(conf->mddev->queue);
|
blk_remove_plug(conf->mddev->queue);
|
||||||
|
spin_unlock(conf->mddev->queue->queue_lock);
|
||||||
spin_unlock_irq(&conf->device_lock);
|
spin_unlock_irq(&conf->device_lock);
|
||||||
/* flush any pending bitmap writes to disk
|
/* flush any pending bitmap writes to disk
|
||||||
* before proceeding w/ I/O */
|
* before proceeding w/ I/O */
|
||||||
|
@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
|
||||||
atomic_inc(&r10_bio->remaining);
|
atomic_inc(&r10_bio->remaining);
|
||||||
spin_lock_irqsave(&conf->device_lock, flags);
|
spin_lock_irqsave(&conf->device_lock, flags);
|
||||||
bio_list_add(&conf->pending_bio_list, mbio);
|
bio_list_add(&conf->pending_bio_list, mbio);
|
||||||
blk_plug_device(mddev->queue);
|
blk_plug_device_unlocked(mddev->queue);
|
||||||
spin_unlock_irqrestore(&conf->device_lock, flags);
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
|
||||||
if (!conf)
|
if (!conf)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
mddev->queue->queue_lock = &conf->device_lock;
|
|
||||||
|
|
||||||
mddev->thread = conf->thread;
|
mddev->thread = conf->thread;
|
||||||
conf->thread = NULL;
|
conf->thread = NULL;
|
||||||
|
|
||||||
|
|
|
@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
|
||||||
|
|
||||||
mddev->queue->backing_dev_info.congested_data = mddev;
|
mddev->queue->backing_dev_info.congested_data = mddev;
|
||||||
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
|
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
|
||||||
mddev->queue->queue_lock = &conf->device_lock;
|
|
||||||
mddev->queue->unplug_fn = raid5_unplug_queue;
|
mddev->queue->unplug_fn = raid5_unplug_queue;
|
||||||
|
|
||||||
chunk_size = mddev->chunk_sectors << 9;
|
chunk_size = mddev->chunk_sectors << 9;
|
||||||
|
|
Loading…
Add table
Reference in a new issue