mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-25 16:11:45 +00:00
md raid0: access mddev->queue (request queue member) conditionally because it is not set when accessed from dm-raid
The patch makes 3 references to mddev->queue in the raid0 personality conditional in order to allow for it to be accessed from dm-raid. Mandatory, because md instances underneath dm-raid don't manage a request queue of their own which'd lead to oopses without the patch. Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com> Tested-by: Heinz Mauelshagen <heinzm@redhat.com> Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
ac8fa4196d
commit
753f2856cd
1 changed file with 26 additions and 20 deletions
|
@ -271,14 +271,16 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
|
||||||
goto abort;
|
goto abort;
|
||||||
}
|
}
|
||||||
|
|
||||||
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
|
if (mddev->queue) {
|
||||||
blk_queue_io_opt(mddev->queue,
|
blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
|
||||||
(mddev->chunk_sectors << 9) * mddev->raid_disks);
|
blk_queue_io_opt(mddev->queue,
|
||||||
|
(mddev->chunk_sectors << 9) * mddev->raid_disks);
|
||||||
|
|
||||||
if (!discard_supported)
|
if (!discard_supported)
|
||||||
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
|
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
|
||||||
else
|
else
|
||||||
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
|
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
|
||||||
|
}
|
||||||
|
|
||||||
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
|
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
|
||||||
*private_conf = conf;
|
*private_conf = conf;
|
||||||
|
@ -429,9 +431,12 @@ static int raid0_run(struct mddev *mddev)
|
||||||
}
|
}
|
||||||
if (md_check_no_bitmap(mddev))
|
if (md_check_no_bitmap(mddev))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
|
|
||||||
blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
|
if (mddev->queue) {
|
||||||
blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
|
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
|
||||||
|
blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
|
||||||
|
blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
|
||||||
|
}
|
||||||
|
|
||||||
/* if private is not null, we are here after takeover */
|
/* if private is not null, we are here after takeover */
|
||||||
if (mddev->private == NULL) {
|
if (mddev->private == NULL) {
|
||||||
|
@ -448,16 +453,17 @@ static int raid0_run(struct mddev *mddev)
|
||||||
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
|
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
|
||||||
mdname(mddev),
|
mdname(mddev),
|
||||||
(unsigned long long)mddev->array_sectors);
|
(unsigned long long)mddev->array_sectors);
|
||||||
/* calculate the max read-ahead size.
|
|
||||||
* For read-ahead of large files to be effective, we need to
|
if (mddev->queue) {
|
||||||
* readahead at least twice a whole stripe. i.e. number of devices
|
/* calculate the max read-ahead size.
|
||||||
* multiplied by chunk size times 2.
|
* For read-ahead of large files to be effective, we need to
|
||||||
* If an individual device has an ra_pages greater than the
|
* readahead at least twice a whole stripe. i.e. number of devices
|
||||||
* chunk size, then we will not drive that device as hard as it
|
* multiplied by chunk size times 2.
|
||||||
* wants. We consider this a configuration error: a larger
|
* If an individual device has an ra_pages greater than the
|
||||||
* chunksize should be used in that case.
|
* chunk size, then we will not drive that device as hard as it
|
||||||
*/
|
* wants. We consider this a configuration error: a larger
|
||||||
{
|
* chunksize should be used in that case.
|
||||||
|
*/
|
||||||
int stripe = mddev->raid_disks *
|
int stripe = mddev->raid_disks *
|
||||||
(mddev->chunk_sectors << 9) / PAGE_SIZE;
|
(mddev->chunk_sectors << 9) / PAGE_SIZE;
|
||||||
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
|
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue