dm: various cleanups to md->queue initialization code
Also, add dm_sysfs_init() error handling to dm_create().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent ac514ffc96
commit c12c9a3c38
3 changed files with 12 additions and 22 deletions
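Taken together, the hunks below delete dm_init_md_queue() outright: its two assignments are open-coded in alloc_dev() right after the queue is allocated, and dm_init_normal_md_queue() becomes static to dm.c, called from dm_setup_md_queue() rather than from inside dm_old_init_request_queue(). The following is a minimal compilable sketch of that shape, not kernel code: the types are stand-ins trimmed to the fields the patch touches, and dm_setup_md_queue_request_based() is a hypothetical name for the request-based branch of dm_setup_md_queue().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the kernel types, trimmed to what the patch touches. */
struct request_queue { void *queuedata; };
struct mapped_device {
	struct request_queue *queue;
	bool use_blk_mq;
};

/* After the patch: static, and no longer chained through a separate
 * dm_init_md_queue() helper. */
static void dm_init_normal_md_queue(struct mapped_device *md)
{
	md->use_blk_mq = false;
	/* ...remaining non-blk-mq queue setup elided... */
}

static struct mapped_device *alloc_dev(void)
{
	struct mapped_device *md = calloc(1, sizeof(*md));
	if (!md)
		return NULL;
	md->queue = calloc(1, sizeof(*md->queue));
	if (!md->queue) {
		free(md);
		return NULL;
	}
	/* Open-coded at allocation time; formerly hidden inside
	 * dm_init_md_queue(). */
	md->queue->queuedata = md;
	return md;
}

/* Hypothetical stand-in for the DM_TYPE_REQUEST_BASED case of
 * dm_setup_md_queue(): the caller that knows the table type now does
 * normal-queue init up front, before dm_old_init_request_queue(md, t). */
static int dm_setup_md_queue_request_based(struct mapped_device *md)
{
	dm_init_normal_md_queue(md);
	return 0;
}

int main(void)
{
	struct mapped_device *md = alloc_dev();
	if (!md)
		return 1;
	dm_setup_md_queue_request_based(md);
	printf("use_blk_mq=%d\n", (int)md->use_blk_mq);
	free(md->queue);
	free(md);
	return 0;
}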
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -129,8 +129,6 @@ struct mapped_device {
 	struct srcu_struct io_barrier;
 };
 
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
 int md_in_flight(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -704,7 +704,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	/* disable dm_old_request_fn's merge heuristic by default */
 	md->seq_rq_merge_deadline_usecs = 0;
 
-	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 
 	/* Initialize the request-based DM worker thread */
@@ -814,7 +813,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 		err = PTR_ERR(q);
 		goto out_tag_set;
 	}
-	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
 	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1733,20 +1733,9 @@ static const struct dax_operations dm_dax_ops;
 
 static void dm_wq_work(struct work_struct *work);
 
-void dm_init_md_queue(struct mapped_device *md)
-{
-	/*
-	 * Initialize data that will only be used by a non-blk-mq DM queue
-	 * - must do so here (in alloc_dev callchain) before queue is used
-	 */
-	md->queue->queuedata = md;
-	md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
 {
 	md->use_blk_mq = false;
-	dm_init_md_queue(md);
 
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
@@ -1846,10 +1835,10 @@ static struct mapped_device *alloc_dev(int minor)
 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
+	md->queue->queuedata = md;
+	md->queue->backing_dev_info->congested_data = md;
 
-	dm_init_md_queue(md);
-
-	md->disk = alloc_disk_node(1, numa_node_id);
+	md->disk = alloc_disk_node(1, md->numa_node_id);
 	if (!md->disk)
 		goto bad;
 
@@ -2082,13 +2071,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
  */
 int dm_create(int minor, struct mapped_device **result)
 {
+	int r;
 	struct mapped_device *md;
 
 	md = alloc_dev(minor);
 	if (!md)
 		return -ENXIO;
 
-	dm_sysfs_init(md);
+	r = dm_sysfs_init(md);
+	if (r) {
+		free_dev(md);
+		return r;
+	}
 
 	*result = md;
 	return 0;
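The dm_create() change above is the usual unwind-on-partial-failure pattern: check the return value of dm_sysfs_init() instead of discarding it, and free the half-constructed device before propagating the error. A self-contained sketch of the same shape follows; the stub implementations are hypothetical stand-ins for the dm internals, not the kernel's code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mapped_device { int unused; };

/* Hypothetical stubs standing in for the dm internals. */
static struct mapped_device *alloc_dev(int minor)
{
	(void)minor;
	return calloc(1, sizeof(struct mapped_device));
}

static void free_dev(struct mapped_device *md) { free(md); }

static int dm_sysfs_init(struct mapped_device *md)
{
	(void)md;
	return 0; /* pretend kobject registration succeeded */
}

int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	/* The fix: check dm_sysfs_init() instead of ignoring it, and undo
	 * the allocation before propagating the error. */
	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

int main(void)
{
	struct mapped_device *md;
	int r = dm_create(0, &md);
	printf("dm_create returned %d\n", r);
	if (!r)
		free_dev(md);
	return 0;
}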
@@ -2145,6 +2139,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
+		dm_init_normal_md_queue(md);
 		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
@@ -2236,7 +2231,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
-	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;
 
@@ -2247,7 +2241,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	blk_set_queue_dying(q);
+	blk_set_queue_dying(md->queue);
 
 	if (dm_request_based(md) && md->kworker_task)
 		kthread_flush_worker(&md->kworker);