block/rq_qos: implement rq_qos_ops->queue_depth_changed()
wbt already gets queue depth changed notification through
wbt_set_queue_depth(). Generalize it into
rq_qos_ops->queue_depth_changed() so that other rq_qos policies can
easily hook into the events too.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d3e65ffff6
commit 9677a3e01f

5 changed files with 26 additions and 15 deletions
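The effect of the generalization is easiest to see from the policy side: any rq_qos implementation can now observe depth changes by filling in the new callback, exactly as wbt does in the diff below. A minimal sketch of a hypothetical policy hooking the event (example_depth_changed and example_qos_ops are illustrative names, not part of this commit):

/*
 * Hypothetical rq_qos policy reacting to queue depth changes.
 * Only .queue_depth_changed is shown; a real policy would also
 * fill in throttle/track/done callbacks as needed.
 */
static void example_depth_changed(struct rq_qos *rqos)
{
        /* Re-read the new depth from the owning queue and rescale. */
        unsigned int depth = blk_queue_depth(rqos->q);

        pr_debug("example-qos: queue depth now %u\n", depth);
}

static struct rq_qos_ops example_qos_ops = {
        .queue_depth_changed = example_depth_changed,
};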
block/blk-rq-qos.c
@@ -101,6 +101,15 @@ void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
 		rqos = rqos->next;
 	} while (rqos);
 }
 
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
+{
+	do {
+		if (rqos->ops->queue_depth_changed)
+			rqos->ops->queue_depth_changed(rqos);
+		rqos = rqos->next;
+	} while (rqos);
+}
+
 /*
  * Return true, if we can't increase the depth further by scaling
  */
block/blk-rq-qos.h
@@ -41,6 +41,7 @@ struct rq_qos_ops {
 	void (*done)(struct rq_qos *, struct request *);
 	void (*done_bio)(struct rq_qos *, struct bio *);
 	void (*cleanup)(struct rq_qos *, struct bio *);
+	void (*queue_depth_changed)(struct rq_qos *);
 	void (*exit)(struct rq_qos *);
 	const struct blk_mq_debugfs_attr *debugfs_attrs;
 };
@@ -138,6 +139,7 @@ void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
 
 static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
 {
@@ -194,6 +196,12 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
 		__rq_qos_merge(q->rq_qos, rq, bio);
 }
 
+static inline void rq_qos_queue_depth_changed(struct request_queue *q)
+{
+	if (q->rq_qos)
+		__rq_qos_queue_depth_changed(q->rq_qos);
+}
+
 void rq_qos_exit(struct request_queue *);
 
 #endif
block/blk-settings.c
@@ -805,7 +805,7 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
 {
 	q->queue_depth = depth;
-	wbt_set_queue_depth(q, depth);
+	rq_qos_queue_depth_changed(q);
 }
 EXPORT_SYMBOL(blk_set_queue_depth);
 
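With blk_set_queue_depth() rerouted through rq_qos_queue_depth_changed(), a driver that adjusts its depth now notifies every registered policy on the queue instead of wbt alone. A sketch of the resulting call chain (illustrative usage, not code added by this commit):

/*
 * driver (e.g. a SCSI LLD changing its depth)
 *   -> blk_set_queue_depth(q, depth)
 *      -> rq_qos_queue_depth_changed(q)
 *         -> __rq_qos_queue_depth_changed(q->rq_qos)
 *            -> rqos->ops->queue_depth_changed(rqos) for each policy
 */
blk_set_queue_depth(q, 64);	/* fans out to the whole rq_qos chain */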
@ -629,15 +629,6 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
|
|
||||||
{
|
|
||||||
struct rq_qos *rqos = wbt_rq_qos(q);
|
|
||||||
if (rqos) {
|
|
||||||
RQWB(rqos)->rq_depth.queue_depth = depth;
|
|
||||||
__wbt_update_limits(RQWB(rqos));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
|
void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
|
||||||
{
|
{
|
||||||
struct rq_qos *rqos = wbt_rq_qos(q);
|
struct rq_qos *rqos = wbt_rq_qos(q);
|
||||||
|
@@ -689,6 +680,12 @@ static int wbt_data_dir(const struct request *rq)
 	return -1;
 }
 
+static void wbt_queue_depth_changed(struct rq_qos *rqos)
+{
+	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
+	__wbt_update_limits(RQWB(rqos));
+}
+
 static void wbt_exit(struct rq_qos *rqos)
 {
 	struct rq_wb *rwb = RQWB(rqos);
@@ -811,6 +808,7 @@ static struct rq_qos_ops wbt_rqos_ops = {
 	.requeue = wbt_requeue,
 	.done = wbt_done,
 	.cleanup = wbt_cleanup,
+	.queue_depth_changed = wbt_queue_depth_changed,
 	.exit = wbt_exit,
 #ifdef CONFIG_BLK_DEBUG_FS
 	.debugfs_attrs = wbt_debugfs_attrs,
@@ -853,7 +851,7 @@ int wbt_init(struct request_queue *q)
 
 	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 
-	wbt_set_queue_depth(q, blk_queue_depth(q));
+	wbt_queue_depth_changed(&rwb->rqos);
 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 
 	return 0;
block/blk-wbt.h
@@ -95,7 +95,6 @@ void wbt_enable_default(struct request_queue *);
 u64 wbt_get_min_lat(struct request_queue *q);
 void wbt_set_min_lat(struct request_queue *q, u64 val);
 
-void wbt_set_queue_depth(struct request_queue *, unsigned int);
 void wbt_set_write_cache(struct request_queue *, bool);
 
 u64 wbt_default_latency_nsec(struct request_queue *);
@@ -118,9 +117,6 @@ static inline void wbt_disable_default(struct request_queue *q)
 static inline void wbt_enable_default(struct request_queue *q)
 {
 }
-static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
-}
 static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
 {
 }