mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-17 20:25:19 +00:00
blk-mq: move srcu from blk_mq_hw_ctx to request_queue
In case of BLK_MQ_F_BLOCKING, per-hctx srcu is used to protect dispatch critical area. However, this srcu instance stays at the end of hctx, and it often takes a standalone cacheline, often cold. Inside srcu_read_lock() and srcu_read_unlock(), WRITE is always done on the indirect percpu variable, which is allocated from the heap instead of being embedded; srcu->srcu_idx is read only in srcu_read_lock(). It doesn't matter whether the srcu structure stays in the hctx or the request queue. So switch to per-request-queue srcu for protecting dispatch; this way simplifies quiesce a lot, not to mention that quiesce is always done request-queue wide. Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20211203131534.3668411-3-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
2a904d0085
commit
704b914f15
9 changed files with 53 additions and 49 deletions
|
@ -16,6 +16,7 @@
|
|||
#include <linux/percpu-refcount.h>
|
||||
#include <linux/blkzoned.h>
|
||||
#include <linux/sbitmap.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
struct module;
|
||||
struct request_queue;
|
||||
|
@ -373,11 +374,18 @@ struct request_queue {
|
|||
* devices that do not have multiple independent access ranges.
|
||||
*/
|
||||
struct blk_independent_access_ranges *ia_ranges;
|
||||
|
||||
/**
|
||||
* @srcu: Sleepable RCU. Use as lock when type of the request queue
|
||||
* is blocking (BLK_MQ_F_BLOCKING). Must be the last member
|
||||
*/
|
||||
struct srcu_struct srcu[];
|
||||
};
|
||||
|
||||
/* Keep blk_queue_flag_name[] in sync with the definitions below */
|
||||
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
|
||||
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
|
||||
#define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
|
||||
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
|
||||
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
|
||||
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
|
||||
|
@ -415,6 +423,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
|
|||
|
||||
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
|
||||
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
|
||||
#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
|
||||
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
|
||||
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
|
||||
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue