Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-07-23 07:12:09 +00:00
Merge branch 'for-4.9/block-irq' of git://git.kernel.dk/linux-block
Pull blk-mq irq/cpu mapping updates from Jens Axboe:
 "This is the block-irq topic branch for 4.9-rc. It's mostly from
  Christoph, and it allows drivers to specify their own mappings, and
  more importantly, to share the blk-mq mappings with the IRQ affinity
  mappings. It's a good step towards making this work better out of
  the box"

* 'for-4.9/block-irq' of git://git.kernel.dk/linux-block:
  blk_mq: linux/blk-mq.h does not include all the headers it depends on
  blk-mq: kill unused blk_mq_create_mq_map()
  blk-mq: get rid of the cpumask in struct blk_mq_tags
  nvme: remove the post_scan callout
  nvme: switch to use pci_alloc_irq_vectors
  blk-mq: provide a default queue mapping for PCI device
  blk-mq: allow the driver to pass in a queue mapping
  blk-mq: remove ->map_queue
  blk-mq: only allocate a single mq_map per tag_set
  blk-mq: don't redistribute hardware queues on a CPU hotplug event
commit 12e3d3cdd9

27 changed files with 159 additions and 191 deletions
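For context on what "sharing the blk-mq mappings with the IRQ affinity mappings" means for a driver, here is a minimal sketch of the model this branch enables, assuming the 4.9-era API: pci_alloc_irq_vectors() with PCI_IRQ_AFFINITY spreads the vectors across CPUs, and the new ->map_queues callback reuses that spreading through the PCI helper added in "blk-mq: provide a default queue mapping for PCI device". The driver itself (my_dev, my_setup_queues, the elided ->queue_rq) is hypothetical and not part of this commit.

/*
 * Hypothetical PCI block driver sketch (not from this commit), showing
 * how the IRQ affinity and the blk-mq queue map can be derived from the
 * same vector spreading.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct my_dev {
        struct pci_dev *pdev;
        struct blk_mq_tag_set tag_set;
};

/* Reuse the PCI IRQ affinity masks as the hardware queue map. */
static int my_map_queues(struct blk_mq_tag_set *set)
{
        struct my_dev *dev = set->driver_data;

        return blk_mq_pci_map_queues(set, dev->pdev);
}

static struct blk_mq_ops my_mq_ops = {
        /* ->queue_rq, ->init_request, ... elided for brevity */
        .map_queues     = my_map_queues,
};

static int my_setup_queues(struct my_dev *dev, unsigned int nr_queues)
{
        int nr_vecs;

        /*
         * PCI_IRQ_AFFINITY spreads the MSI-X vectors over the online
         * CPUs; blk_mq_pci_map_queues() later reads those masks back.
         */
        nr_vecs = pci_alloc_irq_vectors(dev->pdev, 1, nr_queues,
                                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (nr_vecs < 0)
                return nr_vecs;

        dev->tag_set.ops = &my_mq_ops;
        dev->tag_set.nr_hw_queues = nr_vecs;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.driver_data = dev;
        return blk_mq_alloc_tag_set(&dev->tag_set);
}

Compared with the pre-4.9 scheme, the interrupt layer rather than each driver now decides how vectors map to CPUs, and blk-mq simply mirrors that decision.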
include/linux/blk-mq.h

@@ -62,6 +62,7 @@ struct blk_mq_hw_ctx {
 };
 
 struct blk_mq_tag_set {
+	unsigned int *mq_map;
 	struct blk_mq_ops *ops;
 	unsigned int nr_hw_queues;
 	unsigned int queue_depth;	/* max hw supported */
@@ -85,7 +86,6 @@ struct blk_mq_queue_data {
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -99,6 +99,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 
 
 struct blk_mq_ops {
@@ -107,11 +108,6 @@ struct blk_mq_ops {
 	 */
 	queue_rq_fn *queue_rq;
 
-	/*
-	 * Map to specific hardware queue
-	 */
-	map_queue_fn *map_queue;
-
 	/*
 	 * Called on request timeout
 	 */
@@ -144,6 +140,8 @@ struct blk_mq_ops {
 	init_request_fn *init_request;
 	exit_request_fn *exit_request;
 	reinit_request_fn *reinit_request;
+
+	map_queues_fn *map_queues;
 };
 
 enum {
@@ -199,7 +197,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
 		unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
 
 enum {
 	BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -218,7 +215,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
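To make the ops-table change in the hunks above concrete, here is a small sketch of what a driver's blk_mq_ops looks like once ->map_queue is gone. The names foo_queue_rq and foo_map_queues are hypothetical placeholders, and the old mandatory line appears only in the comment.

#include <linux/blk-mq.h>

static int foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd);
static int foo_map_queues(struct blk_mq_tag_set *set);

/*
 * Before this branch every blk-mq driver carried the same boilerplate:
 *
 *	.map_queue	= blk_mq_map_queue,
 *
 * With ->map_queue removed, ->map_queues (note the plural) is optional:
 * leaving it NULL makes the core fall back to its default CPU-to-queue
 * mapping.
 */
static struct blk_mq_ops foo_mq_ops = {
        .queue_rq       = foo_queue_rq,
        .map_queues     = foo_map_queues,
};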