mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-30 19:06:14 +00:00
io_uring: remove IOU_F_TWQ_FORCE_NORMAL
Extract a function for non-local task_work_add, and use it directly from io_move_task_work_from_local(). Now we don't use IOU_F_TWQ_FORCE_NORMAL and it can be killed. As a small positive side effect we don't grab task->io_uring in io_req_normal_work_add anymore, which is not needed for io_req_local_work_add(). Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/2e55571e8ff2927ae3cc12da606d204e2485525b.1687518903.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
2fdd6fb5ff
commit
91c7884ac9
2 changed files with 15 additions and 15 deletions
|
@ -1317,7 +1317,7 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
|
static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
|
||||||
{
|
{
|
||||||
struct io_ring_ctx *ctx = req->ctx;
|
struct io_ring_ctx *ctx = req->ctx;
|
||||||
unsigned nr_wait, nr_tw, nr_tw_prev;
|
unsigned nr_wait, nr_tw, nr_tw_prev;
|
||||||
|
@ -1368,19 +1368,11 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
|
||||||
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
|
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
|
static void io_req_normal_work_add(struct io_kiocb *req)
|
||||||
{
|
{
|
||||||
struct io_uring_task *tctx = req->task->io_uring;
|
struct io_uring_task *tctx = req->task->io_uring;
|
||||||
struct io_ring_ctx *ctx = req->ctx;
|
struct io_ring_ctx *ctx = req->ctx;
|
||||||
|
|
||||||
if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
|
|
||||||
(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
|
|
||||||
rcu_read_lock();
|
|
||||||
io_req_local_work_add(req, flags);
|
|
||||||
rcu_read_unlock();
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* task_work already pending, we're done */
|
/* task_work already pending, we're done */
|
||||||
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
|
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
|
||||||
return;
|
return;
|
||||||
|
@ -1394,6 +1386,17 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
|
||||||
io_fallback_tw(tctx);
|
io_fallback_tw(tctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
|
||||||
|
{
|
||||||
|
if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
|
||||||
|
rcu_read_lock();
|
||||||
|
io_req_local_work_add(req, flags);
|
||||||
|
rcu_read_unlock();
|
||||||
|
} else {
|
||||||
|
io_req_normal_work_add(req);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
|
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
|
||||||
{
|
{
|
||||||
struct llist_node *node;
|
struct llist_node *node;
|
||||||
|
@ -1404,7 +1407,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
|
||||||
io_task_work.node);
|
io_task_work.node);
|
||||||
|
|
||||||
node = node->next;
|
node = node->next;
|
||||||
__io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
|
io_req_normal_work_add(req);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -16,9 +16,6 @@
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
/* don't use deferred task_work */
|
|
||||||
IOU_F_TWQ_FORCE_NORMAL = 1,
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* A hint to not wake right away but delay until there are enough of
|
* A hint to not wake right away but delay until there are enough of
|
||||||
* tw's queued to match the number of CQEs the task is waiting for.
|
* tw's queued to match the number of CQEs the task is waiting for.
|
||||||
|
@ -26,7 +23,7 @@ enum {
|
||||||
* Must not be used with requests generating more than one CQE.
|
* Must not be used with requests generating more than one CQE.
|
||||||
* It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
|
* It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
|
||||||
*/
|
*/
|
||||||
IOU_F_TWQ_LAZY_WAKE = 2,
|
IOU_F_TWQ_LAZY_WAKE = 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue