io_uring: finish waiting before flushing overflow entries
If we have overflow entries being generated after we've done the initial flush in io_cqring_wait(), then we could be flushing them in the main wait loop as well. If that's done after having added ourselves to the cq_wait waitqueue, then the task state can be != TASK_RUNNING when we enter the overflow flush. Check for the need to overflow flush, and finish our wait cycle first if we have to do so.

Reported-and-tested-by: syzbot+cf6ea1d6bb30a4ce10b2@syzkaller.appspotmail.com
Link: https://lore.kernel.org/io-uring/000000000000cb143a05f04eee15@google.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5ad70eb27d
commit 52ea806ad9

1 changed file with 16 additions and 9 deletions
io_uring/io_uring.c

@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
         io_cq_unlock_post(ctx);
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
-        if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-                /* iopoll syncs against uring_lock, not completion_lock */
-                if (ctx->flags & IORING_SETUP_IOPOLL)
-                        mutex_lock(&ctx->uring_lock);
-                __io_cqring_overflow_flush(ctx);
-                if (ctx->flags & IORING_SETUP_IOPOLL)
-                        mutex_unlock(&ctx->uring_lock);
-        }
+        /* iopoll syncs against uring_lock, not completion_lock */
+        if (ctx->flags & IORING_SETUP_IOPOLL)
+                mutex_lock(&ctx->uring_lock);
+        __io_cqring_overflow_flush(ctx);
+        if (ctx->flags & IORING_SETUP_IOPOLL)
+                mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+{
+        if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+                io_cqring_do_overflow_flush(ctx);
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -2549,7 +2553,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
         trace_io_uring_cqring_wait(ctx, min_events);
         do {
-                io_cqring_overflow_flush(ctx);
+                if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+                        finish_wait(&ctx->cq_wait, &iowq.wq);
+                        io_cqring_do_overflow_flush(ctx);
+                }
                 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                 TASK_INTERRUPTIBLE);
                 ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
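A note on the pattern being fixed: the overflow flush may need to take ctx->uring_lock for IOPOLL rings, i.e. it can sleep, and sleeping is only safe while the task is TASK_RUNNING and off the waitqueue. Below is a minimal sketch of that waitqueue discipline. It is illustrative kernel-style code, not the actual io_uring implementation; wait_for_events(), cond_met(), need_flush(), and flush_work() are hypothetical stand-ins.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/*
 * Hypothetical wait loop with the same shape as io_cqring_wait() after
 * this change. Nothing that can sleep may run between
 * prepare_to_wait_exclusive() and schedule(): the task state is
 * TASK_INTERRUPTIBLE there, so a blocking call such as mutex_lock()
 * would trip the "do not call blocking ops when !TASK_RUNNING" debug
 * check and can clobber the task state, losing wakeups.
 */
static int wait_for_events(struct wait_queue_head *wq,
                           bool (*cond_met)(void),
                           bool (*need_flush)(void),
                           void (*flush_work)(void))  /* may sleep */
{
        DEFINE_WAIT(wait);
        int ret = 0;

        do {
                /* Leave the waitqueue before doing work that may sleep. */
                if (need_flush()) {
                        finish_wait(wq, &wait);  /* back to TASK_RUNNING */
                        flush_work();
                }
                prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
                if (cond_met())
                        break;
                schedule();
                if (signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (!ret);
        finish_wait(wq, &wait);
        return ret;
}

This is the shape of the new branch in io_cqring_wait() above: finish_wait(&ctx->cq_wait, &iowq.wq) first, io_cqring_do_overflow_flush() second, and only then prepare_to_wait_exclusive() to re-arm the wait for the next sleep.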