mmc: queue: Fix queue thread wake-up
The only time the driver sleeps expecting to be woken upon the arrival
of a new request is when the dispatch queue is empty. The only time
that it is known whether the dispatch queue is empty is after NULL is
returned from blk_fetch_request() while under the queue lock.

Recognizing those facts, simplify the synchronization between the queue
thread and the request function. A couple of flags tell the request
function what to do, and the queue lock and barriers associated with
wake-ups ensure synchronization. The result is simpler and allows the
removal of the context_info lock.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Harjani Ritesh <riteshh@codeaurora.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
This commit is contained in:
parent 5be80375f5
commit e0097cf5f2

5 changed files with 22 additions and 29 deletions
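In outline, the patch replaces the context_info lock with flags that are only ever written while the queue lock is held. The sketch below condenses the two sides of the handshake from the queue.c hunks that follow; it is assembled from the patch for readability and is not a compilable excerpt:

	/* Queue thread side: decide how to sleep, under the queue lock. */
	spin_lock_irq(q->queue_lock);
	set_current_state(TASK_INTERRUPTIBLE);
	req = blk_fetch_request(q);
	mq->asleep = false;
	cntx->is_waiting_last_req = false;
	cntx->is_new_req = false;
	if (!req) {
		/* Dispatch queue is empty: pick the wake-up mechanism. */
		if (mq->mqrq_prev->req)
			cntx->is_waiting_last_req = true;	/* will wait on cntx->wait */
		else
			mq->asleep = true;			/* will schedule() */
	}
	spin_unlock_irq(q->queue_lock);

	/*
	 * Request function side: runs with the queue lock held, so the
	 * flags above are stable; wake whichever sleeper is indicated.
	 */
	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}
	if (mq->asleep)
		wake_up_process(mq->thread);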
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1758,8 +1758,6 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
-	struct mmc_host *host = card->host;
-	unsigned long flags;
 	bool req_is_special = mmc_req_is_special(req);
 
 	if (req && !mq->mqrq_prev->req)
@@ -1792,11 +1790,6 @@ int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
-		if (!req && host->areq) {
-			spin_lock_irqsave(&host->context_info.lock, flags);
-			host->context_info.is_waiting_last_req = true;
-			spin_unlock_irqrestore(&host->context_info.lock, flags);
-		}
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -53,6 +53,7 @@ static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
+	struct mmc_context_info *cntx = &mq->card->host->context_info;
 
 	current->flags |= PF_MEMALLOC;
 
@@ -63,6 +64,19 @@ static int mmc_queue_thread(void *d)
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
+		mq->asleep = false;
+		cntx->is_waiting_last_req = false;
+		cntx->is_new_req = false;
+		if (!req) {
+			/*
+			 * Dispatch queue is empty so set flags for
+			 * mmc_request_fn() to wake us up.
+			 */
+			if (mq->mqrq_prev->req)
+				cntx->is_waiting_last_req = true;
+			else
+				mq->asleep = true;
+		}
 		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
@@ -115,7 +129,6 @@ static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
-	unsigned long flags;
 	struct mmc_context_info *cntx;
 
 	if (!mq) {
@@ -127,19 +140,13 @@ static void mmc_request_fn(struct request_queue *q)
 	}
 
 	cntx = &mq->card->host->context_info;
-	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
-		/*
-		 * New MMC request arrived when MMC thread may be
-		 * blocked on the previous request to be complete
-		 * with no current request fetched
-		 */
-		spin_lock_irqsave(&cntx->lock, flags);
-		if (cntx->is_waiting_last_req) {
-			cntx->is_new_req = true;
-			wake_up_interruptible(&cntx->wait);
-		}
-		spin_unlock_irqrestore(&cntx->lock, flags);
-	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+
+	if (cntx->is_waiting_last_req) {
+		cntx->is_new_req = true;
+		wake_up_interruptible(&cntx->wait);
+	}
+
+	if (mq->asleep)
 		wake_up_process(mq->thread);
 }
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -39,6 +39,7 @@ struct mmc_queue {
 	unsigned int		flags;
 #define MMC_QUEUE_SUSPENDED	(1 << 0)
 #define MMC_QUEUE_NEW_REQUEST	(1 << 1)
+	bool			asleep;
 	struct mmc_blk_data	*blkdata;
 	struct request_queue	*queue;
 	struct mmc_queue_req	mqrq[2];
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -504,18 +504,14 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
 	struct mmc_command *cmd;
 	struct mmc_context_info *context_info = &host->context_info;
 	enum mmc_blk_status status;
-	unsigned long flags;
 
 	while (1) {
 		wait_event_interruptible(context_info->wait,
 				(context_info->is_done_rcv ||
 				 context_info->is_new_req));
-		spin_lock_irqsave(&context_info->lock, flags);
 		context_info->is_waiting_last_req = false;
-		spin_unlock_irqrestore(&context_info->lock, flags);
 		if (context_info->is_done_rcv) {
 			context_info->is_done_rcv = false;
-			context_info->is_new_req = false;
 			cmd = mrq->cmd;
 
 			if (!cmd->error || !cmd->retries ||
@@ -534,7 +530,6 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
 				continue; /* wait for done/new event again */
 			}
 		} else if (context_info->is_new_req) {
-			context_info->is_new_req = false;
 			if (!next_req)
 				return MMC_BLK_NEW_REQUEST;
 		}
@@ -3016,7 +3011,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
  */
 void mmc_init_context_info(struct mmc_host *host)
 {
-	spin_lock_init(&host->context_info.lock);
 	host->context_info.is_new_req = false;
 	host->context_info.is_done_rcv = false;
 	host->context_info.is_waiting_last_req = false;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -197,14 +197,12 @@ struct mmc_slot {
 * @is_new_req		wake up reason was new request
 * @is_waiting_last_req	mmc context waiting for single running request
 * @wait		wait queue
- * @lock		lock to protect data fields
 */
 struct mmc_context_info {
 	bool			is_done_rcv;
 	bool			is_new_req;
 	bool			is_waiting_last_req;
 	wait_queue_head_t	wait;
-	spinlock_t		lock;
 };
 
 struct regulator;
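Why the lock can go: every writer of the flags holds the queue lock (the legacy request function is always entered with it held), and the sleep/wake primitives supply the memory barriers the commit message refers to. The generic kernel sleeper/waker idiom the patch relies on looks like this (illustrative, not driver code):

	/*
	 * Sleeper: set_current_state() implies a barrier, so a flag
	 * written by the waker is seen before the task commits to sleep.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!condition)
		schedule();
	__set_current_state(TASK_RUNNING);

	/* Waker: wake_up_process() orders the flag write before the wake-up. */
	condition = true;
	wake_up_process(task);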