Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-17 20:29:24 +00:00)
scsi: ufs: Add clock ungating to a separate workqueue
The UFS driver can receive a request during memory reclaim initiated by kswapd. When the driver queues the ungate work and no idle worker is available, kthreadd is invoked to create a new kworker. Because the kswapd task holds a mutex that kthreadd also needs, this can deadlock. The ungate work must therefore run on a separate workqueue created with the WQ_MEM_RECLAIM flag. Such a workqueue has a rescuer thread that is used when the deadlock condition above would otherwise occur.

Signed-off-by: Vijay Viswanath <vviswana@codeaurora.org>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7f6ba4f12e
commit 10e5e37581
2 changed files with 11 additions and 1 deletion
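Before the diff, a minimal kernel-style sketch of the pattern the patch applies: work that may be queued from the memory-reclaim path gets its own ordered workqueue allocated with WQ_MEM_RECLAIM, whose rescuer thread can still execute the work when kthreadd cannot create a new kworker. The demo_dev structure and demo_* names below are illustrative only and are not part of the UFS driver.

#include <linux/workqueue.h>
#include <linux/errno.h>

/* Hypothetical device context; holds only the fields needed here. */
struct demo_dev {
	struct workqueue_struct *ungate_wq;	/* ordered, WQ_MEM_RECLAIM */
	struct work_struct ungate_work;
};

static void demo_ungate_fn(struct work_struct *work)
{
	/* re-enable clocks here */
}

static int demo_init(struct demo_dev *d)
{
	INIT_WORK(&d->ungate_work, demo_ungate_fn);

	/*
	 * The rescuer thread of a WQ_MEM_RECLAIM workqueue guarantees
	 * forward progress even when new kworkers cannot be spawned.
	 */
	d->ungate_wq = alloc_ordered_workqueue("demo_ungate_wq", WQ_MEM_RECLAIM);
	if (!d->ungate_wq)
		return -ENOMEM;
	return 0;
}

static void demo_on_request(struct demo_dev *d)
{
	/*
	 * Queue on the dedicated workqueue instead of schedule_work(),
	 * which would place the work on the shared system_wq that has
	 * no rescuer.
	 */
	queue_work(d->ungate_wq, &d->ungate_work);
}

static void demo_exit(struct demo_dev *d)
{
	cancel_work_sync(&d->ungate_work);
	destroy_workqueue(d->ungate_wq);
}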
drivers/scsi/ufs/ufshcd.c
@@ -1532,7 +1532,8 @@ start:
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		schedule_work(&hba->clk_gating.ungate_work);
+		queue_work(hba->clk_gating.clk_gating_workq,
+			   &hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
 		 * work to be done or not.
@@ -1718,6 +1719,8 @@ out:
 
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
+	char wq_name[sizeof("ufs_clk_gating_00")];
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
@@ -1725,6 +1728,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+		 hba->host->host_no);
+	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
+							WQ_MEM_RECLAIM);
+
 	hba->clk_gating.is_enabled = true;
 
 	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1752,6 +1760,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+	destroy_workqueue(hba->clk_gating.clk_gating_workq);
 }
 
 /* Must be called with host lock acquired */

drivers/scsi/ufs/ufshcd.h
@@ -362,6 +362,7 @@ struct ufs_clk_gating {
 	struct device_attribute enable_attr;
 	bool is_enabled;
 	int active_reqs;
+	struct workqueue_struct *clk_gating_workq;
 };
 
 struct ufs_saved_pwr_info {