Skip to content

Commit 0e9d4ca

Browse files
Can Guo authored and Martin K. Petersen committed
scsi: ufs: Protect some contexts from unexpected clock scaling
In contexts like suspend, shutdown, and error handling we need to suspend devfreq to make sure these contexts won't be disturbed by clock scaling. However, suspending devfreq is not enough since users can still trigger a clock scaling by manipulating the devfreq sysfs nodes like min/max_freq and governor even after devfreq is suspended. Moreover, merely suspending devfreq cannot synchronize a clock scaling which has already been invoked through these sysfs nodes. Add one more flag in struct clk_scaling and wrap the entire func ufshcd_devfreq_scale() with the clk_scaling_lock, so that we can use this flag and clk_scaling_lock to control and synchronize clock scaling invoked through devfreq sysfs nodes. Link: https://lore.kernel.org/r/[email protected] Reviewed-by: Stanley Chu <[email protected]> Signed-off-by: Can Guo <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
1 parent 4cd4899 commit 0e9d4ca

File tree

2 files changed

+54
-32
lines changed

2 files changed

+54
-32
lines changed

drivers/scsi/ufs/ufshcd.c

Lines changed: 49 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1198,19 +1198,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
11981198
*/
11991199
ufshcd_scsi_block_requests(hba);
12001200
down_write(&hba->clk_scaling_lock);
1201-
if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1201+
1202+
if (!hba->clk_scaling.is_allowed ||
1203+
ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
12021204
ret = -EBUSY;
12031205
up_write(&hba->clk_scaling_lock);
12041206
ufshcd_scsi_unblock_requests(hba);
1207+
goto out;
12051208
}
12061209

1210+
/* let's not get into low power until clock scaling is completed */
1211+
ufshcd_hold(hba, false);
1212+
1213+
out:
12071214
return ret;
12081215
}
12091216

1210-
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1217+
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
12111218
{
1212-
up_write(&hba->clk_scaling_lock);
1219+
if (writelock)
1220+
up_write(&hba->clk_scaling_lock);
1221+
else
1222+
up_read(&hba->clk_scaling_lock);
12131223
ufshcd_scsi_unblock_requests(hba);
1224+
ufshcd_release(hba);
12141225
}
12151226

12161227
/**
@@ -1225,13 +1236,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
12251236
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
12261237
{
12271238
int ret = 0;
1228-
1229-
/* let's not get into low power until clock scaling is completed */
1230-
ufshcd_hold(hba, false);
1239+
bool is_writelock = true;
12311240

12321241
ret = ufshcd_clock_scaling_prepare(hba);
12331242
if (ret)
1234-
goto out;
1243+
return ret;
12351244

12361245
/* scale down the gear before scaling down clocks */
12371246
if (!scale_up) {
@@ -1257,14 +1266,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
12571266
}
12581267

12591268
/* Enable Write Booster if we have scaled up else disable it */
1260-
up_write(&hba->clk_scaling_lock);
1269+
downgrade_write(&hba->clk_scaling_lock);
1270+
is_writelock = false;
12611271
ufshcd_wb_ctrl(hba, scale_up);
1262-
down_write(&hba->clk_scaling_lock);
12631272

12641273
out_unprepare:
1265-
ufshcd_clock_scaling_unprepare(hba);
1266-
out:
1267-
ufshcd_release(hba);
1274+
ufshcd_clock_scaling_unprepare(hba, is_writelock);
12681275
return ret;
12691276
}
12701277

@@ -1538,7 +1545,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
15381545
{
15391546
struct ufs_hba *hba = dev_get_drvdata(dev);
15401547

1541-
return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1548+
return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
15421549
}
15431550

15441551
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1558,7 +1565,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
15581565
}
15591566

15601567
value = !!value;
1561-
if (value == hba->clk_scaling.is_allowed)
1568+
if (value == hba->clk_scaling.is_enabled)
15621569
goto out;
15631570

15641571
pm_runtime_get_sync(hba->dev);
@@ -1567,7 +1574,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
15671574
cancel_work_sync(&hba->clk_scaling.suspend_work);
15681575
cancel_work_sync(&hba->clk_scaling.resume_work);
15691576

1570-
hba->clk_scaling.is_allowed = value;
1577+
hba->clk_scaling.is_enabled = value;
15711578

15721579
if (value) {
15731580
ufshcd_resume_clkscaling(hba);
@@ -1906,15 +1913,15 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
19061913
snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
19071914
hba->host->host_no);
19081915
hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1909-
1910-
ufshcd_clkscaling_init_sysfs(hba);
19111916
}
19121917

19131918
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
19141919
{
19151920
if (!ufshcd_is_clkscaling_supported(hba))
19161921
return;
19171922

1923+
if (hba->clk_scaling.enable_attr.attr.name)
1924+
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
19181925
destroy_workqueue(hba->clk_scaling.workq);
19191926
ufshcd_devfreq_remove(hba);
19201927
}
@@ -1979,7 +1986,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
19791986
if (!hba->clk_scaling.active_reqs++)
19801987
queue_resume_work = true;
19811988

1982-
if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1989+
if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
19831990
return;
19841991

19851992
if (queue_resume_work)
@@ -5763,18 +5770,24 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
57635770
ufshcd_vops_resume(hba, pm_op);
57645771
} else {
57655772
ufshcd_hold(hba, false);
5766-
if (hba->clk_scaling.is_allowed) {
5773+
if (hba->clk_scaling.is_enabled) {
57675774
cancel_work_sync(&hba->clk_scaling.suspend_work);
57685775
cancel_work_sync(&hba->clk_scaling.resume_work);
57695776
ufshcd_suspend_clkscaling(hba);
57705777
}
5778+
down_write(&hba->clk_scaling_lock);
5779+
hba->clk_scaling.is_allowed = false;
5780+
up_write(&hba->clk_scaling_lock);
57715781
}
57725782
}
57735783

57745784
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
57755785
{
57765786
ufshcd_release(hba);
5777-
if (hba->clk_scaling.is_allowed)
5787+
down_write(&hba->clk_scaling_lock);
5788+
hba->clk_scaling.is_allowed = true;
5789+
up_write(&hba->clk_scaling_lock);
5790+
if (hba->clk_scaling.is_enabled)
57785791
ufshcd_resume_clkscaling(hba);
57795792
pm_runtime_put(hba->dev);
57805793
}
@@ -7747,12 +7760,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
77477760
sizeof(struct ufs_pa_layer_attr));
77487761
hba->clk_scaling.saved_pwr_info.is_valid = true;
77497762
if (!hba->devfreq) {
7763+
hba->clk_scaling.is_allowed = true;
77507764
ret = ufshcd_devfreq_init(hba);
77517765
if (ret)
77527766
goto out;
7753-
}
77547767

7755-
hba->clk_scaling.is_allowed = true;
7768+
hba->clk_scaling.is_enabled = true;
7769+
ufshcd_clkscaling_init_sysfs(hba);
7770+
}
77567771
}
77577772

77587773
ufs_bsg_probe(hba);
@@ -8672,11 +8687,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
86728687
ufshcd_hold(hba, false);
86738688
hba->clk_gating.is_suspended = true;
86748689

8675-
if (hba->clk_scaling.is_allowed) {
8690+
if (hba->clk_scaling.is_enabled) {
86768691
cancel_work_sync(&hba->clk_scaling.suspend_work);
86778692
cancel_work_sync(&hba->clk_scaling.resume_work);
86788693
ufshcd_suspend_clkscaling(hba);
86798694
}
8695+
down_write(&hba->clk_scaling_lock);
8696+
hba->clk_scaling.is_allowed = false;
8697+
up_write(&hba->clk_scaling_lock);
86808698

86818699
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
86828700
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8773,8 +8791,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
87738791
goto out;
87748792

87758793
set_link_active:
8776-
if (hba->clk_scaling.is_allowed)
8777-
ufshcd_resume_clkscaling(hba);
87788794
ufshcd_vreg_set_hpm(hba);
87798795
/*
87808796
* Device hardware reset is required to exit DeepSleep. Also, for
@@ -8798,7 +8814,10 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
87988814
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
87998815
ufshcd_disable_auto_bkops(hba);
88008816
enable_gating:
8801-
if (hba->clk_scaling.is_allowed)
8817+
down_write(&hba->clk_scaling_lock);
8818+
hba->clk_scaling.is_allowed = true;
8819+
up_write(&hba->clk_scaling_lock);
8820+
if (hba->clk_scaling.is_enabled)
88028821
ufshcd_resume_clkscaling(hba);
88038822
hba->clk_gating.is_suspended = false;
88048823
hba->dev_info.b_rpm_dev_flush_capable = false;
@@ -8901,7 +8920,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
89018920

89028921
hba->clk_gating.is_suspended = false;
89038922

8904-
if (hba->clk_scaling.is_allowed)
8923+
down_write(&hba->clk_scaling_lock);
8924+
hba->clk_scaling.is_allowed = true;
8925+
up_write(&hba->clk_scaling_lock);
8926+
if (hba->clk_scaling.is_enabled)
89058927
ufshcd_resume_clkscaling(hba);
89068928

89078929
/* Enable Auto-Hibernate if configured */
@@ -8923,8 +8945,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
89238945
ufshcd_vops_suspend(hba, pm_op);
89248946
disable_irq_and_vops_clks:
89258947
ufshcd_disable_irq(hba);
8926-
if (hba->clk_scaling.is_allowed)
8927-
ufshcd_suspend_clkscaling(hba);
89288948
ufshcd_setup_clocks(hba, false);
89298949
if (ufshcd_is_clkgating_allowed(hba)) {
89308950
hba->clk_gating.state = CLKS_OFF;
@@ -9165,8 +9185,6 @@ void ufshcd_remove(struct ufs_hba *hba)
91659185

91669186
ufshcd_exit_clk_scaling(hba);
91679187
ufshcd_exit_clk_gating(hba);
9168-
if (ufshcd_is_clkscaling_supported(hba))
9169-
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
91709188
ufshcd_hba_exit(hba);
91719189
}
91729190
EXPORT_SYMBOL_GPL(ufshcd_remove);

drivers/scsi/ufs/ufshcd.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -419,7 +419,10 @@ struct ufs_saved_pwr_info {
419419
* @suspend_work: worker to suspend devfreq
420420
* @resume_work: worker to resume devfreq
421421
* @min_gear: lowest HS gear to scale down to
422-
* @is_allowed: tracks if scaling is currently allowed or not
422+
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
423+
clkscale_enable sysfs node
424+
* @is_allowed: tracks if scaling is currently allowed or not, used to block
425+
clock scaling which is not invoked from devfreq governor
423426
* @is_busy_started: tracks if busy period has started or not
424427
* @is_suspended: tracks if devfreq is suspended or not
425428
*/
@@ -434,6 +437,7 @@ struct ufs_clk_scaling {
434437
struct work_struct suspend_work;
435438
struct work_struct resume_work;
436439
u32 min_gear;
440+
bool is_enabled;
437441
bool is_allowed;
438442
bool is_busy_started;
439443
bool is_suspended;

0 commit comments

Comments (0)