
Commit 7eb584d

Dolev Raviv authored and Christoph Hellwig committed
ufs: refactor configuring power mode
Sometimes the device reports its maximum power and speed capabilities, but we might not wish to configure it to use those maximum capabilities. This change adds support for a vendor-specific host driver to implement a power change notify callback.

To enable configuring different power modes (number of lanes, gear number and fast/slow modes), it is necessary to split the configuration stage from the stage that reads the device's maximum power mode. In addition, the maximum power mode does not need to be read more than once, so it is stored after the first read.

Signed-off-by: Dolev Raviv <[email protected]>
Signed-off-by: Yaniv Gardi <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 57d104c commit 7eb584d
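
The pwr_change_notify hook added here is called twice per mode change: at PRE_CHANGE the vendor driver may rewrite the desired ufs_pa_layer_attr into the parameters that will actually be programmed, and at POST_CHANGE it is told which parameters were programmed. Below is a minimal sketch of what such a hook could look like in a vendor driver; the ufs_acme_* names and the gear-2 cap are invented for illustration and are not part of this commit.

#include <linux/kernel.h>
#include <linux/string.h>
#include "ufshcd.h"

/*
 * Hypothetical vendor hook (not part of this commit): a board that cannot
 * sustain the device's maximum speed clamps the gear at PRE_CHANGE and
 * logs the result at POST_CHANGE.
 */
static int ufs_acme_pwr_change_notify(struct ufs_hba *hba, bool status,
				      struct ufs_pa_layer_attr *desired,
				      struct ufs_pa_layer_attr *final)
{
	if (status == PRE_CHANGE) {
		/* Start from what the core asked for... */
		memcpy(final, desired, sizeof(*final));
		/* ...then cap both directions at gear 2 for this board. */
		final->gear_rx = min_t(u32, final->gear_rx, 2);
		final->gear_tx = min_t(u32, final->gear_tx, 2);
	} else {	/* POST_CHANGE */
		/* 'final' holds the mode that was actually programmed. */
		dev_dbg(hba->dev, "power mode set: rx gear %u, tx gear %u\n",
			final->gear_rx, final->gear_tx);
	}
	return 0;
}

static struct ufs_hba_variant_ops ufs_acme_vops = {
	.name			= "acme",
	.pwr_change_notify	= ufs_acme_pwr_change_notify,
};

Note that ufshcd_config_pwr_mode() only copies the desired parameters into the final ones itself when no hook is registered, so a hook that merely wants to observe POST_CHANGE still has to fill in the final parameters at PRE_CHANGE.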

File tree

2 files changed: +160, -33 lines

drivers/scsi/ufs/ufshcd.c

Lines changed: 133 additions & 33 deletions
@@ -179,6 +179,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode);
 
 static inline int ufshcd_enable_irq(struct ufs_hba *hba)
 {
@@ -1958,40 +1960,83 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 }
 
 /**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- *	maximum capability attribute information.
- * @hba: per adapter instance
- *
- * Returns 0 on success, non-zero value on failure
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
  */
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 {
-	enum {RX = 0, TX = 1};
-	u32 lanes[] = {1, 1};
-	u32 gear[] = {1, 1};
-	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
-	int ret;
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+	if (hba->max_pwr_info.is_valid)
+		return 0;
+
+	pwr_info->pwr_tx = FASTAUTO_MODE;
+	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+			&pwr_info->lane_rx);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+			&pwr_info->lane_tx);
+
+	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+				__func__,
+				pwr_info->lane_rx,
+				pwr_info->lane_tx);
+		return -EINVAL;
+	}
 
 	/*
 	 * First, get the maximum gears of HS speed.
 	 * If a zero value, it means there is no HSGEAR capability.
 	 * Then, get the maximum gears of PWM speed.
 	 */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
-	if (!gear[RX]) {
-		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
-		pwr[RX] = SLOWAUTO_MODE;
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+	if (!pwr_info->gear_rx) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				&pwr_info->gear_rx);
+		if (!pwr_info->gear_rx) {
+			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+				__func__, pwr_info->gear_rx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_rx = SLOWAUTO_MODE;
 	}
 
-	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
-	if (!gear[TX]) {
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+			&pwr_info->gear_tx);
+	if (!pwr_info->gear_tx) {
 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
-				    &gear[TX]);
-		pwr[TX] = SLOWAUTO_MODE;
+				&pwr_info->gear_tx);
+		if (!pwr_info->gear_tx) {
+			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+				__func__, pwr_info->gear_tx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_tx = SLOWAUTO_MODE;
+	}
+
+	hba->max_pwr_info.is_valid = true;
+	return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode)
+{
+	int ret;
+
+	/* if already configured to the requested pwr_mode */
+	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+		return 0;
 	}
 
 	/*
@@ -2000,23 +2045,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
 	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
 	 * - PA_HSSERIES
 	 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
-	if (pwr[RX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+			pwr_mode->lane_rx);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+			pwr_mode->pwr_rx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
 
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
-	if (pwr[TX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+			pwr_mode->lane_tx);
+	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+			pwr_mode->pwr_tx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
 
-	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
-		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+			pwr_mode->pwr_tx == FASTAUTO_MODE ||
+			pwr_mode->pwr_rx == FAST_MODE ||
+			pwr_mode->pwr_tx == FAST_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+				pwr_mode->hs_rate);
 
-	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
-	if (ret)
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+			| pwr_mode->pwr_tx);
+
+	if (ret) {
 		dev_err(hba->dev,
-			"pwr_mode: power mode change failed %d\n", ret);
+			"%s: power mode change failed %d\n", __func__, ret);
+	} else {
+		if (hba->vops && hba->vops->pwr_change_notify)
+			hba->vops->pwr_change_notify(hba,
+				POST_CHANGE, NULL, pwr_mode);
+
+		memcpy(&hba->pwr_info, pwr_mode,
+			sizeof(struct ufs_pa_layer_attr));
+	}
+
+	return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+	struct ufs_pa_layer_attr final_params = { 0 };
+	int ret;
+
+	if (hba->vops && hba->vops->pwr_change_notify)
+		hba->vops->pwr_change_notify(hba,
+		     PRE_CHANGE, desired_pwr_mode, &final_params);
+	else
+		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+	ret = ufshcd_change_power_mode(hba, &final_params);
 
 	return ret;
 }
@@ -3757,7 +3846,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 	hba->wlun_dev_clr_ua = true;
 
-	ufshcd_config_max_pwr_mode(hba);
+	if (ufshcd_get_max_pwr_mode(hba)) {
+		dev_err(hba->dev,
+			"%s: Failed getting max supported power mode\n",
+			__func__);
+	} else {
+		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+		if (ret)
+			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+					__func__, ret);
+	}
 
 	/*
 	 * If we are in error handling context or in power management callbacks
@@ -4920,6 +5018,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CDB_SIZE;
 
+	hba->max_pwr_info.is_valid = false;
+
 	/* Initailize wait queue for task management */
 	init_waitqueue_head(&hba->tm_wq);
 	init_waitqueue_head(&hba->tm_tag_wq);
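
Splitting the read of the maximum capabilities (ufshcd_get_max_pwr_mode(), cached in hba->max_pwr_info) from applying a mode (ufshcd_config_pwr_mode()) means a later caller inside ufshcd.c could also request something below the maximum. A rough sketch of such a caller, assuming probe has already populated the cache; the helper name ufshcd_scale_down_gear() is made up for illustration:

/*
 * Hypothetical caller (not part of this commit): request a reduced-speed
 * mode by copying the cached maximums and lowering the gear before handing
 * the result to ufshcd_config_pwr_mode().
 */
static int ufshcd_scale_down_gear(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr new_pwr;

	if (!hba->max_pwr_info.is_valid)
		return -EINVAL;

	new_pwr = hba->max_pwr_info.info;
	new_pwr.gear_rx = 1;
	new_pwr.gear_tx = 1;

	/* ufshcd_change_power_mode() is a no-op if this matches hba->pwr_info */
	return ufshcd_config_pwr_mode(hba, &new_pwr);
}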

drivers/scsi/ufs/ufshcd.h

Lines changed: 27 additions & 0 deletions
@@ -221,6 +221,22 @@ struct ufs_clk_info {
 
 #define PRE_CHANGE 0
 #define POST_CHANGE 1
+
+struct ufs_pa_layer_attr {
+	u32 gear_rx;
+	u32 gear_tx;
+	u32 lane_rx;
+	u32 lane_tx;
+	u32 pwr_rx;
+	u32 pwr_tx;
+	u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+	bool is_valid;
+	struct ufs_pa_layer_attr info;
+};
+
 /**
  * struct ufs_hba_variant_ops - variant specific callbacks
  * @name: variant name
@@ -232,6 +248,9 @@ struct ufs_clk_info {
  *			variant specific Uni-Pro initialization.
  * @link_startup_notify: called before and after Link startup is carried out
  *			to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ *			is carried out to allow vendor spesific capabilities
+ *			to be set.
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  */
@@ -243,6 +262,9 @@ struct ufs_hba_variant_ops {
 	int	(*setup_regulators)(struct ufs_hba *, bool);
 	int	(*hce_enable_notify)(struct ufs_hba *, bool);
 	int	(*link_startup_notify)(struct ufs_hba *, bool);
+	int	(*pwr_change_notify)(struct ufs_hba *,
+					bool, struct ufs_pa_layer_attr *,
+					struct ufs_pa_layer_attr *);
 	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
 };
@@ -302,6 +324,8 @@ struct ufs_init_prefetch {
  * @auto_bkops_enabled: to track whether bkops is enabled in device
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device max valid pwm
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -387,6 +411,9 @@ struct ufs_hba {
 	struct list_head clk_list_head;
 
 	bool wlun_dev_clr_ua;
+
+	struct ufs_pa_layer_attr pwr_info;
+	struct ufs_pwr_mode_info max_pwr_info;
 };
 
 #define ufshcd_writel(hba, val, reg) \
