Skip to content

Commit fa9a67e

Browse files
committed
Merge tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management and ACPI updates from Rafael Wysocki: "These are mostly fixes and cleanups on top of the previous PM+ACPI pull request (cpufreq core and drivers, cpuidle, generic power domains framework). Some of them didn't make it into that pull request and some fix issues introduced by it. The only really new thing is the support for suspend frequency in the cpufreq-dt driver, but it is needed to fix an issue with Exynos platforms. Specifics: - build fix for the new Mediatek MT8173 cpufreq driver (Guenter Roeck). - generic power domains framework fixes (power on error code path, subdomain removal) and cleanup of a deprecated API user (Geert Uytterhoeven, Jon Hunter, Ulf Hansson). - cpufreq-dt driver fixes including two fixes for bugs related to the new Operating Performance Points Device Tree bindings introduced recently (Viresh Kumar). - suspend frequency support for the cpufreq-dt driver (Bartlomiej Zolnierkiewicz, Viresh Kumar). - cpufreq core cleanups (Viresh Kumar). - intel_pstate driver fixes (Chen Yu, Kristen Carlson Accardi). - additional sanity check in the cpuidle core (Xunlei Pang).
- fix for a comment related to CPU power management (Lina Iyer)" * tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: intel_pstate: fix PCT_TO_HWP macro intel_pstate: Fix user input of min/max to legal policy region PM / OPP: Return suspend_opp only if it is enabled cpufreq-dt: add suspend frequency support cpufreq: allow cpufreq_generic_suspend() to work without suspend frequency PM / OPP: add dev_pm_opp_get_suspend_opp() helper staging: board: Migrate away from __pm_genpd_name_add_device() cpufreq: Use __func__ to print function's name cpufreq: staticize cpufreq_cpu_get_raw() PM / Domains: Ensure subdomain is not in use before removing cpufreq: Add ARM_MT8173_CPUFREQ dependency on THERMAL cpuidle/coupled: Add sanity check for safe_state_index PM / Domains: Try power off masters in error path of __pm_genpd_poweron() cpufreq: dt: Tolerance applies on both sides of target voltage cpufreq: dt: Print error on failing to mark OPPs as shared cpufreq: dt: Check OPP count before marking them shared kernel/cpu_pm: fix cpu_cluster_pm_exit comment
2 parents 05c7808 + 4614e0c commit fa9a67e

File tree

13 files changed

+187
-44
lines changed

13 files changed

+187
-44
lines changed

drivers/base/power/domain.c

Lines changed: 25 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -212,6 +212,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
212212
return ret;
213213
}
214214

215+
/**
216+
* genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
217+
* @genpd: PM domait to power off.
218+
*
219+
* Queue up the execution of pm_genpd_poweroff() unless it's already been done
220+
* before.
221+
*/
222+
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
223+
{
224+
queue_work(pm_wq, &genpd->power_off_work);
225+
}
226+
215227
/**
216228
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
217229
* @genpd: PM domain to power up.
@@ -259,8 +271,12 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
259271
return 0;
260272

261273
err:
262-
list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
274+
list_for_each_entry_continue_reverse(link,
275+
&genpd->slave_links,
276+
slave_node) {
263277
genpd_sd_counter_dec(link->master);
278+
genpd_queue_power_off_work(link->master);
279+
}
264280

265281
return ret;
266282
}
@@ -348,18 +364,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
348364
return NOTIFY_DONE;
349365
}
350366

351-
/**
352-
* genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
353-
* @genpd: PM domait to power off.
354-
*
355-
* Queue up the execution of pm_genpd_poweroff() unless it's already been done
356-
* before.
357-
*/
358-
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
359-
{
360-
queue_work(pm_wq, &genpd->power_off_work);
361-
}
362-
363367
/**
364368
* pm_genpd_poweroff - Remove power from a given PM domain.
365369
* @genpd: PM domain to power down.
@@ -1469,6 +1473,13 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
14691473

14701474
mutex_lock(&genpd->lock);
14711475

1476+
if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
1477+
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
1478+
subdomain->name);
1479+
ret = -EBUSY;
1480+
goto out;
1481+
}
1482+
14721483
list_for_each_entry(link, &genpd->master_links, master_node) {
14731484
if (link->slave != subdomain)
14741485
continue;
@@ -1487,6 +1498,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
14871498
break;
14881499
}
14891500

1501+
out:
14901502
mutex_unlock(&genpd->lock);
14911503

14921504
return ret;

drivers/base/power/opp.c

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -340,6 +340,34 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
340340
}
341341
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
342342

343+
/**
344+
* dev_pm_opp_get_suspend_opp() - Get suspend opp
345+
* @dev: device for which we do this operation
346+
*
347+
* Return: This function returns pointer to the suspend opp if it is
348+
* defined and available, otherwise it returns NULL.
349+
*
350+
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
351+
* protected pointer. The reason for the same is that the opp pointer which is
352+
* returned will remain valid for use with opp_get_{voltage, freq} only while
353+
* under the locked area. The pointer returned must be used prior to unlocking
354+
* with rcu_read_unlock() to maintain the integrity of the pointer.
355+
*/
356+
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
357+
{
358+
struct device_opp *dev_opp;
359+
360+
opp_rcu_lockdep_assert();
361+
362+
dev_opp = _find_device_opp(dev);
363+
if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
364+
!dev_opp->suspend_opp->available)
365+
return NULL;
366+
367+
return dev_opp->suspend_opp;
368+
}
369+
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
370+
343371
/**
344372
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
345373
* @dev: device for which we do this operation

drivers/cpufreq/Kconfig.arm

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
8484
config ARM_MT8173_CPUFREQ
8585
bool "Mediatek MT8173 CPUFreq support"
8686
depends on ARCH_MEDIATEK && REGULATOR
87+
depends on !CPU_THERMAL || THERMAL=y
8788
select PM_OPP
8889
help
8990
This adds the CPUFreq driver support for Mediatek MT8173 SoC.

drivers/cpufreq/cpufreq-dt.c

Lines changed: 26 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
196196
struct device *cpu_dev;
197197
struct regulator *cpu_reg;
198198
struct clk *cpu_clk;
199+
struct dev_pm_opp *suspend_opp;
199200
unsigned long min_uV = ~0, max_uV = 0;
200201
unsigned int transition_latency;
201202
bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
239240
*/
240241
of_cpumask_init_opp_table(policy->cpus);
241242

243+
/*
244+
* But we need OPP table to function so if it is not there let's
245+
* give platform code chance to provide it for us.
246+
*/
247+
ret = dev_pm_opp_get_opp_count(cpu_dev);
248+
if (ret <= 0) {
249+
pr_debug("OPP table is not ready, deferring probe\n");
250+
ret = -EPROBE_DEFER;
251+
goto out_free_opp;
252+
}
253+
242254
if (need_update) {
243255
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
244256

@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
249261
* OPP tables are initialized only for policy->cpu, do it for
250262
* others as well.
251263
*/
252-
set_cpus_sharing_opps(cpu_dev, policy->cpus);
264+
ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
265+
if (ret)
266+
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
267+
__func__, ret);
253268

254269
of_property_read_u32(np, "clock-latency", &transition_latency);
255270
} else {
256271
transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
257272
}
258273

259-
/*
260-
* But we need OPP table to function so if it is not there let's
261-
* give platform code chance to provide it for us.
262-
*/
263-
ret = dev_pm_opp_get_opp_count(cpu_dev);
264-
if (ret <= 0) {
265-
pr_debug("OPP table is not ready, deferring probe\n");
266-
ret = -EPROBE_DEFER;
267-
goto out_free_opp;
268-
}
269-
270274
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
271275
if (!priv) {
272276
ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
300304
rcu_read_unlock();
301305

302306
tol_uV = opp_uV * priv->voltage_tolerance / 100;
303-
if (regulator_is_supported_voltage(cpu_reg, opp_uV,
307+
if (regulator_is_supported_voltage(cpu_reg,
308+
opp_uV - tol_uV,
304309
opp_uV + tol_uV)) {
305310
if (opp_uV < min_uV)
306311
min_uV = opp_uV;
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
329334
policy->driver_data = priv;
330335

331336
policy->clk = cpu_clk;
337+
338+
rcu_read_lock();
339+
suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
340+
if (suspend_opp)
341+
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
342+
rcu_read_unlock();
343+
332344
ret = cpufreq_table_validate_and_show(policy, freq_table);
333345
if (ret) {
334346
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
419431
.ready = cpufreq_ready,
420432
.name = "cpufreq-dt",
421433
.attr = cpufreq_dt_attr,
434+
.suspend = cpufreq_generic_suspend,
422435
};
423436

424437
static int dt_cpufreq_probe(struct platform_device *pdev)

drivers/cpufreq/cpufreq.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
239239
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
240240

241241
/* Only for cpufreq core internal use */
242-
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
242+
static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243243
{
244244
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245245

@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
16261626
int ret;
16271627

16281628
if (!policy->suspend_freq) {
1629-
pr_err("%s: suspend_freq can't be zero\n", __func__);
1630-
return -EINVAL;
1629+
pr_debug("%s: suspend_freq not defined\n", __func__);
1630+
return 0;
16311631
}
16321632

16331633
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
20312031
if (!try_module_get(policy->governor->owner))
20322032
return -EINVAL;
20332033

2034-
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2035-
policy->cpu, event);
2034+
pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
20362035

20372036
mutex_lock(&cpufreq_governor_lock);
20382037
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)

drivers/cpufreq/intel_pstate.c

Lines changed: 28 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
260260
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
261261
}
262262

263-
#define PCT_TO_HWP(x) (x * 255 / 100)
264263
static void intel_pstate_hwp_set(void)
265264
{
266-
int min, max, cpu;
267-
u64 value, freq;
265+
int min, hw_min, max, hw_max, cpu, range, adj_range;
266+
u64 value, cap;
267+
268+
rdmsrl(MSR_HWP_CAPABILITIES, cap);
269+
hw_min = HWP_LOWEST_PERF(cap);
270+
hw_max = HWP_HIGHEST_PERF(cap);
271+
range = hw_max - hw_min;
268272

269273
get_online_cpus();
270274

271275
for_each_online_cpu(cpu) {
272276
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
273-
min = PCT_TO_HWP(limits.min_perf_pct);
277+
adj_range = limits.min_perf_pct * range / 100;
278+
min = hw_min + adj_range;
274279
value &= ~HWP_MIN_PERF(~0L);
275280
value |= HWP_MIN_PERF(min);
276281

277-
max = PCT_TO_HWP(limits.max_perf_pct);
282+
adj_range = limits.max_perf_pct * range / 100;
283+
max = hw_min + adj_range;
278284
if (limits.no_turbo) {
279-
rdmsrl( MSR_HWP_CAPABILITIES, freq);
280-
max = HWP_GUARANTEED_PERF(freq);
285+
hw_max = HWP_GUARANTEED_PERF(cap);
286+
if (hw_max < max)
287+
max = hw_max;
281288
}
282289

283290
value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
423430

424431
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
425432
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
433+
limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
434+
limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
426435
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
427436

428437
if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
442451

443452
limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
444453
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
454+
limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
455+
limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
445456
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
446457

447458
if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
9891000

9901001
limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
9911002
limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
992-
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
993-
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
994-
9951003
limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
9961004
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
1005+
1006+
/* Normalize user input to [min_policy_pct, max_policy_pct] */
1007+
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
1008+
limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
9971009
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1010+
limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
1011+
1012+
/* Make sure min_perf_pct <= max_perf_pct */
1013+
limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
1014+
1015+
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
9981016
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
9991017

10001018
if (hwp_active)

drivers/cpuidle/coupled.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,28 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
186186
return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
187187
}
188188

189+
/**
190+
* cpuidle_coupled_state_verify - check if the coupled states are correctly set.
191+
* @drv: struct cpuidle_driver for the platform
192+
*
193+
* Returns 0 for valid state values, a negative error code otherwise:
194+
* * -EINVAL if any coupled state(safe_state_index) is wrongly set.
195+
*/
196+
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
197+
{
198+
int i;
199+
200+
for (i = drv->state_count - 1; i >= 0; i--) {
201+
if (cpuidle_state_is_coupled(drv, i) &&
202+
(drv->safe_state_index == i ||
203+
drv->safe_state_index < 0 ||
204+
drv->safe_state_index >= drv->state_count))
205+
return -EINVAL;
206+
}
207+
208+
return 0;
209+
}
210+
189211
/**
190212
* cpuidle_coupled_set_ready - mark a cpu as ready
191213
* @coupled: the struct coupled that contains the current cpu

drivers/cpuidle/cpuidle.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
3535

3636
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
3737
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
38+
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
3839
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
3940
struct cpuidle_driver *drv, int next_state);
4041
int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
4647
return false;
4748
}
4849

50+
static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
51+
{
52+
return 0;
53+
}
54+
4955
static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
5056
struct cpuidle_driver *drv, int next_state)
5157
{

drivers/cpuidle/driver.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
227227
if (!drv || !drv->state_count)
228228
return -EINVAL;
229229

230+
ret = cpuidle_coupled_state_verify(drv);
231+
if (ret)
232+
return ret;
233+
230234
if (cpuidle_disabled())
231235
return -ENODEV;
232236

drivers/staging/board/armadillo800eva.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
9191
.pdev = &lcdc0_device,
9292
.clocks = lcdc0_clocks,
9393
.nclocks = ARRAY_SIZE(lcdc0_clocks),
94-
.domain = "a4lc",
94+
.domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
9595
},
9696
};
9797

0 commit comments

Comments
 (0)