Skip to content

Commit bcbeef5

Browse files
committed
Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
Pull more operating performance points (OPP) framework changes for v4.21 from Viresh Kumar:

 - Fix missing OPP debugfs directory (Viresh Kumar).
 - Make genpd performance states orthogonal to idlestates (Ulf Hansson).
 - Propagate performance state changes from genpd to its master (Viresh Kumar).
 - Minor improvement of some OPP helpers (Viresh Kumar).

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  PM / Domains: Propagate performance state updates
  PM / Domains: Factorize dev_pm_genpd_set_performance_state()
  PM / Domains: Save OPP table pointer in genpd
  OPP: Don't return 0 on error from of_get_required_opp_performance_state()
  OPP: Add dev_pm_opp_xlate_performance_state() helper
  OPP: Improve _find_table_of_opp_np()
  PM / Domains: Make genpd performance states orthogonal to the idlestates
  OPP: Fix missing debugfs supply directory for OPPs
  OPP: Use opp_table->regulators to verify no regulator case
2 parents 83fd1e5 + ade0c94 commit bcbeef5

File tree

6 files changed

+278
-79
lines changed

6 files changed

+278
-79
lines changed

drivers/base/power/domain.c

Lines changed: 148 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
239239
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
240240
#endif
241241

242+
/*
 * _genpd_reeval_performance_state - Compute the aggregate performance state
 * required for @genpd, given a new vote of @state.
 * @genpd: PM domain whose aggregate state is being re-evaluated.
 * @state: Newly requested performance state (one device's/subdomain's vote).
 *
 * Returns the highest performance state currently required by any device or
 * sub-domain of @genpd, including the new @state vote.
 *
 * NOTE(review): per the comment below, the caller must already hold
 * genpd->lock, since link->performance_state is protected by it.
 */
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/*
	 * The new vote is lower than the current aggregate, so the maximum
	 * must be recomputed from scratch over all remaining voters.
	 */

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the master genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to master domain) is different from
	 * link->slave->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
286+
287+
/*
 * _genpd_set_performance_state - Apply a performance state to @genpd and
 * propagate the resulting requirements to its master domains.
 * @genpd: PM domain to update.
 * @state: Aggregate performance state to set (from
 *         _genpd_reeval_performance_state()).
 * @depth: Current recursion/nesting level, used for genpd_lock_nested() so
 *         lockdep can tell the nested master locks apart.
 *
 * Masters are updated before @genpd itself; on any failure the masters that
 * were already updated are rolled back to their previous link votes.
 *
 * Returns 0 on success or a negative error code from the translation helper
 * or a set_performance_state() callback.
 *
 * NOTE(review): the caller is expected to hold genpd->lock; master locks are
 * taken here with depth + 1 — confirm against callers.
 */
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *master;
	struct gpd_link *link;
	int master_state, ret;

	/* Nothing to do if the aggregate state is unchanged. */
	if (state == genpd->performance_state)
		return 0;

	/* Propagate to masters of genpd */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		/*
		 * Find master's performance state: sub-domain and master OPP
		 * tables need not map one-to-one, so translate our state into
		 * the master's numbering via the saved OPP table pointers.
		 */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 master->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		master_state = ret;

		genpd_lock_nested(master, depth + 1);

		/* Record the old vote so it can be restored on failure. */
		link->prev_performance_state = link->performance_state;
		link->performance_state = master_state;
		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		/* Recurse so the master propagates to its own masters too. */
		ret = _genpd_set_performance_state(master, master_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(master);

		if (ret)
			goto err;
	}

	/* All masters updated; now apply the state to this domain itself. */
	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/*
	 * Encountered an error, lets rollback: walk back over the masters
	 * already updated (in reverse) and restore their previous votes.
	 */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
					     slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		genpd_lock_nested(master, depth + 1);

		master_state = link->prev_performance_state;
		link->performance_state = master_state;

		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		/* Rollback failure can only be reported, not recovered. */
		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       master->name, master_state);
		}

		genpd_unlock(master);
	}

	return ret;
}
362+
242363
/**
243364
* dev_pm_genpd_set_performance_state- Set performance state of device's power
244365
* domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
257378
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
258379
{
259380
struct generic_pm_domain *genpd;
260-
struct generic_pm_domain_data *gpd_data, *pd_data;
261-
struct pm_domain_data *pdd;
381+
struct generic_pm_domain_data *gpd_data;
262382
unsigned int prev;
263-
int ret = 0;
383+
int ret;
264384

265385
genpd = dev_to_genpd(dev);
266386
if (IS_ERR(genpd))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
281401
prev = gpd_data->performance_state;
282402
gpd_data->performance_state = state;
283403

284-
/* New requested state is same as Max requested state */
285-
if (state == genpd->performance_state)
286-
goto unlock;
287-
288-
/* New requested state is higher than Max requested state */
289-
if (state > genpd->performance_state)
290-
goto update_state;
291-
292-
/* Traverse all devices within the domain */
293-
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
294-
pd_data = to_gpd_data(pdd);
295-
296-
if (pd_data->performance_state > state)
297-
state = pd_data->performance_state;
298-
}
299-
300-
if (state == genpd->performance_state)
301-
goto unlock;
302-
303-
/*
304-
* We aren't propagating performance state changes of a subdomain to its
305-
* masters as we don't have hardware that needs it. Over that, the
306-
* performance states of subdomain and its masters may not have
307-
* one-to-one mapping and would require additional information. We can
308-
* get back to this once we have hardware that needs it. For that
309-
* reason, we don't have to consider performance state of the subdomains
310-
* of genpd here.
311-
*/
312-
313-
update_state:
314-
if (genpd_status_on(genpd)) {
315-
ret = genpd->set_performance_state(genpd, state);
316-
if (ret) {
317-
gpd_data->performance_state = prev;
318-
goto unlock;
319-
}
320-
}
321-
322-
genpd->performance_state = state;
404+
state = _genpd_reeval_performance_state(genpd, state);
405+
ret = _genpd_set_performance_state(genpd, state, 0);
406+
if (ret)
407+
gpd_data->performance_state = prev;
323408

324-
unlock:
325409
genpd_unlock(genpd);
326410

327411
return ret;
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
347431
return ret;
348432

349433
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
350-
351-
if (unlikely(genpd->set_performance_state)) {
352-
ret = genpd->set_performance_state(genpd, genpd->performance_state);
353-
if (ret) {
354-
pr_warn("%s: Failed to set performance state %d (%d)\n",
355-
genpd->name, genpd->performance_state, ret);
356-
}
357-
}
358-
359434
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
360435
return ret;
361436

@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
19071982
ret);
19081983
goto unlock;
19091984
}
1985+
1986+
/*
1987+
* Save table for faster processing while setting performance
1988+
* state.
1989+
*/
1990+
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
1991+
WARN_ON(!genpd->opp_table);
19101992
}
19111993

19121994
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
19131995
if (ret) {
1914-
if (genpd->set_performance_state)
1996+
if (genpd->set_performance_state) {
1997+
dev_pm_opp_put_opp_table(genpd->opp_table);
19151998
dev_pm_opp_of_remove_table(&genpd->dev);
1999+
}
19162000

19172001
goto unlock;
19182002
}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
19652049
i, ret);
19662050
goto error;
19672051
}
2052+
2053+
/*
2054+
* Save table for faster processing while setting
2055+
* performance state.
2056+
*/
2057+
genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2058+
WARN_ON(!genpd->opp_table);
19682059
}
19692060

19702061
genpd->provider = &np->fwnode;
@@ -1989,8 +2080,10 @@ int of_genpd_add_provider_onecell(struct device_node *np,
19892080
genpd->provider = NULL;
19902081
genpd->has_provider = false;
19912082

1992-
if (genpd->set_performance_state)
2083+
if (genpd->set_performance_state) {
2084+
dev_pm_opp_put_opp_table(genpd->opp_table);
19932085
dev_pm_opp_of_remove_table(&genpd->dev);
2086+
}
19942087
}
19952088

19962089
mutex_unlock(&gpd_list_lock);
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
20242117
if (!gpd->set_performance_state)
20252118
continue;
20262119

2120+
dev_pm_opp_put_opp_table(gpd->opp_table);
20272121
dev_pm_opp_of_remove_table(&gpd->dev);
20282122
}
20292123
}

0 commit comments

Comments
 (0)