@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
239
239
/* No-op stub used when the accounting support above is configured out (see #endif). */
static inline void genpd_update_accounting (struct generic_pm_domain * genpd ) {}
240
240
#endif
241
241
242
+ static int _genpd_reeval_performance_state (struct generic_pm_domain * genpd ,
243
+ unsigned int state )
244
+ {
245
+ struct generic_pm_domain_data * pd_data ;
246
+ struct pm_domain_data * pdd ;
247
+ struct gpd_link * link ;
248
+
249
+ /* New requested state is same as Max requested state */
250
+ if (state == genpd -> performance_state )
251
+ return state ;
252
+
253
+ /* New requested state is higher than Max requested state */
254
+ if (state > genpd -> performance_state )
255
+ return state ;
256
+
257
+ /* Traverse all devices within the domain */
258
+ list_for_each_entry (pdd , & genpd -> dev_list , list_node ) {
259
+ pd_data = to_gpd_data (pdd );
260
+
261
+ if (pd_data -> performance_state > state )
262
+ state = pd_data -> performance_state ;
263
+ }
264
+
265
+ /*
266
+ * Traverse all sub-domains within the domain. This can be
267
+ * done without any additional locking as the link->performance_state
268
+ * field is protected by the master genpd->lock, which is already taken.
269
+ *
270
+ * Also note that link->performance_state (subdomain's performance state
271
+ * requirement to master domain) is different from
272
+ * link->slave->performance_state (current performance state requirement
273
+ * of the devices/sub-domains of the subdomain) and so can have a
274
+ * different value.
275
+ *
276
+ * Note that we also take vote from powered-off sub-domains into account
277
+ * as the same is done for devices right now.
278
+ */
279
+ list_for_each_entry (link , & genpd -> master_links , master_node ) {
280
+ if (link -> performance_state > state )
281
+ state = link -> performance_state ;
282
+ }
283
+
284
+ return state ;
285
+ }
286
+
287
+ static int _genpd_set_performance_state (struct generic_pm_domain * genpd ,
288
+ unsigned int state , int depth )
289
+ {
290
+ struct generic_pm_domain * master ;
291
+ struct gpd_link * link ;
292
+ int master_state , ret ;
293
+
294
+ if (state == genpd -> performance_state )
295
+ return 0 ;
296
+
297
+ /* Propagate to masters of genpd */
298
+ list_for_each_entry (link , & genpd -> slave_links , slave_node ) {
299
+ master = link -> master ;
300
+
301
+ if (!master -> set_performance_state )
302
+ continue ;
303
+
304
+ /* Find master's performance state */
305
+ ret = dev_pm_opp_xlate_performance_state (genpd -> opp_table ,
306
+ master -> opp_table ,
307
+ state );
308
+ if (unlikely (ret < 0 ))
309
+ goto err ;
310
+
311
+ master_state = ret ;
312
+
313
+ genpd_lock_nested (master , depth + 1 );
314
+
315
+ link -> prev_performance_state = link -> performance_state ;
316
+ link -> performance_state = master_state ;
317
+ master_state = _genpd_reeval_performance_state (master ,
318
+ master_state );
319
+ ret = _genpd_set_performance_state (master , master_state , depth + 1 );
320
+ if (ret )
321
+ link -> performance_state = link -> prev_performance_state ;
322
+
323
+ genpd_unlock (master );
324
+
325
+ if (ret )
326
+ goto err ;
327
+ }
328
+
329
+ ret = genpd -> set_performance_state (genpd , state );
330
+ if (ret )
331
+ goto err ;
332
+
333
+ genpd -> performance_state = state ;
334
+ return 0 ;
335
+
336
+ err :
337
+ /* Encountered an error, lets rollback */
338
+ list_for_each_entry_continue_reverse (link , & genpd -> slave_links ,
339
+ slave_node ) {
340
+ master = link -> master ;
341
+
342
+ if (!master -> set_performance_state )
343
+ continue ;
344
+
345
+ genpd_lock_nested (master , depth + 1 );
346
+
347
+ master_state = link -> prev_performance_state ;
348
+ link -> performance_state = master_state ;
349
+
350
+ master_state = _genpd_reeval_performance_state (master ,
351
+ master_state );
352
+ if (_genpd_set_performance_state (master , master_state , depth + 1 )) {
353
+ pr_err ("%s: Failed to roll back to %d performance state\n" ,
354
+ master -> name , master_state );
355
+ }
356
+
357
+ genpd_unlock (master );
358
+ }
359
+
360
+ return ret ;
361
+ }
362
+
242
363
/**
243
364
* dev_pm_genpd_set_performance_state- Set performance state of device's power
244
365
* domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
257
378
int dev_pm_genpd_set_performance_state (struct device * dev , unsigned int state )
258
379
{
259
380
struct generic_pm_domain * genpd ;
260
- struct generic_pm_domain_data * gpd_data , * pd_data ;
261
- struct pm_domain_data * pdd ;
381
+ struct generic_pm_domain_data * gpd_data ;
262
382
unsigned int prev ;
263
- int ret = 0 ;
383
+ int ret ;
264
384
265
385
genpd = dev_to_genpd (dev );
266
386
if (IS_ERR (genpd ))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
281
401
prev = gpd_data -> performance_state ;
282
402
gpd_data -> performance_state = state ;
283
403
284
- /* New requested state is same as Max requested state */
285
- if (state == genpd -> performance_state )
286
- goto unlock ;
287
-
288
- /* New requested state is higher than Max requested state */
289
- if (state > genpd -> performance_state )
290
- goto update_state ;
291
-
292
- /* Traverse all devices within the domain */
293
- list_for_each_entry (pdd , & genpd -> dev_list , list_node ) {
294
- pd_data = to_gpd_data (pdd );
295
-
296
- if (pd_data -> performance_state > state )
297
- state = pd_data -> performance_state ;
298
- }
299
-
300
- if (state == genpd -> performance_state )
301
- goto unlock ;
302
-
303
- /*
304
- * We aren't propagating performance state changes of a subdomain to its
305
- * masters as we don't have hardware that needs it. Over that, the
306
- * performance states of subdomain and its masters may not have
307
- * one-to-one mapping and would require additional information. We can
308
- * get back to this once we have hardware that needs it. For that
309
- * reason, we don't have to consider performance state of the subdomains
310
- * of genpd here.
311
- */
312
-
313
- update_state :
314
- if (genpd_status_on (genpd )) {
315
- ret = genpd -> set_performance_state (genpd , state );
316
- if (ret ) {
317
- gpd_data -> performance_state = prev ;
318
- goto unlock ;
319
- }
320
- }
321
-
322
- genpd -> performance_state = state ;
404
+ state = _genpd_reeval_performance_state (genpd , state );
405
+ ret = _genpd_set_performance_state (genpd , state , 0 );
406
+ if (ret )
407
+ gpd_data -> performance_state = prev ;
323
408
324
- unlock :
325
409
genpd_unlock (genpd );
326
410
327
411
return ret ;
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
347
431
return ret ;
348
432
349
433
elapsed_ns = ktime_to_ns (ktime_sub (ktime_get (), time_start ));
350
-
351
- if (unlikely (genpd -> set_performance_state )) {
352
- ret = genpd -> set_performance_state (genpd , genpd -> performance_state );
353
- if (ret ) {
354
- pr_warn ("%s: Failed to set performance state %d (%d)\n" ,
355
- genpd -> name , genpd -> performance_state , ret );
356
- }
357
- }
358
-
359
434
if (elapsed_ns <= genpd -> states [state_idx ].power_on_latency_ns )
360
435
return ret ;
361
436
@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
1907
1982
ret );
1908
1983
goto unlock ;
1909
1984
}
1985
+
1986
+ /*
1987
+ * Save table for faster processing while setting performance
1988
+ * state.
1989
+ */
1990
+ genpd -> opp_table = dev_pm_opp_get_opp_table (& genpd -> dev );
1991
+ WARN_ON (!genpd -> opp_table );
1910
1992
}
1911
1993
1912
1994
ret = genpd_add_provider (np , genpd_xlate_simple , genpd );
1913
1995
if (ret ) {
1914
- if (genpd -> set_performance_state )
1996
+ if (genpd -> set_performance_state ) {
1997
+ dev_pm_opp_put_opp_table (genpd -> opp_table );
1915
1998
dev_pm_opp_of_remove_table (& genpd -> dev );
1999
+ }
1916
2000
1917
2001
goto unlock ;
1918
2002
}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
1965
2049
i , ret );
1966
2050
goto error ;
1967
2051
}
2052
+
2053
+ /*
2054
+ * Save table for faster processing while setting
2055
+ * performance state.
2056
+ */
2057
+ genpd -> opp_table = dev_pm_opp_get_opp_table_indexed (& genpd -> dev , i );
2058
+ WARN_ON (!genpd -> opp_table );
1968
2059
}
1969
2060
1970
2061
genpd -> provider = & np -> fwnode ;
@@ -1989,8 +2080,10 @@ int of_genpd_add_provider_onecell(struct device_node *np,
1989
2080
genpd -> provider = NULL ;
1990
2081
genpd -> has_provider = false;
1991
2082
1992
- if (genpd -> set_performance_state )
2083
+ if (genpd -> set_performance_state ) {
2084
+ dev_pm_opp_put_opp_table (genpd -> opp_table );
1993
2085
dev_pm_opp_of_remove_table (& genpd -> dev );
2086
+ }
1994
2087
}
1995
2088
1996
2089
mutex_unlock (& gpd_list_lock );
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
2024
2117
if (!gpd -> set_performance_state )
2025
2118
continue ;
2026
2119
2120
+ dev_pm_opp_put_opp_table (gpd -> opp_table );
2027
2121
dev_pm_opp_of_remove_table (& gpd -> dev );
2028
2122
}
2029
2123
}
@@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2338
2432
struct device * genpd_dev_pm_attach_by_id (struct device * dev ,
2339
2433
unsigned int index )
2340
2434
{
2341
- struct device * genpd_dev ;
2435
+ struct device * virt_dev ;
2342
2436
int num_domains ;
2343
2437
int ret ;
2344
2438
@@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2352
2446
return NULL ;
2353
2447
2354
2448
/* Allocate and register device on the genpd bus. */
2355
- genpd_dev = kzalloc (sizeof (* genpd_dev ), GFP_KERNEL );
2356
- if (!genpd_dev )
2449
+ virt_dev = kzalloc (sizeof (* virt_dev ), GFP_KERNEL );
2450
+ if (!virt_dev )
2357
2451
return ERR_PTR (- ENOMEM );
2358
2452
2359
- dev_set_name (genpd_dev , "genpd:%u:%s" , index , dev_name (dev ));
2360
- genpd_dev -> bus = & genpd_bus_type ;
2361
- genpd_dev -> release = genpd_release_dev ;
2453
+ dev_set_name (virt_dev , "genpd:%u:%s" , index , dev_name (dev ));
2454
+ virt_dev -> bus = & genpd_bus_type ;
2455
+ virt_dev -> release = genpd_release_dev ;
2362
2456
2363
- ret = device_register (genpd_dev );
2457
+ ret = device_register (virt_dev );
2364
2458
if (ret ) {
2365
- kfree (genpd_dev );
2459
+ kfree (virt_dev );
2366
2460
return ERR_PTR (ret );
2367
2461
}
2368
2462
2369
2463
/* Try to attach the device to the PM domain at the specified index. */
2370
- ret = __genpd_dev_pm_attach (genpd_dev , dev -> of_node , index , false);
2464
+ ret = __genpd_dev_pm_attach (virt_dev , dev -> of_node , index , false);
2371
2465
if (ret < 1 ) {
2372
- device_unregister (genpd_dev );
2466
+ device_unregister (virt_dev );
2373
2467
return ret ? ERR_PTR (ret ) : NULL ;
2374
2468
}
2375
2469
2376
- pm_runtime_enable (genpd_dev );
2377
- genpd_queue_power_off_work (dev_to_genpd (genpd_dev ));
2470
+ pm_runtime_enable (virt_dev );
2471
+ genpd_queue_power_off_work (dev_to_genpd (virt_dev ));
2378
2472
2379
- return genpd_dev ;
2473
+ return virt_dev ;
2380
2474
}
2381
2475
EXPORT_SYMBOL_GPL (genpd_dev_pm_attach_by_id );
2382
2476
@@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn,
2521
2615
EXPORT_SYMBOL_GPL (of_genpd_parse_idle_states );
2522
2616
2523
2617
/**
2524
- * of_genpd_opp_to_performance_state- Gets performance state of device's
2525
- * power domain corresponding to a DT node's "required-opps" property.
2618
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2526
2619
*
2527
- * @dev: Device for which the performance-state needs to be found.
2528
- * @np: DT node where the "required-opps" property is present. This can be
2529
- * the device node itself (if it doesn't have an OPP table) or a node
2530
- * within the OPP table of a device (if device has an OPP table).
2620
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2621
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2622
+ * state.
2531
2623
*
2532
- * Returns performance state corresponding to the "required-opps" property of
2533
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
2534
- * callback to translate power domain OPP to performance state.
2624
+ * Returns performance state encoded in the OPP of the genpd. This calls
2625
+ * platform specific genpd->opp_to_performance_state() callback to translate
2626
+ * power domain OPP to performance state.
2535
2627
*
2536
2628
* Returns performance state on success and 0 on failure.
2537
2629
*/
2538
- unsigned int of_genpd_opp_to_performance_state (struct device * dev ,
2539
- struct device_node * np )
2630
+ unsigned int pm_genpd_opp_to_performance_state (struct device * genpd_dev ,
2631
+ struct dev_pm_opp * opp )
2540
2632
{
2541
- struct generic_pm_domain * genpd ;
2542
- struct dev_pm_opp * opp ;
2543
- int state = 0 ;
2633
+ struct generic_pm_domain * genpd = NULL ;
2634
+ int state ;
2544
2635
2545
- genpd = dev_to_genpd (dev );
2546
- if (IS_ERR (genpd ))
2547
- return 0 ;
2636
+ genpd = container_of (genpd_dev , struct generic_pm_domain , dev );
2548
2637
2549
- if (unlikely (!genpd -> set_performance_state ))
2638
+ if (unlikely (!genpd -> opp_to_performance_state ))
2550
2639
return 0 ;
2551
2640
2552
2641
genpd_lock (genpd );
2553
-
2554
- opp = of_dev_pm_opp_find_required_opp (& genpd -> dev , np );
2555
- if (IS_ERR (opp )) {
2556
- dev_err (dev , "Failed to find required OPP: %ld\n" ,
2557
- PTR_ERR (opp ));
2558
- goto unlock ;
2559
- }
2560
-
2561
2642
state = genpd -> opp_to_performance_state (genpd , opp );
2562
- dev_pm_opp_put (opp );
2563
-
2564
- unlock :
2565
2643
genpd_unlock (genpd );
2566
2644
2567
2645
return state ;
2568
2646
}
2569
- EXPORT_SYMBOL_GPL (of_genpd_opp_to_performance_state );
2647
+ EXPORT_SYMBOL_GPL (pm_genpd_opp_to_performance_state );
2570
2648
2571
2649
static int __init genpd_bus_init (void )
2572
2650
{
0 commit comments