@@ -258,9 +258,9 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
          * leader's sibling list:
          */
         if (group_leader == counter)
-                list_add_tail(&counter->list_entry, &ctx->counter_list);
+                list_add_tail(&counter->group_entry, &ctx->group_list);
         else {
-                list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+                list_add_tail(&counter->group_entry, &group_leader->sibling_list);
                 group_leader->nr_siblings++;
         }
 
@@ -279,13 +279,13 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
         struct perf_counter *sibling, *tmp;
 
-        if (list_empty(&counter->list_entry))
+        if (list_empty(&counter->group_entry))
                 return;
         ctx->nr_counters--;
         if (counter->attr.inherit_stat)
                 ctx->nr_stat--;
 
-        list_del_init(&counter->list_entry);
+        list_del_init(&counter->group_entry);
         list_del_rcu(&counter->event_entry);
 
         if (counter->group_leader != counter)
@@ -296,10 +296,9 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
          * upgrade the siblings to singleton counters by adding them
          * to the context list directly:
          */
-        list_for_each_entry_safe(sibling, tmp,
-                                 &counter->sibling_list, list_entry) {
+        list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) {
 
-                list_move_tail(&sibling->list_entry, &ctx->counter_list);
+                list_move_tail(&sibling->group_entry, &ctx->group_list);
                 sibling->group_leader = sibling;
         }
 }
@@ -343,7 +342,7 @@ group_sched_out(struct perf_counter *group_counter,
         /*
          * Schedule out siblings (if any):
          */
-        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+        list_for_each_entry(counter, &group_counter->sibling_list, group_entry)
                 counter_sched_out(counter, cpuctx, ctx);
 
         if (group_counter->attr.exclusive)
@@ -435,7 +434,7 @@ static void perf_counter_remove_from_context(struct perf_counter *counter)
         /*
          * If the context is active we need to retry the smp call.
          */
-        if (ctx->nr_active && !list_empty(&counter->list_entry)) {
+        if (ctx->nr_active && !list_empty(&counter->group_entry)) {
                 spin_unlock_irq(&ctx->lock);
                 goto retry;
         }
@@ -445,7 +444,7 @@ static void perf_counter_remove_from_context(struct perf_counter *counter)
          * can remove the counter safely, if the call above did not
          * succeed.
          */
-        if (!list_empty(&counter->list_entry)) {
+        if (!list_empty(&counter->group_entry)) {
                 list_del_counter(counter, ctx);
         }
         spin_unlock_irq(&ctx->lock);
@@ -497,7 +496,7 @@ static void update_group_times(struct perf_counter *leader)
         struct perf_counter *counter;
 
         update_counter_times(leader);
-        list_for_each_entry(counter, &leader->sibling_list, list_entry)
+        list_for_each_entry(counter, &leader->sibling_list, group_entry)
                 update_counter_times(counter);
 }
 
@@ -643,7 +642,7 @@ group_sched_in(struct perf_counter *group_counter,
         /*
          * Schedule in siblings as one group (if any):
          */
-        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+        list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
                         partial_group = counter;
                         goto group_error;
@@ -657,7 +656,7 @@ group_sched_in(struct perf_counter *group_counter,
          * Groups can be scheduled in as one unit only, so undo any
          * partial group before returning:
          */
-        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+        list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
                 if (counter == partial_group)
                         break;
                 counter_sched_out(counter, cpuctx, ctx);
@@ -678,7 +677,7 @@ static int is_software_only_group(struct perf_counter *leader)
         if (!is_software_counter(leader))
                 return 0;
 
-        list_for_each_entry(counter, &leader->sibling_list, list_entry)
+        list_for_each_entry(counter, &leader->sibling_list, group_entry)
                 if (!is_software_counter(counter))
                         return 0;
 
@@ -842,7 +841,7 @@ perf_install_in_context(struct perf_counter_context *ctx,
         /*
          * we need to retry the smp call.
          */
-        if (ctx->is_active && list_empty(&counter->list_entry)) {
+        if (ctx->is_active && list_empty(&counter->group_entry)) {
                 spin_unlock_irq(&ctx->lock);
                 goto retry;
         }
@@ -852,7 +851,7 @@ perf_install_in_context(struct perf_counter_context *ctx,
          * can add the counter safely, if it the call above did not
          * succeed.
          */
-        if (list_empty(&counter->list_entry))
+        if (list_empty(&counter->group_entry))
                 add_counter_to_ctx(counter, ctx);
         spin_unlock_irq(&ctx->lock);
 }
@@ -872,7 +871,7 @@ static void __perf_counter_mark_enabled(struct perf_counter *counter,
 
         counter->state = PERF_COUNTER_STATE_INACTIVE;
         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-        list_for_each_entry(sub, &counter->sibling_list, list_entry)
+        list_for_each_entry(sub, &counter->sibling_list, group_entry)
                 if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
                         sub->tstamp_enabled =
                                 ctx->time - sub->total_time_enabled;
@@ -1032,7 +1031,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 
         perf_disable();
         if (ctx->nr_active) {
-                list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+                list_for_each_entry(counter, &ctx->group_list, group_entry) {
                         if (counter != counter->group_leader)
                                 counter_sched_out(counter, cpuctx, ctx);
                         else
@@ -1252,7 +1251,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
          * First go through the list and put on any pinned groups
          * in order to give them the best chance of going on.
          */
-        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+        list_for_each_entry(counter, &ctx->group_list, group_entry) {
                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
                     !counter->attr.pinned)
                         continue;
@@ -1276,7 +1275,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 }
         }
 
-        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+        list_for_each_entry(counter, &ctx->group_list, group_entry) {
                 /*
                  * Ignore counters in OFF or ERROR state, and
                  * ignore pinned counters since we did them already.
@@ -1369,7 +1368,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
         u64 interrupts, freq;
 
         spin_lock(&ctx->lock);
-        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+        list_for_each_entry(counter, &ctx->group_list, group_entry) {
                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                         continue;
 
@@ -1441,8 +1440,8 @@ static void rotate_ctx(struct perf_counter_context *ctx)
          * Rotate the first entry last (works just fine for group counters too):
          */
         perf_disable();
-        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-                list_move_tail(&counter->list_entry, &ctx->counter_list);
+        list_for_each_entry(counter, &ctx->group_list, group_entry) {
+                list_move_tail(&counter->group_entry, &ctx->group_list);
                 break;
         }
         perf_enable();
@@ -1498,7 +1497,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 
         spin_lock(&ctx->lock);
 
-        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+        list_for_each_entry(counter, &ctx->group_list, group_entry) {
                 if (!counter->attr.enable_on_exec)
                         continue;
                 counter->attr.enable_on_exec = 0;
@@ -1575,7 +1574,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
         memset(ctx, 0, sizeof(*ctx));
         spin_lock_init(&ctx->lock);
         mutex_init(&ctx->mutex);
-        INIT_LIST_HEAD(&ctx->counter_list);
+        INIT_LIST_HEAD(&ctx->group_list);
         INIT_LIST_HEAD(&ctx->event_list);
         atomic_set(&ctx->refcount, 1);
         ctx->task = task;
@@ -1818,7 +1817,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
 
         size += err;
 
-        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                 err = perf_counter_read_entry(sub, read_format,
                                 buf + size);
                 if (err < 0)
@@ -1948,7 +1947,7 @@ static void perf_counter_for_each(struct perf_counter *counter,
 
         perf_counter_for_each_child(counter, func);
         func(counter);
-        list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+        list_for_each_entry(sibling, &counter->sibling_list, group_entry)
                 perf_counter_for_each_child(counter, func);
         mutex_unlock(&ctx->mutex);
 }
@@ -2832,7 +2831,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
         perf_output_copy(handle, values, n * sizeof(u64));
 
-        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                 n = 0;
 
                 if (sub != counter)
@@ -4118,7 +4117,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
         mutex_init(&counter->child_mutex);
         INIT_LIST_HEAD(&counter->child_list);
 
-        INIT_LIST_HEAD(&counter->list_entry);
+        INIT_LIST_HEAD(&counter->group_entry);
         INIT_LIST_HEAD(&counter->event_entry);
         INIT_LIST_HEAD(&counter->sibling_list);
         init_waitqueue_head(&counter->waitq);
@@ -4544,7 +4543,7 @@ static int inherit_group(struct perf_counter *parent_counter,
                                  child, NULL, child_ctx);
         if (IS_ERR(leader))
                 return PTR_ERR(leader);
-        list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
+        list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) {
                 child_ctr = inherit_counter(sub, parent, parent_ctx,
                                             child, leader, child_ctx);
                 if (IS_ERR(child_ctr))
@@ -4670,16 +4669,16 @@ void perf_counter_exit_task(struct task_struct *child)
         mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
 
 again:
-        list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
-                                 list_entry)
+        list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list,
+                                 group_entry)
                 __perf_counter_exit_task(child_counter, child_ctx, child);
 
         /*
          * If the last counter was a group counter, it will have appended all
          * its siblings to the list, but we obtained 'tmp' before that which
          * will still point to the list head terminating the iteration.
          */
-        if (!list_empty(&child_ctx->counter_list))
+        if (!list_empty(&child_ctx->group_list))
                 goto again;
 
         mutex_unlock(&child_ctx->mutex);
@@ -4701,7 +4700,7 @@ void perf_counter_free_task(struct task_struct *task)
 
         mutex_lock(&ctx->mutex);
 again:
-        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
+        list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) {
                 struct perf_counter *parent = counter->parent;
 
                 if (WARN_ON_ONCE(!parent))
@@ -4717,7 +4716,7 @@ void perf_counter_free_task(struct task_struct *task)
                 free_counter(counter);
         }
 
-        if (!list_empty(&ctx->counter_list))
+        if (!list_empty(&ctx->group_list))
                 goto again;
 
         mutex_unlock(&ctx->mutex);
@@ -4847,7 +4846,7 @@ static void __perf_counter_exit_cpu(void *info)
         struct perf_counter_context *ctx = &cpuctx->ctx;
         struct perf_counter *counter, *tmp;
 
-        list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
+        list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry)
                 __perf_counter_remove_from_context(counter);
 }
 static void perf_counter_exit_cpu(int cpu)
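
For orientation, here is a minimal sketch of the list linkage these hunks exercise, using the new field names. Only the members that actually appear in the diff are shown; the real declarations live in the perf_counter header (not part of this excerpt), so the exact types and any omitted fields are assumptions, not the authoritative definitions.

/*
 * Sketch only: list fields implied by the hunks above.
 * group_entry (was list_entry) links a group leader into
 * ctx->group_list (was counter_list), or a sibling counter into its
 * leader's sibling_list.  event_entry/event_list are unchanged and
 * still carry every counter in the context.
 */
#include <linux/list.h>

struct perf_counter {
        struct list_head        group_entry;    /* leader: on ctx->group_list;
                                                 * sibling: on leader->sibling_list */
        struct list_head        event_entry;    /* on ctx->event_list */
        struct list_head        sibling_list;   /* this leader's siblings */
        struct perf_counter     *group_leader;
        int                     nr_siblings;    /* assumed type */
        /* ... */
};

struct perf_counter_context {
        struct list_head        group_list;     /* group leaders only */
        struct list_head        event_list;     /* all counters */
        int                     nr_counters;    /* assumed type */
        /* ... */
};

As the hunks suggest, the rename is purely mechanical: it makes explicit that this list holds group leaders (with members reachable through sibling_list) rather than every counter, while the RCU-protected event_list keeps serving full-context iteration.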