struct bp_cpuinfo {
	/* Number of pinned CPU breakpoints in a CPU. */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
#ifdef hw_breakpoint_slots
	/* Constant slot count: the histogram can be embedded directly. */
	unsigned int	tsk_pinned[hw_breakpoint_slots(0)];
#else
	/* Slot count only known at boot: array allocated during init. */
	unsigned int	*tsk_pinned;
#endif
	/* Number of non-pinned CPU/task breakpoints in a CPU. */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};
47
51
48
52
/* Per-CPU constraint bookkeeping: one bp_cpuinfo per breakpoint type. */
static DEFINE_PER_CPU (struct bp_cpuinfo , bp_cpuinfo [TYPE_MAX ]) ;
49
- static int nr_slots [TYPE_MAX ] __ro_after_init ;
50
53
51
54
static struct bp_cpuinfo * get_bp_info (int cpu , enum bp_type_idx type )
52
55
{
@@ -73,6 +76,54 @@ struct bp_busy_slots {
73
76
/* Serialize accesses to the above constraints */
74
77
static DEFINE_MUTEX (nr_bp_mutex );
75
78
79
+ #ifdef hw_breakpoint_slots
80
+ /*
81
+ * Number of breakpoint slots is constant, and the same for all types.
82
+ */
83
+ static_assert (hw_breakpoint_slots (TYPE_INST ) == hw_breakpoint_slots (TYPE_DATA ));
84
+ static inline int hw_breakpoint_slots_cached (int type ) { return hw_breakpoint_slots (type ); }
85
+ static inline int init_breakpoint_slots (void ) { return 0 ; }
86
+ #else
87
+ /*
88
+ * Dynamic number of breakpoint slots.
89
+ */
90
+ static int __nr_bp_slots [TYPE_MAX ] __ro_after_init ;
91
+
92
+ static inline int hw_breakpoint_slots_cached (int type )
93
+ {
94
+ return __nr_bp_slots [type ];
95
+ }
96
+
97
+ static __init int init_breakpoint_slots (void )
98
+ {
99
+ int i , cpu , err_cpu ;
100
+
101
+ for (i = 0 ; i < TYPE_MAX ; i ++ )
102
+ __nr_bp_slots [i ] = hw_breakpoint_slots (i );
103
+
104
+ for_each_possible_cpu (cpu ) {
105
+ for (i = 0 ; i < TYPE_MAX ; i ++ ) {
106
+ struct bp_cpuinfo * info = get_bp_info (cpu , i );
107
+
108
+ info -> tsk_pinned = kcalloc (__nr_bp_slots [i ], sizeof (int ), GFP_KERNEL );
109
+ if (!info -> tsk_pinned )
110
+ goto err ;
111
+ }
112
+ }
113
+
114
+ return 0 ;
115
+ err :
116
+ for_each_possible_cpu (err_cpu ) {
117
+ for (i = 0 ; i < TYPE_MAX ; i ++ )
118
+ kfree (get_bp_info (err_cpu , i )-> tsk_pinned );
119
+ if (err_cpu == cpu )
120
+ break ;
121
+ }
122
+
123
+ return - ENOMEM ;
124
+ }
125
+ #endif
126
+
76
127
__weak int hw_breakpoint_weight (struct perf_event * bp )
77
128
{
78
129
return 1 ;
@@ -95,7 +146,7 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
95
146
unsigned int * tsk_pinned = get_bp_info (cpu , type )-> tsk_pinned ;
96
147
int i ;
97
148
98
- for (i = nr_slots [ type ] - 1 ; i >= 0 ; i -- ) {
149
+ for (i = hw_breakpoint_slots_cached ( type ) - 1 ; i >= 0 ; i -- ) {
99
150
if (tsk_pinned [i ] > 0 )
100
151
return i + 1 ;
101
152
}
@@ -312,7 +363,7 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
312
363
fetch_this_slot (& slots , weight );
313
364
314
365
/* Flexible counters need to keep at least one slot */
315
- if (slots .pinned + (!!slots .flexible ) > nr_slots [ type ] )
366
+ if (slots .pinned + (!!slots .flexible ) > hw_breakpoint_slots_cached ( type ) )
316
367
return - ENOSPC ;
317
368
318
369
ret = arch_reserve_bp_slot (bp );
@@ -632,7 +683,7 @@ bool hw_breakpoint_is_used(void)
632
683
if (info -> cpu_pinned )
633
684
return true;
634
685
635
- for (int slot = 0 ; slot < nr_slots [ type ] ; ++ slot ) {
686
+ for (int slot = 0 ; slot < hw_breakpoint_slots_cached ( type ) ; ++ slot ) {
636
687
if (info -> tsk_pinned [slot ])
637
688
return true;
638
689
}
@@ -716,42 +767,19 @@ static struct pmu perf_breakpoint = {
716
767
717
768
/*
 * init_hw_breakpoint() - one-time initialization of the breakpoint subsystem.
 *
 * Sets up the task -> breakpoints rhashtable, sizes/allocates the per-CPU
 * tsk_pinned slot counters via init_breakpoint_slots(), then registers the
 * breakpoint PMU and the die notifier.
 *
 * Returns 0 on success or a negative errno.
 */
int __init init_hw_breakpoint(void)
{
	int ret;

	ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
	if (ret)
		return ret;

	ret = init_breakpoint_slots();
	if (ret) {
		/* Don't leak the rhashtable if slot allocation failed. */
		rhltable_destroy(&task_bps_ht);
		return ret;
	}

	constraints_initialized = true;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
0 commit comments