@@ -61,7 +61,7 @@ int function_trace_stop;
  */
 static int ftrace_disabled __read_mostly;

-static DEFINE_SPINLOCK(ftrace_lock);
+static DEFINE_MUTEX(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
 static DEFINE_MUTEX(ftrace_start_lock);

@@ -134,8 +134,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)

 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);

 	ops->next = ftrace_list;
 	/*
@@ -172,7 +171,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 #endif
 	}

-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);

 	return 0;
 }
@@ -182,8 +181,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	struct ftrace_ops **p;
 	int ret = 0;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);

 	/*
 	 * If we are removing the last function, then simply point
@@ -224,7 +222,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	}

 out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);

 	return ret;
 }
@@ -233,8 +231,7 @@ static void ftrace_update_pid_func(void)
 {
 	ftrace_func_t func;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);

 	if (ftrace_trace_function == ftrace_stub)
 		goto out;
@@ -256,7 +253,7 @@ static void ftrace_update_pid_func(void)
 #endif

 out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }

 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -358,15 +355,12 @@ void ftrace_release(void *start, unsigned long size)
 	if (ftrace_disabled || !start)
 		return;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
-
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 		if ((rec->ip >= s) && (rec->ip < e))
 			ftrace_free_rec(rec);
 	} while_for_each_ftrace_rec();
-
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }

 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -803,8 +797,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return NULL;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 retry:
 	if (iter->idx >= iter->pg->index) {
 		if (iter->pg->next) {
@@ -833,7 +826,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			goto retry;
 		}
 	}
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);

 	return rec;
 }
@@ -962,17 +955,15 @@ static void ftrace_filter_reset(int enable)
 	struct dyn_ftrace *rec;
 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 0;
 	do_for_each_ftrace_rec(pg, rec) {
 		if (rec->flags & FTRACE_FL_FAILED)
 			continue;
 		rec->flags &= ~type;
 	} while_for_each_ftrace_rec();
-
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }

 static int
@@ -1151,8 +1142,7 @@ static void ftrace_match_records(char *buff, int len, int enable)

 	search_len = strlen(search);

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {

 		if (rec->flags & FTRACE_FL_FAILED)
@@ -1171,7 +1161,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 		if (enable && (rec->flags & FTRACE_FL_FILTER))
 			ftrace_filtered = 1;
 	} while_for_each_ftrace_rec();
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }

 static int
@@ -1218,8 +1208,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 		search_len = strlen(search);
 	}

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {

 		if (rec->flags & FTRACE_FL_FAILED)
@@ -1236,7 +1225,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 			ftrace_filtered = 1;

 	} while_for_each_ftrace_rec();
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }

 /*
@@ -1676,9 +1665,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer)
 	if (ftrace_disabled)
 		return -ENODEV;

-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
-
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {

 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
@@ -1699,7 +1686,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer)
 		}
 	} while_for_each_ftrace_rec();
 out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);

 	return found ? 0 : -EINVAL;
 }
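
Taken together, the hunks above convert ftrace_lock from a spinlock into a mutex: each spin_lock(&ftrace_lock)/spin_unlock(&ftrace_lock) pair becomes mutex_lock(&ftrace_lock)/mutex_unlock(&ftrace_lock), and the "should not be called from interrupt context" comments become redundant, since a mutex may sleep and therefore can only be taken in process context anyway. A minimal sketch of the resulting pattern follows; it is illustrative only, not the ftrace code itself, and ftrace_walk_records() is a hypothetical stand-in for the callers changed above.

#include <linux/mutex.h>

static DEFINE_MUTEX(ftrace_lock);

/* Hypothetical caller showing the post-patch locking pattern. */
static void ftrace_walk_records(void)
{
	/*
	 * mutex_lock() may sleep, so this is only legal in process
	 * context; that is why the old "should not be called from
	 * interrupt context" comments are dropped by this patch.
	 */
	mutex_lock(&ftrace_lock);

	/* ... walk and update the ftrace records under the lock ... */

	mutex_unlock(&ftrace_lock);
}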