@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
				   struct task_struct *p,
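
For context: lock_task_sighand(), which the following hunks switch to, takes p->sighand->siglock with interrupts disabled (the saved state goes into its flags argument) and returns NULL once the task has been reaped, so it provides the thread-group stability the old tasklist_lock comment promised. A minimal sketch of the pattern, assuming only the standard helpers from <linux/sched.h>; sample_group_clock() is a hypothetical caller:

	/* Hypothetical caller showing the lock_task_sighand() pattern. */
	static int sample_group_clock(struct task_struct *p)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		/* Takes p->sighand->siglock and disables irqs (saved in flags),
		 * or returns NULL if the task has already been reaped. */
		sighand = lock_task_sighand(p, &flags);
		if (sighand == NULL)
			return -ESRCH;

		/* The thread group cannot change under us here, so a
		 * while_each_thread() walk is safe. */

		unlock_task_sighand(p, &flags);
		return 0;
	}
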
@@ -455,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later. This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later. This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -547,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
				   struct task_struct *p,
@@ -610,9 +611,11 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again. (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
@@ -621,14 +624,16 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand. If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
@@ -639,7 +644,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
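
The explicit spin_lock() removed above can simply be dropped because lock_task_sighand() already returned with the same lock held. Roughly, its effect is equivalent to the following sketch (the real implementation, __lock_task_sighand() in kernel/signal.c, additionally revalidates p->sighand under RCU):

	/* Simplified sketch only; not the actual kernel implementation. */
	static struct sighand_struct *lock_task_sighand_sketch(struct task_struct *p,
							       unsigned long *flags)
	{
		struct sighand_struct *sighand = p->sighand;

		if (sighand != NULL)
			spin_lock_irqsave(&sighand->siglock, *flags);
		return sighand;	/* NULL means the task was reaped */
	}
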
@@ -696,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
@@ -715,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
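
Note that renaming the flags parameter to timer_flags is not cosmetic: the function now needs a local unsigned long flags for the irq state saved by lock_task_sighand(), and keeping the old int parameter name would shadow it. A hypothetical reduced shape of the function after this patch:

	/* Hypothetical reduction; not the full posix_cpu_timer_set(). */
	static int timer_set_sketch(struct k_itimer *timer, int timer_flags,
				    struct task_struct *p)
	{
		struct sighand_struct *sighand;
		unsigned long flags;			/* saved irq state */

		sighand = lock_task_sighand(p, &flags);
		if (sighand == NULL)
			return -ESRCH;			/* target already reaped */

		if (!(timer_flags & TIMER_ABSTIME))	/* old parameter, new name */
			;				/* relative-expiry adjustment would go here */

		unlock_task_sighand(p, &flags);
		return 0;
	}
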
@@ -779,8 +780,16 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -789,11 +798,10 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			timer->it.cpu.expires = 0;
 			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
 					   &itp->it_value);
-			read_unlock(&tasklist_lock);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
 	}
 
 	if (now < timer->it.cpu.expires) {
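
The unlocking here is deliberately asymmetric: when lock_task_sighand() returns NULL nothing was taken, so only the branch that sampled the group unlocks. A hypothetical reduction of the new group branch:

	/* Hypothetical reduction of the new posix_cpu_timer_get() group branch. */
	static void get_group_sample_sketch(struct k_itimer *timer,
					    struct task_struct *p,
					    unsigned long long *now)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/* Reaped: nothing is held, so this path must not unlock. */
			timer->it.cpu.expires = 0;
			return;
		}
		cpu_timer_sample_group(timer->it_clock, p, now);
		unlock_task_sighand(p, &flags);	/* only the locked path unlocks */
	}
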
@@ -1007,6 +1015,8 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
@@ -1021,40 +1031,42 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		if (unlikely(p->exit_state))
 			goto out;
 
-		read_lock(&tasklist_lock); /* arm_timer needs it. */
-		spin_lock(&p->sighand->siglock);
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
+			goto out;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
 			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			read_unlock(&tasklist_lock);
+			unlock_task_sighand(p, &flags);
 			/* Optimizations: if the process is dying, no need to rearm */
 			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below. */
+		/* Leave the sighand locked for the call below. */
 	}
 
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
 	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
 	/* Kick full dynticks CPUs in case they need to tick on the new timer */
 	posix_cpu_timer_kick_nohz();
-
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
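
One subtlety in this last hunk: the BUG_ON(!irqs_disabled()) ahead of arm_timer() stays valid because lock_task_sighand() uses the irqsave variant, so both branches reach the rearm point with interrupts off. A hypothetical reduction of the new rearm path:

	/* Hypothetical reduction of the new posix_cpu_timer_schedule() rearm path. */
	static void timer_reschedule_sketch(struct k_itimer *timer,
					    struct task_struct *p)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		sighand = lock_task_sighand(p, &flags);	/* irqs now disabled */
		if (!sighand)
			return;				/* reaped: nothing to rearm */

		BUG_ON(!irqs_disabled());		/* holds: irqsave variant above */
		arm_timer(timer);
		unlock_task_sighand(p, &flags);
	}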