Skip to content

Commit e73d84e

Browse files
committed
posix-timers: Remove remaining uses of tasklist_lock
The remaining uses of tasklist_lock were mostly about synchronizing against sighand modifications, getting coherent and safe group samples, and also thread/process-wide timers list handling. All of this can already be safely synchronized with the target's sighand lock. Let's use it in these places instead. Also update the comments about locking. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Kosaki Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Andrew Morton <akpm@linux-foundation.org>
1 parent 3d7a142 commit e73d84e

File tree

1 file changed

+44
-32
lines changed

1 file changed

+44
-32
lines changed

kernel/posix-cpu-timers.c

Lines changed: 44 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
233233

234234
/*
235235
* Sample a process (thread group) clock for the given group_leader task.
236-
* Must be called with tasklist_lock held for reading.
236+
* Must be called with task sighand lock held for safe while_each_thread()
237+
* traversal.
237238
*/
238239
static int cpu_clock_sample_group(const clockid_t which_clock,
239240
struct task_struct *p,
@@ -455,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
455456

456457
/*
457458
* Insert the timer on the appropriate list before any timers that
458-
* expire later. This must be called with the tasklist_lock held
459-
* for reading, interrupts disabled and p->sighand->siglock taken.
459+
* expire later. This must be called with the sighand lock held.
460460
*/
461461
static void arm_timer(struct k_itimer *timer)
462462
{
@@ -547,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
547547

548548
/*
549549
* Sample a process (thread group) timer for the given group_leader task.
550-
* Must be called with tasklist_lock held for reading.
550+
* Must be called with task sighand lock held for safe while_each_thread()
551+
* traversal.
551552
*/
552553
static int cpu_timer_sample_group(const clockid_t which_clock,
553554
struct task_struct *p,
@@ -610,9 +611,11 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
610611
* If we return TIMER_RETRY, it's necessary to release the timer's lock
611612
* and try again. (This happens when the timer is in the middle of firing.)
612613
*/
613-
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
614+
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
614615
struct itimerspec *new, struct itimerspec *old)
615616
{
617+
unsigned long flags;
618+
struct sighand_struct *sighand;
616619
struct task_struct *p = timer->it.cpu.task;
617620
unsigned long long old_expires, new_expires, old_incr, val;
618621
int ret;
@@ -621,14 +624,16 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
621624

622625
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
623626

624-
read_lock(&tasklist_lock);
625627
/*
626-
* We need the tasklist_lock to protect against reaping that
627-
* clears p->sighand. If p has just been reaped, we can no
628+
* Protect against sighand release/switch in exit/exec and p->cpu_timers
629+
* and p->signal->cpu_timers read/write in arm_timer()
630+
*/
631+
sighand = lock_task_sighand(p, &flags);
632+
/*
633+
* If p has just been reaped, we can no
628634
* longer get any information about it at all.
629635
*/
630-
if (unlikely(p->sighand == NULL)) {
631-
read_unlock(&tasklist_lock);
636+
if (unlikely(sighand == NULL)) {
632637
return -ESRCH;
633638
}
634639

@@ -639,7 +644,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
639644

640645
ret = 0;
641646
old_incr = timer->it.cpu.incr;
642-
spin_lock(&p->sighand->siglock);
643647
old_expires = timer->it.cpu.expires;
644648
if (unlikely(timer->it.cpu.firing)) {
645649
timer->it.cpu.firing = -1;
@@ -696,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
696700
* disable this firing since we are already reporting
697701
* it as an overrun (thanks to bump_cpu_timer above).
698702
*/
699-
spin_unlock(&p->sighand->siglock);
700-
read_unlock(&tasklist_lock);
703+
unlock_task_sighand(p, &flags);
701704
goto out;
702705
}
703706

704-
if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
707+
if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
705708
new_expires += val;
706709
}
707710

@@ -715,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
715718
arm_timer(timer);
716719
}
717720

718-
spin_unlock(&p->sighand->siglock);
719-
read_unlock(&tasklist_lock);
720-
721+
unlock_task_sighand(p, &flags);
721722
/*
722723
* Install the new reload setting, and
723724
* set up the signal and overrun bookkeeping.
@@ -779,8 +780,16 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
779780
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
780781
cpu_clock_sample(timer->it_clock, p, &now);
781782
} else {
782-
read_lock(&tasklist_lock);
783-
if (unlikely(p->sighand == NULL)) {
783+
struct sighand_struct *sighand;
784+
unsigned long flags;
785+
786+
/*
787+
* Protect against sighand release/switch in exit/exec and
788+
* also make timer sampling safe if it ends up calling
789+
* thread_group_cputime().
790+
*/
791+
sighand = lock_task_sighand(p, &flags);
792+
if (unlikely(sighand == NULL)) {
784793
/*
785794
* The process has been reaped.
786795
* We can't even collect a sample any more.
@@ -789,11 +798,10 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
789798
timer->it.cpu.expires = 0;
790799
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
791800
&itp->it_value);
792-
read_unlock(&tasklist_lock);
793801
} else {
794802
cpu_timer_sample_group(timer->it_clock, p, &now);
803+
unlock_task_sighand(p, &flags);
795804
}
796-
read_unlock(&tasklist_lock);
797805
}
798806

799807
if (now < timer->it.cpu.expires) {
@@ -1007,6 +1015,8 @@ static void check_process_timers(struct task_struct *tsk,
10071015
*/
10081016
void posix_cpu_timer_schedule(struct k_itimer *timer)
10091017
{
1018+
struct sighand_struct *sighand;
1019+
unsigned long flags;
10101020
struct task_struct *p = timer->it.cpu.task;
10111021
unsigned long long now;
10121022

@@ -1021,40 +1031,42 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
10211031
if (unlikely(p->exit_state))
10221032
goto out;
10231033

1024-
read_lock(&tasklist_lock); /* arm_timer needs it. */
1025-
spin_lock(&p->sighand->siglock);
1034+
/* Protect timer list r/w in arm_timer() */
1035+
sighand = lock_task_sighand(p, &flags);
1036+
if (!sighand)
1037+
goto out;
10261038
} else {
1027-
read_lock(&tasklist_lock);
1028-
if (unlikely(p->sighand == NULL)) {
1039+
/*
1040+
* Protect arm_timer() and timer sampling in case of call to
1041+
* thread_group_cputime().
1042+
*/
1043+
sighand = lock_task_sighand(p, &flags);
1044+
if (unlikely(sighand == NULL)) {
10291045
/*
10301046
* The process has been reaped.
10311047
* We can't even collect a sample any more.
10321048
*/
10331049
timer->it.cpu.expires = 0;
1034-
read_unlock(&tasklist_lock);
10351050
goto out;
10361051
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1037-
read_unlock(&tasklist_lock);
1052+
unlock_task_sighand(p, &flags);
10381053
/* Optimizations: if the process is dying, no need to rearm */
10391054
goto out;
10401055
}
1041-
spin_lock(&p->sighand->siglock);
10421056
cpu_timer_sample_group(timer->it_clock, p, &now);
10431057
bump_cpu_timer(timer, now);
1044-
/* Leave the tasklist_lock locked for the call below. */
1058+
/* Leave the sighand locked for the call below. */
10451059
}
10461060

10471061
/*
10481062
* Now re-arm for the new expiry time.
10491063
*/
10501064
BUG_ON(!irqs_disabled());
10511065
arm_timer(timer);
1052-
spin_unlock(&p->sighand->siglock);
1053-
read_unlock(&tasklist_lock);
1066+
unlock_task_sighand(p, &flags);
10541067

10551068
/* Kick full dynticks CPUs in case they need to tick on the new timer */
10561069
posix_cpu_timer_kick_nohz();
1057-
10581070
out:
10591071
timer->it_overrun_last = timer->it_overrun;
10601072
timer->it_overrun = -1;

0 commit comments

Comments
 (0)