Commit a934a56
Ingo Molnar committed
Merge branch 'timers/core-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core
Pull dynticks updates from Frederic Weisbecker:

* Fix a bug where posix cpu timers requeued due to their interval were
  ignored on full dynticks CPUs (not a regression, though, as it only
  affects full dynticks and has been there since full dynticks was merged)

* Optimizations and cleanups in the nohz subsystem's use of the per-CPU
  APIs, improving code readability, performance and debuggability

* Optimize posix cpu timers to spare the stub workqueue queueing when
  full dynticks is off

* Rename some functions with a *_this_cpu() suffix for clarity

* Refine the naming of some context tracking subsystem state accessors

* Trivial spelling fix from Paul Gortmaker

Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents dea4f48 + c925077
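The accessor-naming refinement mentioned above is the thread running through most of these hunks. Condensed from the diffs below (assembled for reading, not a standalone compilable unit), the new shape of the context-tracking queries and a typical call site:

    static inline bool context_tracking_is_enabled(void)
    {
            /* Global question: is context tracking enabled anywhere? */
            return static_key_false(&context_tracking_enabled);
    }

    static inline bool context_tracking_cpu_is_enabled(void)
    {
            /* Local question: is context tracking active on this CPU? */
            return __this_cpu_read(context_tracking.active);
    }

    /* Call sites now name the question instead of open-coding the key: */
    static inline void user_enter(void)
    {
            if (context_tracking_is_enabled())
                    context_tracking_user_enter();
    }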

11 files changed: +55 -62 lines

11 files changed

+55
-62
lines changed

include/linux/context_tracking.h
Lines changed: 5 additions & 5 deletions

@@ -17,21 +17,21 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
 
 static inline void user_enter(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_enter();
 
 }
 static inline void user_exit(void)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 context_tracking_user_exit();
 }
 
 static inline enum ctx_state exception_enter(void)
 {
         enum ctx_state prev_ctx;
 
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return 0;
 
         prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
 
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-        if (static_key_false(&context_tracking_enabled)) {
+        if (context_tracking_is_enabled()) {
                 if (prev_ctx == IN_USER)
                         context_tracking_user_enter();
         }
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static inline void context_tracking_task_switch(struct task_struct *prev,
                                                 struct task_struct *next)
 {
-        if (static_key_false(&context_tracking_enabled))
+        if (context_tracking_is_enabled())
                 __context_tracking_task_switch(prev, next);
 }
 #else
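The exception_enter()/exception_exit() pair touched above brackets exception handlers: entry records the current context and leaves user mode if needed, exit restores whatever was saved. A sketch of the usual caller shape in arch code (the handler name and body are hypothetical; only the pairing is the point):

    void my_exception_handler(struct pt_regs *regs)
    {
            enum ctx_state prev_state;

            prev_state = exception_enter();  /* leave user context if we were in it */
            /* ... actual fault/trap handling ... */
            exception_exit(prev_state);      /* re-enter the saved context */
    }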

include/linux/context_tracking_state.h
Lines changed: 8 additions & 3 deletions

@@ -22,15 +22,20 @@ struct context_tracking {
 extern struct static_key context_tracking_enabled;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
 {
-        return __this_cpu_read(context_tracking.state) == IN_USER;
+        return static_key_false(&context_tracking_enabled);
 }
 
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
 {
         return __this_cpu_read(context_tracking.active);
 }
+
+static inline bool context_tracking_in_user(void)
+{
+        return __this_cpu_read(context_tracking.state) == IN_USER;
+}
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline bool context_tracking_active(void) { return false; }
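For readers new to the primitive that context_tracking_is_enabled() wraps: static_key_false() is the jump-label API, which compiles to a no-op while the key is off and is live-patched into a jump when the key is flipped. A minimal sketch under this era's API (the feature name and slow-path function are hypothetical, for illustration only):

    #include <linux/static_key.h>

    static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

    void hot_path(void)
    {
            /* Costs (almost) nothing while the key is off; the branch
             * is patched in at runtime when the feature is enabled. */
            if (static_key_false(&my_feature_key))
                    my_feature_slow_path();
    }

    void enable_my_feature(void)
    {
            static_key_slow_inc(&my_feature_key); /* patches all branch sites */
    }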

include/linux/tick.h
Lines changed: 4 additions & 4 deletions

@@ -104,15 +104,15 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
 extern int tick_oneshot_mode_active(void);
 # ifndef arch_needs_cpu
 # define arch_needs_cpu(cpu) (0)
 # endif
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
 
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return false;
 
         return tick_nohz_full_running;

include/linux/vtime.h
Lines changed: 2 additions & 2 deletions

@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static inline bool vtime_accounting_enabled(void)
 {
-        if (static_key_false(&context_tracking_enabled)) {
-                if (context_tracking_active())
+        if (context_tracking_is_enabled()) {
+                if (context_tracking_cpu_is_enabled())
                         return true;
         }
 

init/Kconfig
Lines changed: 1 addition & 1 deletion

@@ -532,7 +532,7 @@ config CONTEXT_TRACKING_FORCE
           dynticks subsystem by forcing the context tracking on all
           CPUs in the system.
 
-          Say Y only if you're working on the developpement of an
+          Say Y only if you're working on the development of an
           architecture backend for the context tracking.
 
           Say N otherwise, this option brings an overhead that you

kernel/context_tracking.c
Lines changed: 4 additions & 4 deletions

@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
         /*
          * Repeat the user_enter() check here because some archs may be calling
          * this from asm and if no CPU needs context tracking, they shouldn't
-         * go further. Repeat the check here until they support the static key
-         * check.
+         * go further. Repeat the check here until they support the inline static
+         * key check.
          */
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
 
         /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
         unsigned long flags;
 
-        if (!static_key_false(&context_tracking_enabled))
+        if (!context_tracking_is_enabled())
                 return;
 
         if (in_interrupt())

kernel/posix-cpu-timers.c
Lines changed: 9 additions & 12 deletions

@@ -608,7 +608,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-        schedule_work(&nohz_kick_work);
+        if (context_tracking_is_enabled())
+                schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -1090,7 +1091,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                 put_task_struct(p);
                 timer->it.cpu.task = p = NULL;
                 timer->it.cpu.expires = 0;
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                 /*
                  * We've noticed that the thread is dead, but
@@ -1099,7 +1101,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                  */
                 cpu_timer_sample_group(timer->it_clock, p, &now);
                 clear_dead_task(timer, now);
-                goto out_unlock;
+                read_unlock(&tasklist_lock);
+                goto out;
         }
         spin_lock(&p->sighand->siglock);
         cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -1113,10 +1116,11 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         BUG_ON(!irqs_disabled());
         arm_timer(timer);
         spin_unlock(&p->sighand->siglock);
-
-out_unlock:
         read_unlock(&tasklist_lock);
 
+        /* Kick full dynticks CPUs in case they need to tick on the new timer */
+        posix_cpu_timer_kick_nohz();
+
 out:
         timer->it_overrun_last = timer->it_overrun;
         timer->it_overrun = -1;
@@ -1256,13 +1260,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
                 cpu_timer_fire(timer);
                 spin_unlock(&timer->it_lock);
         }
-
-        /*
-         * In case some timers were rescheduled after the queue got emptied,
-         * wake up full dynticks CPUs.
-         */
-        if (tsk->signal->cputimer.running)
-                posix_cpu_timer_kick_nohz();
 }
 
 /*
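The first hunk above is the "spare the stub workqueue queueing" optimization from the merge message: posix_cpu_timer_kick_nohz() now queues its work item only when context tracking, and therefore full dynticks, can be in use at all. For reference, a stripped-down sketch of the pattern with hypothetical names (DECLARE_WORK() and schedule_work() are the standard workqueue API):

    #include <linux/workqueue.h>

    static void kick_fn(struct work_struct *work)
    {
            /* Runs later in process context; in the real code this path
             * nudges full dynticks CPUs to re-evaluate their tick. */
    }

    static DECLARE_WORK(kick_work, kick_fn);

    static void maybe_kick_nohz(void)
    {
            /* When full dynticks cannot be active, the work item would
             * be a no-op -- skip the queueing cost entirely. */
            if (context_tracking_is_enabled())
                    schedule_work(&kick_work);
    }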

kernel/softirq.c
Lines changed: 1 addition & 3 deletions

@@ -311,16 +311,14 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-        int cpu = smp_processor_id();
-
         rcu_irq_enter();
         if (is_idle_task(current) && !in_interrupt()) {
                 /*
                  * Prevent raise_softirq from needlessly waking up ksoftirqd
                  * here, as softirq will be serviced on return from interrupt.
                  */
                 local_bh_disable();
-                tick_check_idle(cpu);
+                tick_check_idle();
                 _local_bh_enable();
         }
 

kernel/time/tick-broadcast.c
Lines changed: 3 additions & 3 deletions

@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-        if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+        if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+                struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
                 /*
                  * We might be in the middle of switching over from
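This hunk shows the per-CPU cleanup in miniature: per_cpu(var, smp_processor_id()) becomes __get_cpu_var(var). Both name the same object; the latter reads the CPU offset in one step and gives the per-CPU debug machinery a chance to flag callers that could migrate (an assumption based on the generic percpu accessors of this era, not something this commit states). Illustrative fragment only, not tied to a particular file:

    DEFINE_PER_CPU(struct tick_device, tick_cpu_device);

    /* Old style: the caller computes the CPU id explicitly */
    struct tick_device *td_old = &per_cpu(tick_cpu_device, smp_processor_id());

    /* New style: "this CPU" accessor -- same object, one lookup */
    struct tick_device *td_new = &__get_cpu_var(tick_cpu_device);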

kernel/time/tick-internal.h
Lines changed: 2 additions & 2 deletions

@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 

kernel/time/tick-sched.c
Lines changed: 16 additions & 23 deletions

@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         unsigned long flags;
 
-        ts->idle_waketime = now;
+        __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
         local_irq_save(flags);
         tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-        update_ts_time_stats(cpu, ts, now, NULL);
+        update_ts_time_stats(smp_processor_id(), ts, now, NULL);
         ts->idle_active = 0;
 
         sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
         ktime_t now = ktime_get();
 
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
         ktime_t now, expires;
         int cpu = smp_processor_id();
 
-        now = tick_nohz_start_idle(cpu, ts);
+        now = tick_nohz_start_idle(ts);
 
         if (can_stop_idle_tick(cpu, ts)) {
                 int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-        int cpu = smp_processor_id();
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
 
         local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
         now = ktime_get();
 
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
 
         if (ts->tick_stopped) {
                 tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
         /* Switch back to 2.6.27 behaviour */
-
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         ktime_t delta;
 
         /*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now;
 
         if (!ts->idle_active && !ts->tick_stopped)
                 return;
         now = ktime_get();
         if (ts->idle_active)
-                tick_nohz_stop_idle(cpu, now);
+                tick_nohz_stop_idle(ts, now);
         if (ts->tick_stopped) {
                 tick_nohz_update_jiffies(now);
-                tick_nohz_kick_tick(cpu, now);
+                tick_nohz_kick_tick(ts, now);
         }
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-        tick_check_oneshot_broadcast(cpu);
-        tick_check_nohz(cpu);
+        tick_check_oneshot_broadcast_this_cpu();
+        tick_check_nohz_this_cpu();
 }
 
 /*
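The tick_nohz_update_jiffies() hunk at the top of this file is the most compact form of the conversion: rather than materializing a struct tick_sched pointer just to store one field, a single per-CPU field write is used, which the architecture can often fold into one instruction (a segment-relative mov on x86). Side-by-side sketch, illustrative only:

    DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

    /* Old: CPU id, then pointer, then the store */
    struct tick_sched *ts = &per_cpu(tick_cpu_sched, smp_processor_id());
    ts->idle_waketime = now;

    /* New: one accessor expressing exactly the operation performed */
    __this_cpu_write(tick_cpu_sched.idle_waketime, now);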
