Skip to content

Commit 16e3081

Browse files
paulmck authored and Ingo Molnar committed
rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU
The redirection of synchronize_sched() to synchronize_rcu() was
appropriate for TREE_RCU, but not for TREE_PREEMPT_RCU. Fix this by
creating an underlying synchronize_sched(). TREE_RCU then redirects
synchronize_rcu() to synchronize_sched(), while TREE_PREEMPT_RCU has
its own version of synchronize_rcu().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12528585111916-git-send-email-> (address truncated in source)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
1 parent c3422be commit 16e3081

File tree

3 files changed

+50
-21
lines changed

3 files changed

+50
-21
lines changed

include/linux/rcupdate.h

Lines changed: 5 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -52,8 +52,13 @@ struct rcu_head {
5252
};
5353

5454
/* Exported common interfaces */
55+
#ifdef CONFIG_TREE_PREEMPT_RCU
5556
extern void synchronize_rcu(void);
57+
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58+
#define synchronize_rcu synchronize_sched
59+
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
5660
extern void synchronize_rcu_bh(void);
61+
extern void synchronize_sched(void);
5762
extern void rcu_barrier(void);
5863
extern void rcu_barrier_bh(void);
5964
extern void rcu_barrier_sched(void);
@@ -261,24 +266,6 @@ struct rcu_synchronize {
261266

262267
extern void wakeme_after_rcu(struct rcu_head *head);
263268

264-
/**
265-
* synchronize_sched - block until all CPUs have exited any non-preemptive
266-
* kernel code sequences.
267-
*
268-
* This means that all preempt_disable code sequences, including NMI and
269-
* hardware-interrupt handlers, in progress on entry will have completed
270-
* before this primitive returns. However, this does not guarantee that
271-
* softirq handlers will have completed, since in some kernels, these
272-
* handlers can run in process context, and can block.
273-
*
274-
* This primitive provides the guarantees made by the (now removed)
275-
* synchronize_kernel() API. In contrast, synchronize_rcu() only
276-
* guarantees that rcu_read_lock() sections will have completed.
277-
* In "classic RCU", these two guarantees happen to be one and
278-
* the same, but can differ in realtime RCU implementations.
279-
*/
280-
#define synchronize_sched() __synchronize_sched()
281-
282269
/**
283270
* call_rcu - Queue an RCU callback for invocation after a grace period.
284271
* @head: structure to be used for queueing the RCU updates.

include/linux/rcutree.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void)
5353
preempt_enable();
5454
}
5555

56+
#define __synchronize_sched() synchronize_rcu()
57+
5658
/*
 * Task-exit hook for RCU: a no-op here, since this (non-preemptible
 * TREE_RCU) configuration keeps no per-task RCU read-side state to
 * clean up — presumably TREE_PREEMPT_RCU provides a real version
 * elsewhere; confirm against the preemptible implementation.
 */
static inline void exit_rcu(void)
{
}
@@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void)
6870
local_bh_enable();
6971
}
7072

71-
#define __synchronize_sched() synchronize_rcu()
72-
7373
extern void call_rcu_sched(struct rcu_head *head,
7474
void (*func)(struct rcu_head *rcu));
7575

kernel/rcupdate.c

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head)
7474
complete(&rcu->completion);
7575
}
7676

77+
#ifdef CONFIG_TREE_PREEMPT_RCU
78+
7779
/**
7880
* synchronize_rcu - wait until a grace period has elapsed.
7981
*
@@ -87,7 +89,7 @@ void synchronize_rcu(void)
8789
{
8890
struct rcu_synchronize rcu;
8991

90-
if (rcu_blocking_is_gp())
92+
if (!rcu_scheduler_active)
9193
return;
9294

9395
init_completion(&rcu.completion);
@@ -98,6 +100,46 @@ void synchronize_rcu(void)
98100
}
99101
EXPORT_SYMBOL_GPL(synchronize_rcu);
100102

103+
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
104+
105+
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	/*
	 * NOTE(review): rcu_blocking_is_gp() presumably reports that any
	 * blocking already constitutes a grace period (e.g. before the
	 * scheduler is active), so there is nothing to wait for — confirm
	 * against its definition.
	 */
	if (rcu_blocking_is_gp())
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
142+
101143
/**
102144
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
103145
*

0 commit comments

Comments (0)