Skip to content

Commit b8c7f1d

Browse files
committed
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix whitespace inconsistencies
  rcu: Fix thinko, actually initialize full tree
  rcu: Apply results of code inspection of kernel/rcutree_plugin.h
  rcu: Add WARN_ON_ONCE() consistency checks covering state transitions
  rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU
  rcu: Simplify rcu_read_unlock_special() quiescent-state accounting
  rcu: Add debug checks to TREE_PREEMPT_RCU for premature grace periods
  rcu: Kconfig help needs to say that TREE_PREEMPT_RCU scales down
  rcutorture: Occasionally delay readers enough to make RCU force_quiescent_state
  rcu: Initialize multi-level RCU grace periods holding locks
  rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable
2 parents f4eccb6 + a71fca5 commit b8c7f1d

File tree

11 files changed

+195
-156
lines changed

11 files changed

+195
-156
lines changed

include/linux/rculist_nulls.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -102,7 +102,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
102102
*/
103103
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
104104
for (pos = rcu_dereference((head)->first); \
105-
(!is_a_nulls(pos)) && \
105+
(!is_a_nulls(pos)) && \
106106
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
107107
pos = rcu_dereference(pos->next))
108108

include/linux/rcupdate.h

Lines changed: 8 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Read-Copy Update mechanism for mutual exclusion
2+
* Read-Copy Update mechanism for mutual exclusion
33
*
44
* This program is free software; you can redistribute it and/or modify
55
* it under the terms of the GNU General Public License as published by
@@ -18,15 +18,15 @@
1818
* Copyright IBM Corporation, 2001
1919
*
2020
* Author: Dipankar Sarma <dipankar@in.ibm.com>
21-
*
21+
*
2222
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
2323
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
2424
* Papers:
2525
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
2626
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
2727
*
2828
* For detailed explanation of Read-Copy Update mechanism see -
29-
* http://lse.sourceforge.net/locking/rcupdate.html
29+
* http://lse.sourceforge.net/locking/rcupdate.html
3030
*
3131
*/
3232

@@ -52,8 +52,13 @@ struct rcu_head {
5252
};
5353

5454
/* Exported common interfaces */
55+
#ifdef CONFIG_TREE_PREEMPT_RCU
5556
extern void synchronize_rcu(void);
57+
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58+
#define synchronize_rcu synchronize_sched
59+
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
5660
extern void synchronize_rcu_bh(void);
61+
extern void synchronize_sched(void);
5762
extern void rcu_barrier(void);
5863
extern void rcu_barrier_bh(void);
5964
extern void rcu_barrier_sched(void);
@@ -261,24 +266,6 @@ struct rcu_synchronize {
261266

262267
extern void wakeme_after_rcu(struct rcu_head *head);
263268

264-
/**
265-
* synchronize_sched - block until all CPUs have exited any non-preemptive
266-
* kernel code sequences.
267-
*
268-
* This means that all preempt_disable code sequences, including NMI and
269-
* hardware-interrupt handlers, in progress on entry will have completed
270-
* before this primitive returns. However, this does not guarantee that
271-
* softirq handlers will have completed, since in some kernels, these
272-
* handlers can run in process context, and can block.
273-
*
274-
* This primitive provides the guarantees made by the (now removed)
275-
* synchronize_kernel() API. In contrast, synchronize_rcu() only
276-
* guarantees that rcu_read_lock() sections will have completed.
277-
* In "classic RCU", these two guarantees happen to be one and
278-
* the same, but can differ in realtime RCU implementations.
279-
*/
280-
#define synchronize_sched() __synchronize_sched()
281-
282269
/**
283270
* call_rcu - Queue an RCU callback for invocation after a grace period.
284271
* @head: structure to be used for queueing the RCU updates.

include/linux/rcutree.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,7 @@
2424
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
2525
*
2626
* For detailed explanation of Read-Copy Update mechanism see -
27-
* Documentation/RCU
27+
* Documentation/RCU
2828
*/
2929

3030
#ifndef __LINUX_RCUTREE_H
@@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void)
5353
preempt_enable();
5454
}
5555

56+
#define __synchronize_sched() synchronize_rcu()
57+
5658
static inline void exit_rcu(void)
5759
{
5860
}
@@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void)
6870
local_bh_enable();
6971
}
7072

71-
#define __synchronize_sched() synchronize_rcu()
72-
7373
extern void call_rcu_sched(struct rcu_head *head,
7474
void (*func)(struct rcu_head *rcu));
7575

include/linux/sched.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1755,7 +1755,6 @@ extern cputime_t task_gtime(struct task_struct *p);
17551755

17561756
#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
17571757
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1758-
#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */
17591758

17601759
static inline void rcu_copy_process(struct task_struct *p)
17611760
{

init/Kconfig

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -331,7 +331,8 @@ config TREE_PREEMPT_RCU
331331
This option selects the RCU implementation that is
332332
designed for very large SMP systems with hundreds or
333333
thousands of CPUs, but for which real-time response
334-
is also required.
334+
is also required. It also scales down nicely to
335+
smaller systems.
335336

336337
endchoice
337338

kernel/rcupdate.c

Lines changed: 45 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -19,15 +19,15 @@
1919
*
2020
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
2121
* Manfred Spraul <manfred@colorfullife.com>
22-
*
22+
*
2323
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
2424
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
2525
* Papers:
2626
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
2727
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
2828
*
2929
* For detailed explanation of Read-Copy Update mechanism see -
30-
* http://lse.sourceforge.net/locking/rcupdate.html
30+
* http://lse.sourceforge.net/locking/rcupdate.html
3131
*
3232
*/
3333
#include <linux/types.h>
@@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head)
7474
complete(&rcu->completion);
7575
}
7676

77+
#ifdef CONFIG_TREE_PREEMPT_RCU
78+
7779
/**
7880
* synchronize_rcu - wait until a grace period has elapsed.
7981
*
@@ -87,7 +89,7 @@ void synchronize_rcu(void)
8789
{
8890
struct rcu_synchronize rcu;
8991

90-
if (rcu_blocking_is_gp())
92+
if (!rcu_scheduler_active)
9193
return;
9294

9395
init_completion(&rcu.completion);
@@ -98,6 +100,46 @@ void synchronize_rcu(void)
98100
}
99101
EXPORT_SYMBOL_GPL(synchronize_rcu);
100102

103+
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
104+
105+
/**
106+
* synchronize_sched - wait until an rcu-sched grace period has elapsed.
107+
*
108+
* Control will return to the caller some time after a full rcu-sched
109+
* grace period has elapsed, in other words after all currently executing
110+
* rcu-sched read-side critical sections have completed. These read-side
111+
* critical sections are delimited by rcu_read_lock_sched() and
112+
* rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
113+
* local_irq_disable(), and so on may be used in place of
114+
* rcu_read_lock_sched().
115+
*
116+
* This means that all preempt_disable code sequences, including NMI and
117+
* hardware-interrupt handlers, in progress on entry will have completed
118+
* before this primitive returns. However, this does not guarantee that
119+
* softirq handlers will have completed, since in some kernels, these
120+
* handlers can run in process context, and can block.
121+
*
122+
* This primitive provides the guarantees made by the (now removed)
123+
* synchronize_kernel() API. In contrast, synchronize_rcu() only
124+
* guarantees that rcu_read_lock() sections will have completed.
125+
* In "classic RCU", these two guarantees happen to be one and
126+
* the same, but can differ in realtime RCU implementations.
127+
*/
128+
void synchronize_sched(void)
129+
{
130+
struct rcu_synchronize rcu;
131+
132+
if (rcu_blocking_is_gp())
133+
return;
134+
135+
init_completion(&rcu.completion);
136+
/* Will wake me after RCU finished. */
137+
call_rcu_sched(&rcu.head, wakeme_after_rcu);
138+
/* Wait for it. */
139+
wait_for_completion(&rcu.completion);
140+
}
141+
EXPORT_SYMBOL_GPL(synchronize_sched);
142+
101143
/**
102144
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
103145
*

kernel/rcutorture.c

Lines changed: 24 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@
1818
* Copyright (C) IBM Corporation, 2005, 2006
1919
*
2020
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
21-
* Josh Triplett <josh@freedesktop.org>
21+
* Josh Triplett <josh@freedesktop.org>
2222
*
2323
* See also: Documentation/RCU/torture.txt
2424
*/
@@ -50,7 +50,7 @@
5050

5151
MODULE_LICENSE("GPL");
5252
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
53-
"Josh Triplett <josh@freedesktop.org>");
53+
"Josh Triplett <josh@freedesktop.org>");
5454

5555
static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
5656
static int nfakewriters = 4; /* # fake writer threads */
@@ -110,8 +110,8 @@ struct rcu_torture {
110110
};
111111

112112
static LIST_HEAD(rcu_torture_freelist);
113-
static struct rcu_torture *rcu_torture_current = NULL;
114-
static long rcu_torture_current_version = 0;
113+
static struct rcu_torture *rcu_torture_current;
114+
static long rcu_torture_current_version;
115115
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
116116
static DEFINE_SPINLOCK(rcu_torture_lock);
117117
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
@@ -124,11 +124,11 @@ static atomic_t n_rcu_torture_alloc_fail;
124124
static atomic_t n_rcu_torture_free;
125125
static atomic_t n_rcu_torture_mberror;
126126
static atomic_t n_rcu_torture_error;
127-
static long n_rcu_torture_timers = 0;
127+
static long n_rcu_torture_timers;
128128
static struct list_head rcu_torture_removed;
129129
static cpumask_var_t shuffle_tmp_mask;
130130

131-
static int stutter_pause_test = 0;
131+
static int stutter_pause_test;
132132

133133
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
134134
#define RCUTORTURE_RUNNABLE_INIT 1
@@ -267,7 +267,8 @@ struct rcu_torture_ops {
267267
int irq_capable;
268268
char *name;
269269
};
270-
static struct rcu_torture_ops *cur_ops = NULL;
270+
271+
static struct rcu_torture_ops *cur_ops;
271272

272273
/*
273274
* Definitions for rcu torture testing.
@@ -281,14 +282,17 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
281282

282283
static void rcu_read_delay(struct rcu_random_state *rrsp)
283284
{
284-
long delay;
285-
const long longdelay = 200;
285+
const unsigned long shortdelay_us = 200;
286+
const unsigned long longdelay_ms = 50;
286287

287-
/* We want there to be long-running readers, but not all the time. */
288+
/* We want a short delay sometimes to make a reader delay the grace
289+
* period, and we want a long delay occasionally to trigger
290+
* force_quiescent_state. */
288291

289-
delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
290-
if (!delay)
291-
udelay(longdelay);
292+
if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
293+
mdelay(longdelay_ms);
294+
if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
295+
udelay(shortdelay_us);
292296
}
293297

294298
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -339,8 +343,8 @@ static struct rcu_torture_ops rcu_ops = {
339343
.sync = synchronize_rcu,
340344
.cb_barrier = rcu_barrier,
341345
.stats = NULL,
342-
.irq_capable = 1,
343-
.name = "rcu"
346+
.irq_capable = 1,
347+
.name = "rcu"
344348
};
345349

346350
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -638,7 +642,8 @@ rcu_torture_writer(void *arg)
638642

639643
do {
640644
schedule_timeout_uninterruptible(1);
641-
if ((rp = rcu_torture_alloc()) == NULL)
645+
rp = rcu_torture_alloc();
646+
if (rp == NULL)
642647
continue;
643648
rp->rtort_pipe_count = 0;
644649
udelay(rcu_random(&rand) & 0x3ff);
@@ -1110,7 +1115,7 @@ rcu_torture_init(void)
11101115
printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
11111116
torture_type);
11121117
mutex_unlock(&fullstop_mutex);
1113-
return (-EINVAL);
1118+
return -EINVAL;
11141119
}
11151120
if (cur_ops->init)
11161121
cur_ops->init(); /* no "goto unwind" prior to this point!!! */
@@ -1161,7 +1166,7 @@ rcu_torture_init(void)
11611166
goto unwind;
11621167
}
11631168
fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
1164-
GFP_KERNEL);
1169+
GFP_KERNEL);
11651170
if (fakewriter_tasks == NULL) {
11661171
VERBOSE_PRINTK_ERRSTRING("out of memory");
11671172
firsterr = -ENOMEM;
@@ -1170,7 +1175,7 @@ rcu_torture_init(void)
11701175
for (i = 0; i < nfakewriters; i++) {
11711176
VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
11721177
fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
1173-
"rcu_torture_fakewriter");
1178+
"rcu_torture_fakewriter");
11741179
if (IS_ERR(fakewriter_tasks[i])) {
11751180
firsterr = PTR_ERR(fakewriter_tasks[i]);
11761181
VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");

0 commit comments

Comments (0)