@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
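The flag also picks up __read_mostly here: it is written at most once during boot but read on every pass through the no-CBs kthread loop. A minimal sketch of what the annotation does (the variable names below are invented for illustration):

#include <linux/cache.h>

/*
 * Illustrative only: __read_mostly places the variable in the
 * .data..read_mostly section, grouping rarely-written data away from
 * write-hot data so that unrelated stores do not keep bouncing the
 * cache line that frequent readers share.
 */
static bool __read_mostly example_flag;	/* set once at boot, read in hot paths */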
|
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+	rcu_nocb_poll = 1;
+	return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
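Registering the flag as an early_param() lets it be passed as plain rcu_nocb_poll on the kernel command line, whereas the removed module_param() on built-in code would have required the module-name prefix; early_param() handlers also run from parse_early_param(), well before initcalls. The trade-off is losing the read-only sysfs view that the 0444 module_param provided. A sketch of the same registration pattern for a hypothetical option that also accepts an explicit value (example_opt and its variable are made up; strtobool() is the parsing helper available in kernels of this era):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>

static bool __read_mostly example_opt;

static int __init parse_example_opt(char *arg)
{
	if (!arg)			/* bare "example_opt" on the command line */
		example_opt = true;
	else if (strtobool(arg, &example_opt))
		return -EINVAL;		/* nonzero reports a malformed early option */
	return 0;
}
early_param("example_opt", parse_example_opt);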
|
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
 		if (!rcu_nocb_poll)
-			wait_event(rdp->nocb_wq, rdp->nocb_head);
+			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
 			schedule_timeout_interruptible(1);
+			flush_signals(current);
 			continue;
 		}
 
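Two related fixes ride together in the kthread loop. wait_event() sleeps in TASK_UNINTERRUPTIBLE, so a no-CBs kthread idling for more than the hung-task timeout (120 seconds by default) draws blocked-task complaints and is charged to the load average; wait_event_interruptible() avoids both. Once the sleep is interruptible, though, any signal that does arrive would leave the task signal-pending and make every later interruptible sleep return immediately, so flush_signals() discards pending signals before retrying. A self-contained sketch of the same pattern in a generic kthread (all names below are hypothetical):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct example_worker {
	wait_queue_head_t wq;
	void *head;			/* work available when non-NULL */
};

static int example_worker_fn(void *arg)
{
	struct example_worker *w = arg;

	while (!kthread_should_stop()) {
		/* Interruptible sleep: invisible to the hung-task
		 * detector and not counted in the load average. */
		wait_event_interruptible(w->wq,
					 w->head || kthread_should_stop());
		if (!w->head) {
			/* Spurious wakeup or signal: back off one tick and
			 * discard pending signals so the next interruptible
			 * sleep does not return immediately. */
			schedule_timeout_interruptible(1);
			flush_signals(current);
			continue;
		}
		/* ... detach and process the work here ... */
	}
	return 0;
}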
|