@@ -64,34 +64,42 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * not in a quiescent state. There might be any number of tasks blocked
  * while in an RCU read-side critical section.
  */
-static void rcu_preempt_qs_record(int cpu)
+static void rcu_preempt_qs(int cpu)
 {
         struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-        rdp->passed_quiesc = 1;
         rdp->passed_quiesc_completed = rdp->completed;
+        barrier();
+        rdp->passed_quiesc = 1;
 }

 /*
- * We have entered the scheduler or are between softirqs in ksoftirqd.
- * If we are in an RCU read-side critical section, we need to reflect
- * that in the state of the rcu_node structure corresponding to this CPU.
- * Caller must disable hardirqs.
+ * We have entered the scheduler, and the current task might soon be
+ * context-switched away from. If this task is in an RCU read-side
+ * critical section, we will no longer be able to rely on the CPU to
+ * record that fact, so we enqueue the task on the appropriate entry
+ * of the blocked_tasks[] array. The task will dequeue itself when
+ * it exits the outermost enclosing RCU read-side critical section.
+ * Therefore, the current grace period cannot be permitted to complete
+ * until the blocked_tasks[] entry indexed by the low-order bit of
+ * rnp->gpnum empties.
+ *
+ * Caller must disable preemption.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_note_context_switch(int cpu)
 {
         struct task_struct *t = current;
+        unsigned long flags;
         int phase;
         struct rcu_data *rdp;
         struct rcu_node *rnp;

         if (t->rcu_read_lock_nesting &&
             (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
-                WARN_ON_ONCE(cpu != smp_processor_id());

                 /* Possibly blocking in an RCU read-side critical section. */
                 rdp = rcu_preempt_state.rda[cpu];
                 rnp = rdp->mynode;
-                spin_lock(&rnp->lock);
+                spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                 t->rcu_blocked_node = rnp;

@@ -112,7 +120,7 @@ static void rcu_preempt_qs(int cpu)
                 phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
                 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
                 smp_mb();  /* Ensure later ctxt swtch seen after above. */
-                spin_unlock(&rnp->lock);
+                spin_unlock_irqrestore(&rnp->lock, flags);
         }

         /*
@@ -124,9 +132,8 @@ static void rcu_preempt_qs(int cpu)
          * grace period, then the fact that the task has been enqueued
          * means that we continue to block the current grace period.
          */
-        rcu_preempt_qs_record(cpu);
-        t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
-                                        RCU_READ_UNLOCK_GOT_QS);
+        rcu_preempt_qs(cpu);
+        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }

 /*
@@ -162,7 +169,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
         special = t->rcu_read_unlock_special;
         if (special & RCU_READ_UNLOCK_NEED_QS) {
                 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
-                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
+                rcu_preempt_qs(smp_processor_id());
         }

         /* Hardware IRQ handlers cannot block. */
@@ -199,9 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  */
                 if (!empty && rnp->qsmask == 0 &&
                     list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
-                        t->rcu_read_unlock_special &=
-                                ~(RCU_READ_UNLOCK_NEED_QS |
-                                  RCU_READ_UNLOCK_GOT_QS);
+                        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
                         if (rnp->parent == NULL) {
                                 /* Only one rcu_node in the tree. */
                                 cpu_quiet_msk_finish(&rcu_preempt_state, flags);
@@ -352,19 +357,12 @@ static void rcu_preempt_check_callbacks(int cpu)
         struct task_struct *t = current;

         if (t->rcu_read_lock_nesting == 0) {
-                t->rcu_read_unlock_special &=
-                        ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
-                rcu_preempt_qs_record(cpu);
+                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+                rcu_preempt_qs(cpu);
                 return;
         }
         if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
-                if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
-                        rcu_preempt_qs_record(cpu);
-                        t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
-                } else if (!(t->rcu_read_unlock_special &
-                             RCU_READ_UNLOCK_NEED_QS)) {
-                        t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
-                }
+                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
         }
 }

@@ -451,7 +449,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * Because preemptable RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_qs(int cpu)
+static void rcu_preempt_note_context_switch(int cpu)
 {
 }
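For orientation, the renamed quiescent-state helper reads as follows after this patch, reassembled from the first hunk above. The comments are an interpretation of why the new barrier() (a compiler barrier) separates the two stores; they are not part of the commit, and struct rcu_data, rcu_preempt_data, and per_cpu() come from the surrounding kernel sources.

/* Post-patch rcu_preempt_qs(), per the first hunk; annotations added. */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        /* Record which grace period this quiescent state belongs to... */
        rdp->passed_quiesc_completed = rdp->completed;

        /*
         * Compiler barrier: keep the store above from being reordered
         * past the store below, so code running later on this CPU that
         * sees passed_quiesc == 1 also sees the matching grace-period
         * number.
         */
        barrier();

        /* ...and only then announce that the quiescent state occurred. */
        rdp->passed_quiesc = 1;
}

With the rename, the call sites above read naturally: rcu_preempt_note_context_switch() enqueues a task preempted inside a read-side critical section, while rcu_preempt_qs() only records the quiescent state.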