@@ -516,6 +516,33 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
 	mutex_unlock(&rsp->exp_wake_mutex);
 }
 
+/*
+ * Given an rcu_state pointer and a smp_call_function() handler, kick
+ * off the specified flavor of expedited grace period.
+ */
+static void _synchronize_rcu_expedited(struct rcu_state *rsp,
+				       smp_call_func_t func)
+{
+	unsigned long s;
+
+	/* If expedited grace periods are prohibited, fall back to normal. */
+	if (rcu_gp_is_normal()) {
+		wait_rcu_gp(rsp->call);
+		return;
+	}
+
+	/* Take a snapshot of the sequence number. */
+	s = rcu_exp_gp_seq_snap(rsp);
+	if (exp_funnel_lock(rsp, s))
+		return;  /* Someone else did our work for us. */
+
+	/* Initialize the rcu_node tree in preparation for the wait. */
+	sync_rcu_exp_select_cpus(rsp, func);
+
+	/* Wait and clean up, including waking everyone. */
+	rcu_exp_wait_wake(rsp, s);
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -534,29 +561,13 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
  */
 void synchronize_sched_expedited(void)
 {
-	unsigned long s;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* If only one CPU, this is automatically a grace period. */
 	if (rcu_blocking_is_gp())
 		return;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu_sched);
-		return;
-	}
-
-	/* Take a snapshot of the sequence number. */
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return;  /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-
-	/* Wait and clean up, including waking everyone. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -620,23 +631,8 @@ static void sync_rcu_exp_handler(void *info)
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long s;
 
-	/* If expedited grace periods are prohibited, fall back to normal. */
-	if (rcu_gp_is_normal()) {
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
-	s = rcu_exp_gp_seq_snap(rsp);
-	if (exp_funnel_lock(rsp, s))
-		return;  /* Someone else did our work for us. */
-
-	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
-
-	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-	rcu_exp_wait_wake(rsp, s);
+	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
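The patch is a straightforward consolidation: two nearly identical functions collapse into one helper that takes the per-flavor pieces (the rcu_state pointer and the smp_call_function() handler) as parameters, and each public entry point becomes a thin wrapper. The standalone C sketch below illustrates the same refactoring pattern outside the kernel; struct gp_state, do_expedited(), and both handler functions are hypothetical names invented for this illustration, not kernel APIs.

#include <stdio.h>

/* Hypothetical per-flavor state, standing in for struct rcu_state. */
struct gp_state {
	const char *name;
};

/* Hypothetical handler type, standing in for smp_call_func_t. */
typedef void (*gp_handler_t)(void *info);

static void sched_handler(void *info)
{
	printf("%s: sched-flavor handler\n", ((struct gp_state *)info)->name);
}

static void preempt_handler(void *info)
{
	printf("%s: preemptible-flavor handler\n", ((struct gp_state *)info)->name);
}

/*
 * Common helper: the flavor-independent steps live here once,
 * parameterized by the state pointer and the handler, in the same way
 * _synchronize_rcu_expedited() is parameterized by rsp and func above.
 */
static void do_expedited(struct gp_state *sp, gp_handler_t func)
{
	/* ...flavor-independent setup would go here... */
	func(sp);
	/* ...flavor-independent wait and cleanup would go here... */
}

/*
 * Thin per-flavor wrappers, mirroring synchronize_sched_expedited()
 * and synchronize_rcu_expedited() after the patch.
 */
static void synchronize_sched_like(void)
{
	static struct gp_state sched_state = { .name = "sched" };

	do_expedited(&sched_state, sched_handler);
}

static void synchronize_preempt_like(void)
{
	static struct gp_state preempt_state = { .name = "preempt" };

	do_expedited(&preempt_state, preempt_handler);
}

int main(void)
{
	synchronize_sched_like();
	synchronize_preempt_like();
	return 0;
}

The payoff is the same as in the patch: when the shared sequence of steps changes (for example, the fallback to a normal grace period), it only needs to be updated in one place, and each flavor contributes only what actually differs.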