@@ -1670,10 +1670,10 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 }
 
 /**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
- * @gcwq: gcwq of interest
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
  *
- * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
+ * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
  * is different for idle and busy ones.
  *
  * Idle ones will be removed from the idle_list and woken up. They will
@@ -1691,60 +1691,53 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * including the manager will not appear on @idle_list until rebind is
  * complete, making local wake-ups safe.
  */
-static void rebind_workers(struct global_cwq *gcwq)
+static void rebind_workers(struct worker_pool *pool)
 {
-        struct worker_pool *pool;
         struct worker *worker, *n;
         struct hlist_node *pos;
         int i;
 
-        for_each_worker_pool(pool, gcwq) {
-                lockdep_assert_held(&pool->assoc_mutex);
-                lockdep_assert_held(&pool->lock);
-        }
+        lockdep_assert_held(&pool->assoc_mutex);
+        lockdep_assert_held(&pool->lock);
 
         /* dequeue and kick idle ones */
-        for_each_worker_pool(pool, gcwq) {
-                list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                        /*
-                         * idle workers should be off @pool->idle_list
-                         * until rebind is complete to avoid receiving
-                         * premature local wake-ups.
-                         */
-                        list_del_init(&worker->entry);
+        list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+                /*
+                 * idle workers should be off @pool->idle_list until rebind
+                 * is complete to avoid receiving premature local wake-ups.
+                 */
+                list_del_init(&worker->entry);
 
-                        /*
-                         * worker_thread() will see the above dequeuing
-                         * and call idle_worker_rebind().
-                         */
-                        wake_up_process(worker->task);
-                }
+                /*
+                 * worker_thread() will see the above dequeuing and call
+                 * idle_worker_rebind().
+                 */
+                wake_up_process(worker->task);
+        }
 
-                /* rebind busy workers */
-                for_each_busy_worker(worker, i, pos, pool) {
-                        struct work_struct *rebind_work = &worker->rebind_work;
-                        struct workqueue_struct *wq;
+        /* rebind busy workers */
+        for_each_busy_worker(worker, i, pos, pool) {
+                struct work_struct *rebind_work = &worker->rebind_work;
+                struct workqueue_struct *wq;
 
-                        if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-                                             work_data_bits(rebind_work)))
-                                continue;
+                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+                                     work_data_bits(rebind_work)))
+                        continue;
 
-                        debug_work_activate(rebind_work);
+                debug_work_activate(rebind_work);
 
-                        /*
-                         * wq doesn't really matter but let's keep
-                         * @worker->pool and @cwq->pool consistent for
-                         * sanity.
-                         */
-                        if (std_worker_pool_pri(worker->pool))
-                                wq = system_highpri_wq;
-                        else
-                                wq = system_wq;
-
-                        insert_work(get_cwq(pool->cpu, wq), rebind_work,
-                                    worker->scheduled.next,
-                                    work_color_to_flags(WORK_NO_COLOR));
-                }
+                /*
+                 * wq doesn't really matter but let's keep @worker->pool
+                 * and @cwq->pool consistent for sanity.
+                 */
+                if (std_worker_pool_pri(worker->pool))
+                        wq = system_highpri_wq;
+                else
+                        wq = system_wq;
+
+                insert_work(get_cwq(pool->cpu, wq), rebind_work,
+                            worker->scheduled.next,
+                            work_color_to_flags(WORK_NO_COLOR));
         }
 }
 
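With this hunk, rebind_workers() takes a single worker_pool and only asserts that the caller already holds that pool's assoc_mutex and pool->lock; iterating over a gcwq's pools becomes the callers' job (see the hotplug hunks below). The following userspace sketch, using plain pthreads rather than kernel primitives, illustrates the lock-one-pool/process/unlock pattern those callers adopt. The names struct pool, NR_STD_POOLS and rebind_one_pool() are invented for the example and are not part of workqueue.c.

/*
 * Illustrative sketch only: a userspace analogue of the per-pool
 * lock/process/unlock pattern the hotplug paths switch to.  The names
 * below (struct pool, NR_STD_POOLS, rebind_one_pool) are made up for
 * this example and do not exist in workqueue.c.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_STD_POOLS        2        /* workqueue.c has a normal and a highpri pool per CPU */
#define POOL_DISASSOCIATED  0x1

struct pool {
        pthread_mutex_t assoc_mutex;  /* stand-in for pool->assoc_mutex */
        pthread_mutex_t lock;         /* stand-in for the pool->lock spinlock */
        unsigned int flags;
};

static struct pool pools[NR_STD_POOLS] = {
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, POOL_DISASSOCIATED },
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, POOL_DISASSOCIATED },
};

/* mirrors the new CPU_ONLINE path: only one pool's locks are held at a time */
static void rebind_one_pool(struct pool *pool)
{
        pthread_mutex_lock(&pool->assoc_mutex);
        pthread_mutex_lock(&pool->lock);

        pool->flags &= ~POOL_DISASSOCIATED;
        /* the real code calls rebind_workers(pool) here */

        pthread_mutex_unlock(&pool->lock);
        pthread_mutex_unlock(&pool->assoc_mutex);
}

int main(void)
{
        for (int i = 0; i < NR_STD_POOLS; i++)
                rebind_one_pool(&pools[i]);

        for (int i = 0; i < NR_STD_POOLS; i++)
                printf("pool %d: flags=0x%x\n", i, pools[i].flags);
        return 0;
}

Because each pool is fully released before the next is claimed, no two pool locks are ever held at once, which is what makes the nested-locking helpers removed in the next hunk unnecessary.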
@@ -3497,40 +3490,14 @@ EXPORT_SYMBOL_GPL(work_busy);
  * are a lot of assumptions on strong associations among work, cwq and
  * gcwq which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths. Secondly,
- * gcwqs serve mix of short, long and very long running works making
+ * worker pools serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
  * This is solved by allowing the pools to be disassociated from the CPU
  * running as an unbound one and allowing it to be reattached later if the
  * cpu comes back online.
  */
 
-/* claim manager positions of all pools */
-static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
-{
-        struct worker_pool *pool;
-
-        for_each_worker_pool(pool, gcwq)
-                mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
-
-        local_irq_disable();
-        for_each_worker_pool(pool, gcwq)
-                spin_lock_nested(&pool->lock, pool - gcwq->pools);
-}
-
-/* release manager positions */
-static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
-{
-        struct worker_pool *pool;
-
-        for_each_worker_pool(pool, gcwq)
-                spin_unlock(&pool->lock);
-        local_irq_enable();
-
-        for_each_worker_pool(pool, gcwq)
-                mutex_unlock(&pool->assoc_mutex);
-}
-
 static void gcwq_unbind_fn(struct work_struct *work)
 {
         struct global_cwq *gcwq = get_gcwq(smp_processor_id());
@@ -3539,27 +3506,30 @@ static void gcwq_unbind_fn(struct work_struct *work)
         struct hlist_node *pos;
         int i;
 
-        BUG_ON(gcwq->pools[0].cpu != smp_processor_id());
+        for_each_worker_pool(pool, gcwq) {
+                BUG_ON(pool->cpu != smp_processor_id());
 
-        gcwq_claim_assoc_and_lock(gcwq);
+                mutex_lock(&pool->assoc_mutex);
+                spin_lock_irq(&pool->lock);
 
-        /*
-         * We've claimed all manager positions. Make all workers unbound
-         * and set DISASSOCIATED. Before this, all workers except for the
-         * ones which are still executing works from before the last CPU
-         * down must be on the cpu. After this, they may become diasporas.
-         */
-        for_each_worker_pool(pool, gcwq) {
+                /*
+                 * We've claimed all manager positions. Make all workers
+                 * unbound and set DISASSOCIATED. Before this, all workers
+                 * except for the ones which are still executing works from
+                 * before the last CPU down must be on the cpu. After
+                 * this, they may become diasporas.
+                 */
                 list_for_each_entry(worker, &pool->idle_list, entry)
                         worker->flags |= WORKER_UNBOUND;
 
                 for_each_busy_worker(worker, i, pos, pool)
                         worker->flags |= WORKER_UNBOUND;
 
                 pool->flags |= POOL_DISASSOCIATED;
-        }
 
-        gcwq_release_assoc_and_unlock(gcwq);
+                spin_unlock_irq(&pool->lock);
+                mutex_unlock(&pool->assoc_mutex);
+        }
 
         /*
          * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3615,11 +3585,16 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
         case CPU_DOWN_FAILED:
         case CPU_ONLINE:
-                gcwq_claim_assoc_and_lock(gcwq);
-                for_each_worker_pool(pool, gcwq)
+                for_each_worker_pool(pool, gcwq) {
+                        mutex_lock(&pool->assoc_mutex);
+                        spin_lock_irq(&pool->lock);
+
                         pool->flags &= ~POOL_DISASSOCIATED;
-                rebind_workers(gcwq);
-                gcwq_release_assoc_and_unlock(gcwq);
+                        rebind_workers(pool);
+
+                        spin_unlock_irq(&pool->lock);
+                        mutex_unlock(&pool->assoc_mutex);
+                }
                 break;
         }
         return NOTIFY_OK;
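For contrast, the removed gcwq_claim_assoc_and_lock()/gcwq_release_assoc_and_unlock() helpers claimed every pool of a gcwq up front and released them all at the end. Because that holds several locks of the same lock class simultaneously, the old code had to pass the pool index (pool - gcwq->pools) to mutex_lock_nested() and spin_lock_nested() as a lockdep subclass; the per-pool sequence used above needs no such annotation. Below is a hedged userspace sketch of that old claim-all/release-all shape, again with invented names and pthread mutexes standing in for the kernel locks.

/*
 * Illustrative sketch only (plain pthreads, not kernel code): the old
 * claim-all/release-all pattern.  All pool locks are taken in index
 * order and held simultaneously, so a fixed acquisition order is what
 * keeps this deadlock-free.
 */
#include <pthread.h>

#define NR_STD_POOLS        2

struct pool {
        pthread_mutex_t assoc_mutex;
        pthread_mutex_t lock;
};

static struct pool pools[NR_STD_POOLS] = {
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER },
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER },
};

/* analogue of gcwq_claim_assoc_and_lock(): lock every pool in index order */
static void claim_all(void)
{
        for (int i = 0; i < NR_STD_POOLS; i++)
                pthread_mutex_lock(&pools[i].assoc_mutex);
        for (int i = 0; i < NR_STD_POOLS; i++)
                pthread_mutex_lock(&pools[i].lock);
}

/* analogue of gcwq_release_assoc_and_unlock() */
static void release_all(void)
{
        for (int i = 0; i < NR_STD_POOLS; i++)
                pthread_mutex_unlock(&pools[i].lock);
        for (int i = 0; i < NR_STD_POOLS; i++)
                pthread_mutex_unlock(&pools[i].assoc_mutex);
}

int main(void)
{
        claim_all();
        /* ... operate on all pools while everything is held ... */
        release_all();
        return 0;
}

With everything held at once, correctness rests on the fixed index order of acquisition; the per-pool rework trades that single wide critical section for a series of short, independent ones.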