@@ -1560,7 +1560,7 @@ static void worker_leave_idle(struct worker *worker)
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1585,7 +1585,6 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&pool->lock)
 {
 	struct worker_pool *pool = worker->pool;
-	struct task_struct *task = worker->task;
 
 	while (true) {
 		/*
@@ -1595,12 +1594,12 @@ __acquires(&pool->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&pool->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == pool->cpu &&
+		if (task_cpu(current) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
 				  get_cpu_mask(pool->cpu)))
 			return true;
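
The change works because worker_maybe_bind_and_lock() is only ever called by the worker task it operates on, so current and worker->task refer to the same task and the local task variable can be dropped. As a rough userspace analogy (a minimal sketch, not part of the patch; struct worker, worker_report() and worker_fn() below are made-up names), a helper that only runs in the context of the thread it describes can use pthread_self() instead of an explicit thread handle:

/* Build with: cc sketch.c -o sketch -lpthread */
#include <pthread.h>
#include <stdio.h>

struct worker {
	pthread_t task;		/* analogous to worker->task */
};

static pthread_barrier_t ready;

/* Only called from the worker's own thread, so pthread_self() == w->task. */
static void worker_report(struct worker *w)
{
	printf("explicit handle matches self: %d\n",
	       pthread_equal(w->task, pthread_self()) != 0);
}

static void *worker_fn(void *arg)
{
	/* wait until the creator has stored our handle in w->task */
	pthread_barrier_wait(&ready);
	worker_report(arg);
	return NULL;
}

int main(void)
{
	struct worker w;

	pthread_barrier_init(&ready, NULL, 2);
	pthread_create(&w.task, NULL, worker_fn, &w);
	pthread_barrier_wait(&ready);
	pthread_join(w.task, NULL);
	pthread_barrier_destroy(&ready);
	return 0;
}

Switching to the current-task accessor also makes it explicit that the function binds the calling task itself, not some arbitrary worker passed in from elsewhere.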