
Commit ef55718

umgwanakikbuti authored and htejun committed
workqueue: schedule WORK_CPU_UNBOUND work on wq_unbound_cpumask CPUs
WORK_CPU_UNBOUND work items queued to a bound workqueue always run locally. This is a good thing normally, but not when the user has asked us to keep unbound work away from certain CPUs. Round robin these to wq_unbound_cpumask CPUs instead, as perturbation avoidance trumps performance.

tj: Cosmetic and comment changes. WARN_ON_ONCE() dropped from empty (wq_unbound_cpumask AND cpu_online_mask). If we want that, it should be done when config changes.

Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
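For context, every work item queued without an explicit CPU takes this path: queue_work() expands to queue_work_on(WORK_CPU_UNBOUND, ...), so ordinary drivers reach the new wq_select_unbound_cpu() without knowing it. A minimal sketch of such a caller (hypothetical driver code, not part of this commit; my_work_fn and my_driver_event are made-up names):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* runs on whichever CPU the queueing path selected */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_driver_event(void)
{
	/*
	 * schedule_work() is queue_work(system_wq, ...), which in turn is
	 * queue_work_on(WORK_CPU_UNBOUND, ...).  Before this patch that
	 * always meant the local CPU; after it, the choice honors
	 * wq_unbound_cpumask, falling back to the local CPU only when the
	 * mask yields no online CPU.
	 */
	schedule_work(&my_work);
}

The mask itself is the one administrators already control through /sys/devices/virtual/workqueue/cpumask.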
1 parent 041bd12 commit ef55718

File tree

1 file changed (+32, −2)

kernel/workqueue.c

Lines changed: 32 additions & 2 deletions
@@ -301,7 +301,11 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;	/* PL: have wqs started freezing? */
 
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -1298,6 +1302,32 @@ static bool is_chained_work(struct workqueue_struct *wq)
 	return worker && worker->current_pwq->wq == wq;
 }
 
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+	int new_cpu;
+
+	if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+		return cpu;
+	if (cpumask_empty(wq_unbound_cpumask))
+		return cpu;
+
+	new_cpu = __this_cpu_read(wq_rr_cpu_last);
+	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+	if (unlikely(new_cpu >= nr_cpu_ids)) {
+		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+		if (unlikely(new_cpu >= nr_cpu_ids))
+			return cpu;
+	}
+	__this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+	return new_cpu;
+}
+
 static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -1323,7 +1353,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		return;
 retry:
 	if (req_cpu == WORK_CPU_UNBOUND)
-		cpu = raw_smp_processor_id();
+		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 
 	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND))
