
Commit 5c903e1

Ming Lei authored and KAGA-KOKO committed
genirq/affinity: Move two stage affinity spreading into a helper function
No functional change. Prepares for supporting allocating and affinitizing interrupt sets.

[ tglx: Minor changelog tweaks ]

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Link: https://lkml.kernel.org/r/20181102145951.31979-3-ming.lei@redhat.com
1 parent b825921 commit 5c903e1
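
At a glance (condensed from the diff below, not additional code in the commit): the two-stage spreading that previously sat inline in irq_create_affinity_masks() now lives behind the old function name, while the original implementation is renamed __irq_build_affinity_masks(). The caller's side shrinks from two calls plus temporary-mask bookkeeping to a single call:

    /* Before: the caller drove both stages and owned nmsk/npresmsk. */
    usedvecs  = irq_build_affinity_masks(affd, curvec, affvecs,
                                         node_to_cpumask, cpu_present_mask,
                                         nmsk, masks);
    /* ... compute the next start vector, derive npresmsk ... */
    usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
                                         node_to_cpumask, npresmsk,
                                         nmsk, masks);

    /* After: one call; the helper now owns the temporary masks and the
     * get_online_cpus()/put_online_cpus() bracket. */
    usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
                                        node_to_cpumask, masks);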

1 file changed: +56 -36 lines changed

kernel/irq/affinity.c

Lines changed: 56 additions & 36 deletions
@@ -94,7 +94,7 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 	return nodes;
 }
 
-static int irq_build_affinity_masks(const struct irq_affinity *affd,
+static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 				    int startvec, int numvecs,
 				    cpumask_var_t *node_to_cpumask,
 				    const struct cpumask *cpu_mask,
@@ -165,6 +165,58 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 	return done;
 }
 
+/*
+ * build affinity in two stages:
+ *	1) spread present CPU on these vectors
+ *	2) spread other possible CPUs on these vectors
+ */
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+				    int startvec, int numvecs,
+				    cpumask_var_t *node_to_cpumask,
+				    struct cpumask *masks)
+{
+	int curvec = startvec, usedvecs = -1;
+	cpumask_var_t nmsk, npresmsk;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return usedvecs;
+
+	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+		goto fail;
+
+	/* Stabilize the cpumasks */
+	get_online_cpus();
+	build_node_to_cpumask(node_to_cpumask);
+
+	/* Spread on present CPUs starting from affd->pre_vectors */
+	usedvecs = __irq_build_affinity_masks(affd, curvec, numvecs,
+					      node_to_cpumask, cpu_present_mask,
+					      nmsk, masks);
+
+	/*
+	 * Spread on non present CPUs starting from the next vector to be
+	 * handled. If the spreading of present CPUs already exhausted the
+	 * vector space, assign the non present CPUs to the already spread
+	 * out vectors.
+	 */
+	if (usedvecs >= numvecs)
+		curvec = affd->pre_vectors;
+	else
+		curvec = affd->pre_vectors + usedvecs;
+	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+	usedvecs += __irq_build_affinity_masks(affd, curvec, numvecs,
+					       node_to_cpumask, npresmsk,
+					       nmsk, masks);
+	put_online_cpus();
+
+	free_cpumask_var(npresmsk);
+
+ fail:
+	free_cpumask_var(nmsk);
+
+	return usedvecs;
+}
+
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
  * @nvecs:	The total number of vectors
@@ -177,7 +229,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
 	int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
 	int curvec, usedvecs;
-	cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
+	cpumask_var_t *node_to_cpumask;
 	struct cpumask *masks = NULL;
 
 	/*
@@ -187,15 +239,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (nvecs == affd->pre_vectors + affd->post_vectors)
 		return NULL;
 
-	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return NULL;
-
-	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
-		goto outcpumsk;
-
 	node_to_cpumask = alloc_node_to_cpumask();
 	if (!node_to_cpumask)
-		goto outnpresmsk;
+		return NULL;
 
 	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
@@ -205,30 +251,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
 
-	/* Stabilize the cpumasks */
-	get_online_cpus();
-	build_node_to_cpumask(node_to_cpumask);
-
-	/* Spread on present CPUs starting from affd->pre_vectors */
 	usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
-					    node_to_cpumask, cpu_present_mask,
-					    nmsk, masks);
-
-	/*
-	 * Spread on non present CPUs starting from the next vector to be
-	 * handled. If the spreading of present CPUs already exhausted the
-	 * vector space, assign the non present CPUs to the already spread
-	 * out vectors.
-	 */
-	if (usedvecs >= affvecs)
-		curvec = affd->pre_vectors;
-	else
-		curvec = affd->pre_vectors + usedvecs;
-	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
-	usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
-					     node_to_cpumask, npresmsk,
-					     nmsk, masks);
-	put_online_cpus();
+					    node_to_cpumask, masks);
 
 	/* Fill out vectors at the end that don't need affinity */
 	if (usedvecs >= affvecs)
@@ -240,10 +264,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 outnodemsk:
 	free_node_to_cpumask(node_to_cpumask);
-outnpresmsk:
-	free_cpumask_var(npresmsk);
-outcpumsk:
-	free_cpumask_var(nmsk);
 	return masks;
 }
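
For orientation, a hypothetical caller sketch (not part of this commit): the public entry point irq_create_affinity_masks() is untouched by the refactoring, so existing users see no change. The function name example_setup_queue_affinity() and the vector counts are made up for illustration:

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static int example_setup_queue_affinity(void)
    {
            /* 8 vectors total: 1 pre + 6 spread queue vectors + 1 post. */
            struct irq_affinity affd = {
                    .pre_vectors  = 1,      /* e.g. an admin interrupt */
                    .post_vectors = 1,      /* e.g. a slow-path event interrupt */
            };
            struct cpumask *masks;

            masks = irq_create_affinity_masks(8, &affd);
            if (!masks)
                    return -ENOMEM;

            /*
             * masks[0] and masks[7] hold irq_default_affinity; masks[1..6]
             * carry the two-stage spread across present, then remaining
             * possible, CPUs.
             */
            kfree(masks);
            return 0;
    }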
