Skip to content

Commit 060746d

Browse files
Ming Lei authored and KAGA-KOKO committed
genirq/affinity: Pass first vector to __irq_build_affinity_masks()
No functional change. Prepares for support of allocating and affinitizing sets of interrupts, in which each set of interrupts needs a full two stage spreading. The first vector argument is necessary for this so the affinitizing starts from the first vector of each set. [ tglx: Minor changelog tweaks ] Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Jens Axboe <axboe@kernel.dk> Cc: linux-block@vger.kernel.org Cc: Hannes Reinecke <hare@suse.com> Cc: Keith Busch <keith.busch@intel.com> Cc: Sagi Grimberg <sagi@grimberg.me> Link: https://lkml.kernel.org/r/20181102145951.31979-4-ming.lei@redhat.com
1 parent 5c903e1 commit 060746d

File tree

1 file changed

+11
-9
lines changed

1 file changed

+11
-9
lines changed

kernel/irq/affinity.c

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -95,14 +95,14 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
9595
}
9696

9797
static int __irq_build_affinity_masks(const struct irq_affinity *affd,
98-
int startvec, int numvecs,
98+
int startvec, int numvecs, int firstvec,
9999
cpumask_var_t *node_to_cpumask,
100100
const struct cpumask *cpu_mask,
101101
struct cpumask *nmsk,
102102
struct cpumask *masks)
103103
{
104104
int n, nodes, cpus_per_vec, extra_vecs, done = 0;
105-
int last_affv = affd->pre_vectors + numvecs;
105+
int last_affv = firstvec + numvecs;
106106
int curvec = startvec;
107107
nodemask_t nodemsk = NODE_MASK_NONE;
108108

@@ -119,7 +119,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
119119
for_each_node_mask(n, nodemsk) {
120120
cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
121121
if (++curvec == last_affv)
122-
curvec = affd->pre_vectors;
122+
curvec = firstvec;
123123
}
124124
done = numvecs;
125125
goto out;
@@ -129,7 +129,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
129129
int ncpus, v, vecs_to_assign, vecs_per_node;
130130

131131
/* Spread the vectors per node */
132-
vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
132+
vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
133133

134134
/* Get the cpus on this node which are in the mask */
135135
cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -157,7 +157,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
157157
if (done >= numvecs)
158158
break;
159159
if (curvec >= last_affv)
160-
curvec = affd->pre_vectors;
160+
curvec = firstvec;
161161
--nodes;
162162
}
163163

@@ -190,8 +190,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
190190

191191
/* Spread on present CPUs starting from affd->pre_vectors */
192192
usedvecs = __irq_build_affinity_masks(affd, curvec, numvecs,
193-
node_to_cpumask, cpu_present_mask,
194-
nmsk, masks);
193+
affd->pre_vectors,
194+
node_to_cpumask,
195+
cpu_present_mask, nmsk, masks);
195196

196197
/*
197198
* Spread on non present CPUs starting from the next vector to be
@@ -205,8 +206,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
205206
curvec = affd->pre_vectors + usedvecs;
206207
cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
207208
usedvecs += __irq_build_affinity_masks(affd, curvec, numvecs,
208-
node_to_cpumask, npresmsk,
209-
nmsk, masks);
209+
affd->pre_vectors,
210+
node_to_cpumask, npresmsk,
211+
nmsk, masks);
210212
put_online_cpus();
211213

212214
free_cpumask_var(npresmsk);

0 commit comments

Comments (0)