
Commit 84676c1

Christoph Hellwig authored and axboe committed
genirq/affinity: assign vectors to all possible CPUs
Currently we assign managed interrupt vectors to all present CPUs. This works fine for systems where we only online/offline CPUs. But on systems that support physical CPU hotplug (or the virtualized version of it), the additional CPUs declared in the ACPI tables or on the command line are not catered for. To fix this we'd either need to introduce new hotplug CPU states just for this case, or we can start assigning vectors to possible but not present CPUs.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Fixes: 4b855ad ("blk-mq: Create hctx for each present CPU")
Cc: linux-kernel@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
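The distinction driving the patch: cpu_present_mask covers only CPUs that are physically populated right now, while cpu_possible_mask also covers CPUs that may be hot-plugged later. Its effect on vector counting is easiest to see in the irq_calc_affinity_vectors() hunk at the end of the diff below; the following minimal userspace sketch models that min_t() calculation with hypothetical CPU counts (4 present, 8 possible), not values read from a real machine:

#include <stdio.h>

/*
 * Models the "min_t(int, cpumask_weight(mask), vecs) + resv" line from
 * irq_calc_affinity_vectors(). The CPU counts are invented example
 * values, not queried from real cpumasks.
 */
static int calc_affinity_vectors(int ncpus, int vecs, int resv)
{
	return (ncpus < vecs ? ncpus : vecs) + resv;
}

int main(void)
{
	int present = 4, possible = 8;	/* hypothetical hotplug-capable box */
	int vecs = 16, resv = 2;	/* requested and reserved vectors */

	/* Before the patch, the cap was the present CPU count ... */
	printf("present-based:  %d vectors\n",
	       calc_affinity_vectors(present, vecs, resv));
	/* ... after it, vectors are provisioned for hot-pluggable CPUs too. */
	printf("possible-based: %d vectors\n",
	       calc_affinity_vectors(possible, vecs, resv));
	return 0;
}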
1 parent c27d53f commit 84676c1

File tree

1 file changed (+15, -15 lines)

kernel/irq/affinity.c

Lines changed: 15 additions & 15 deletions
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	}
 }
 
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
 {
 	cpumask_var_t *masks;
 	int node;
@@ -62,7 +62,7 @@ static cpumask_var_t *alloc_node_to_present_cpumask(void)
 	return NULL;
 }
 
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int node;
 
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
 	kfree(masks);
 }
 
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int cpu;
 
-	for_each_present_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
 }
 
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
 				const struct cpumask *mask, nodemask_t *nodemsk)
 {
 	int n, nodes = 0;
 
 	/* Calculate the number of nodes in the supplied affinity mask */
 	for_each_node(n) {
-		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+		if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
 			node_set(n, *nodemsk);
 			nodes++;
 		}
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
-	cpumask_var_t nmsk, *node_to_present_cpumask;
+	cpumask_var_t nmsk, *node_to_possible_cpumask;
 
 	/*
 	 * If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (!masks)
 		goto out;
 
-	node_to_present_cpumask = alloc_node_to_present_cpumask();
-	if (!node_to_present_cpumask)
+	node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+	if (!node_to_possible_cpumask)
 		goto out;
 
 	/* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	build_node_to_present_cpumask(node_to_present_cpumask);
-	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+	build_node_to_possible_cpumask(node_to_possible_cpumask);
+	nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
 				     &nodemsk);
 
 	/*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec,
-				     node_to_present_cpumask[n]);
+				     node_to_possible_cpumask[n]);
 			if (++curvec == last_affv)
 				break;
 		}
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+		cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	/* Fill out vectors at the end that don't need affinity */
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
-	free_node_to_present_cpumask(node_to_present_cpumask);
+	free_node_to_possible_cpumask(node_to_possible_cpumask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
 		return 0;
 
 	get_online_cpus();
-	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+	ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
 	put_online_cpus();
 	return ret;
 }
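To make the spreading hunk at @@ -160,7 easier to follow: after the per-node masks are built, the remaining vectors are split evenly across the nodes still to be served, then across each node's CPUs. A hedged userspace model of that arithmetic follows; the two nodes and eight possible CPUs per node are invented values, and the real code works on cpumasks (and starts curvec at affd->pre_vectors) rather than on plain integers:

#include <stdio.h>

/*
 * Simplified model of the per-node spreading loop in
 * irq_create_affinity_masks(), assuming pre_vectors == 0.
 */
int main(void)
{
	const int total_nodes = 2;	/* nodes intersecting the mask */
	int remaining = total_nodes;	/* the kernel decrements "nodes" too */
	int affv = 6;			/* vectors that need spreading */
	int curvec = 0;

	for (int n = 0; n < total_nodes; n++, remaining--) {
		/* Same formula as the kernel: share what is left evenly. */
		int vecs_per_node = (affv - curvec) / remaining;
		int ncpus = 8;		/* possible CPUs on this node */
		int vecs_to_assign = vecs_per_node < ncpus ?
				     vecs_per_node : ncpus;

		printf("node %d: %d vectors over %d possible CPUs\n",
		       n, vecs_to_assign, ncpus);
		curvec += vecs_to_assign;
	}
	return 0;
}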
