@@ -171,52 +171,54 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
  * 2) spread other possible CPUs on these vectors
  */
 static int irq_build_affinity_masks(const struct irq_affinity *affd,
-				    int startvec, int numvecs,
+				    int startvec, int numvecs, int firstvec,
 				    cpumask_var_t *node_to_cpumask,
 				    struct cpumask *masks)
 {
-	int curvec = startvec, usedvecs = -1;
+	int curvec = startvec, nr_present, nr_others;
+	int ret = -ENOMEM;
 	cpumask_var_t nmsk, npresmsk;
 
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-		return usedvecs;
+		return ret;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
 		goto fail;
 
+	ret = 0;
 	/* Stabilize the cpumasks */
 	get_online_cpus();
 	build_node_to_cpumask(node_to_cpumask);
 
 	/* Spread on present CPUs starting from affd->pre_vectors */
-	usedvecs = __irq_build_affinity_masks(affd, curvec, numvecs,
-					      affd->pre_vectors,
-					      node_to_cpumask,
-					      cpu_present_mask, nmsk, masks);
+	nr_present = __irq_build_affinity_masks(affd, curvec, numvecs,
+						firstvec, node_to_cpumask,
+						cpu_present_mask, nmsk, masks);
 
 	/*
 	 * Spread on non present CPUs starting from the next vector to be
 	 * handled. If the spreading of present CPUs already exhausted the
 	 * vector space, assign the non present CPUs to the already spread
 	 * out vectors.
 	 */
-	if (usedvecs >= numvecs)
-		curvec = affd->pre_vectors;
+	if (nr_present >= numvecs)
+		curvec = firstvec;
 	else
-		curvec = affd->pre_vectors + usedvecs;
+		curvec = firstvec + nr_present;
 	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
-	usedvecs += __irq_build_affinity_masks(affd, curvec, numvecs,
-					       affd->pre_vectors,
-					       node_to_cpumask, npresmsk,
-					       nmsk, masks);
+	nr_others = __irq_build_affinity_masks(affd, curvec, numvecs,
+					       firstvec, node_to_cpumask,
+					       npresmsk, nmsk, masks);
 	put_online_cpus();
 
+	if (nr_present < numvecs)
+		WARN_ON(nr_present + nr_others < numvecs);
+
 	free_cpumask_var(npresmsk);
 
 fail:
 	free_cpumask_var(nmsk);
-
-	return usedvecs;
+	return ret;
 }
 
 /**
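The reworked irq_build_affinity_masks() now takes the first vector of the current set as an explicit firstvec argument (rather than hard-coding affd->pre_vectors) and returns 0 or -ENOMEM instead of the number of vectors used, since callers can derive that count themselves. A toy userspace sketch of the two-pass spreading it drives, assuming made-up CPU numbers and plain arrays standing in for cpumasks (not the kernel code):

#include <stdio.h>

#define NVEC 4	/* vectors in this set (hypothetical) */

/* Round-robin ncpus CPUs onto vectors [firstvec, firstvec + NVEC). */
static int spread(const char *kind, const int *cpus, int ncpus,
		  int startvec, int firstvec)
{
	int curvec = startvec, i;

	for (i = 0; i < ncpus; i++) {
		printf("vector %d <- %s CPU %d\n", curvec, kind, cpus[i]);
		if (++curvec >= firstvec + NVEC)
			curvec = firstvec;	/* wrap within the set */
	}
	return ncpus < NVEC ? ncpus : NVEC;	/* vectors actually used */
}

int main(void)
{
	int present[] = { 0, 1, 2, 3, 4, 5 };	/* cpu_present_mask */
	int others[]  = { 6, 7 };		/* possible but not present */
	int firstvec = 0, nr_present;

	/* Pass 1: present CPUs, starting at the set's first vector */
	nr_present = spread("present", present, 6, firstvec, firstvec);

	/*
	 * Pass 2: remaining possible CPUs. If pass 1 exhausted the set,
	 * start over at firstvec; otherwise continue after pass 1.
	 */
	spread("other", others, 2,
	       nr_present >= NVEC ? firstvec : firstvec + nr_present,
	       firstvec);
	return 0;
}

Running this puts the two non-present CPUs on vectors 0 and 1, which pass 1 already populated, matching the "assign the non present CPUs to the already spread out vectors" comment.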
@@ -233,6 +235,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	int curvec, usedvecs;
 	cpumask_var_t *node_to_cpumask;
 	struct cpumask *masks = NULL;
+	int i, nr_sets;
 
 	/*
 	 * If there aren't any vectors left after applying the pre/post
@@ -253,8 +256,28 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
 
-	usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
-					    node_to_cpumask, masks);
+	/*
+	 * Spread on present CPUs starting from affd->pre_vectors. If we
+	 * have multiple sets, build each sets affinity mask separately.
+	 */
+	nr_sets = affd->nr_sets;
+	if (!nr_sets)
+		nr_sets = 1;
+
+	for (i = 0, usedvecs = 0; i < nr_sets; i++) {
+		int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+		int ret;
+
+		ret = irq_build_affinity_masks(affd, curvec, this_vecs,
+					       curvec, node_to_cpumask, masks);
+		if (ret) {
+			kfree(masks);
+			masks = NULL;
+			goto outnodemsk;
+		}
+		curvec += this_vecs;
+		usedvecs += this_vecs;
+	}
 
 	/* Fill out vectors at the end that don't need affinity */
 	if (usedvecs >= affvecs)
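With sets, irq_create_affinity_masks() spreads each set independently, passing the set's own first vector as firstvec so the wrap-around above stays inside that set. A sketch of what a caller might look like, assuming the struct irq_affinity nr_sets/sets extension from this series and made-up queue counts:

/*
 * Hypothetical driver setup: one non-affinitized admin vector, then two
 * affinitized sets (e.g. read vs. write queues). Not from this patch.
 */
static int my_driver_setup_irqs(struct pci_dev *pdev)
{
	int sets[2] = { 24, 8 };
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* admin interrupt */
		.nr_sets	= ARRAY_SIZE(sets),
		.sets		= sets,
	};

	/* 24 + 8 affinity vectors plus the one pre vector */
	return pci_alloc_irq_vectors_affinity(pdev, 33, 33,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}

Because the loop consumes exactly affd->sets[i] vectors per set, the set sizes have to add up to the affinity vectors actually allocated, hence the exact min/max request in the sketch.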
@@ -279,13 +302,21 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
 {
 	int resv = affd->pre_vectors + affd->post_vectors;
 	int vecs = maxvec - resv;
-	int ret;
+	int set_vecs;
 
 	if (resv > minvec)
 		return 0;
 
-	get_online_cpus();
-	ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
-	put_online_cpus();
-	return ret;
+	if (affd->nr_sets) {
+		int i;
+
+		for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
+			set_vecs += affd->sets[i];
+	} else {
+		get_online_cpus();
+		set_vecs = cpumask_weight(cpu_possible_mask);
+		put_online_cpus();
+	}
+
+	return resv + min(set_vecs, vecs);
 }
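Worked example for the new calculation, using the made-up sets above: pre_vectors = 1 and post_vectors = 0 give resv = 1, and maxvec = 33 gives vecs = 32; the set loop yields set_vecs = 24 + 8 = 32, so the function returns 1 + min(32, 32) = 33. Without sets, set_vecs falls back to the weight of cpu_possible_mask, preserving the old behaviour.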