Skip to content

Commit 222e923

Browse files
jsmart-gh authored and martinkpetersen committed
scsi: lpfc: Resize cpu maps structures based on possible cpus
The work done to date utilized the number of present cpus when sizing per-cpu structures. Structures should have been sized based on the max possible cpu count. Convert the driver over to possible cpu count for sizing allocation. Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com> Signed-off-by: James Smart <jsmart2021@gmail.com> Reviewed-by: Hannes Reinecke <hare@suse.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1 parent 75508a8 commit 222e923

File tree

4 files changed

+51
-41
lines changed

4 files changed

+51
-41
lines changed

drivers/scsi/lpfc/lpfc_attr.c

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5176,16 +5176,22 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
51765176
case 1:
51775177
len += snprintf(buf + len, PAGE_SIZE-len,
51785178
"fcp_cpu_map: HBA centric mapping (%d): "
5179-
"%d online CPUs\n",
5180-
phba->cfg_fcp_cpu_map,
5181-
phba->sli4_hba.num_online_cpu);
5179+
"%d of %d CPUs online from %d possible CPUs\n",
5180+
phba->cfg_fcp_cpu_map, num_online_cpus(),
5181+
num_present_cpus(),
5182+
phba->sli4_hba.num_possible_cpu);
51825183
break;
51835184
}
51845185

5185-
while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
5186+
while (phba->sli4_hba.curr_disp_cpu <
5187+
phba->sli4_hba.num_possible_cpu) {
51865188
cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
51875189

5188-
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
5190+
if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5191+
len += snprintf(buf + len, PAGE_SIZE - len,
5192+
"CPU %02d not present\n",
5193+
phba->sli4_hba.curr_disp_cpu);
5194+
else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
51895195
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
51905196
len += snprintf(
51915197
buf + len, PAGE_SIZE - len,
@@ -5225,14 +5231,15 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
52255231

52265232
/* display max number of CPUs keeping some margin */
52275233
if (phba->sli4_hba.curr_disp_cpu <
5228-
phba->sli4_hba.num_present_cpu &&
5234+
phba->sli4_hba.num_possible_cpu &&
52295235
(len >= (PAGE_SIZE - 64))) {
5230-
len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
5236+
len += snprintf(buf + len,
5237+
PAGE_SIZE - len, "more...\n");
52315238
break;
52325239
}
52335240
}
52345241

5235-
if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
5242+
if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
52365243
phba->sli4_hba.curr_disp_cpu = 0;
52375244

52385245
return len;

drivers/scsi/lpfc/lpfc_init.c

Lines changed: 13 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -6373,8 +6373,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
63736373
u32 if_type;
63746374
u32 if_fam;
63756375

6376-
phba->sli4_hba.num_online_cpu = num_online_cpus();
63776376
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6377+
phba->sli4_hba.num_possible_cpu = num_possible_cpus();
63786378
phba->sli4_hba.curr_disp_cpu = 0;
63796379

63806380
/* Get all the module params for configuring this host */
@@ -6796,7 +6796,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
67966796
goto out_free_fcf_rr_bmask;
67976797
}
67986798

6799-
phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6799+
phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
68006800
sizeof(struct lpfc_vector_map_info),
68016801
GFP_KERNEL);
68026802
if (!phba->sli4_hba.cpu_map) {
@@ -6868,8 +6868,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
68686868

68696869
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
68706870
kfree(phba->sli4_hba.cpu_map);
6871+
phba->sli4_hba.num_possible_cpu = 0;
68716872
phba->sli4_hba.num_present_cpu = 0;
6872-
phba->sli4_hba.num_online_cpu = 0;
68736873
phba->sli4_hba.curr_disp_cpu = 0;
68746874

68756875
/* Free memory allocated for fast-path work queue handles */
@@ -10519,15 +10519,14 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
1051910519
int cpu;
1052010520

1052110521
/* Find the desired phys_id for the specified EQ */
10522-
cpup = phba->sli4_hba.cpu_map;
10523-
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
10522+
for_each_present_cpu(cpu) {
10523+
cpup = &phba->sli4_hba.cpu_map[cpu];
1052410524
if ((match == LPFC_FIND_BY_EQ) &&
1052510525
(cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
1052610526
(cpup->eq == id))
1052710527
return cpu;
1052810528
if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
1052910529
return cpu;
10530-
cpup++;
1053110530
}
1053210531
return 0;
1053310532
}
@@ -10545,11 +10544,10 @@ lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
1054510544
int cpu;
1054610545

1054710546
/* Find the desired phys_id for the specified EQ */
10548-
cpup = phba->sli4_hba.cpu_map;
10549-
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
10547+
for_each_present_cpu(cpu) {
10548+
cpup = &phba->sli4_hba.cpu_map[cpu];
1055010549
if (cpup->hdwq == hdwq)
1055110550
return cpup->eq;
10552-
cpup++;
1055310551
}
1055410552
return 0;
1055510553
}
@@ -10569,15 +10567,13 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
1056910567
struct lpfc_vector_map_info *cpup;
1057010568
int idx;
1057110569

10572-
cpup = phba->sli4_hba.cpu_map;
10573-
for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
10570+
for_each_present_cpu(idx) {
10571+
cpup = &phba->sli4_hba.cpu_map[idx];
1057410572
/* Does the cpup match the one we are looking for */
1057510573
if ((cpup->phys_id == phys_id) &&
1057610574
(cpup->core_id == core_id) &&
10577-
(cpu != idx)) {
10575+
(cpu != idx))
1057810576
return 1;
10579-
}
10580-
cpup++;
1058110577
}
1058210578
return 0;
1058310579
}
@@ -10608,7 +10604,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
1060810604
/* Init cpu_map array */
1060910605
memset(phba->sli4_hba.cpu_map, 0xff,
1061010606
(sizeof(struct lpfc_vector_map_info) *
10611-
phba->sli4_hba.num_present_cpu));
10607+
phba->sli4_hba.num_possible_cpu));
1061210608

1061310609
max_phys_id = 0;
1061410610
min_phys_id = 0xffff;
@@ -10617,8 +10613,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
1061710613
phys_id = 0;
1061810614

1061910615
/* Update CPU map with physical id and core id of each CPU */
10620-
cpup = phba->sli4_hba.cpu_map;
10621-
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
10616+
for_each_present_cpu(cpu) {
10617+
cpup = &phba->sli4_hba.cpu_map[cpu];
1062210618
#ifdef CONFIG_X86
1062310619
cpuinfo = &cpu_data(cpu);
1062410620
cpup->phys_id = cpuinfo->phys_proc_id;
@@ -10645,8 +10641,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
1064510641
max_core_id = cpup->core_id;
1064610642
if (cpup->core_id < min_core_id)
1064710643
min_core_id = cpup->core_id;
10648-
10649-
cpup++;
1065010644
}
1065110645

1065210646
for_each_possible_cpu(i) {

drivers/scsi/lpfc/lpfc_nvmet.c

Lines changed: 22 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1194,9 +1194,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
11941194

11951195
/* Cycle the the entire CPU context list for every MRQ */
11961196
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1197-
for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1197+
for_each_present_cpu(j) {
1198+
infop = lpfc_get_ctx_list(phba, j, i);
11981199
__lpfc_nvmet_clean_io_for_cpu(phba, infop);
1199-
infop++; /* next */
12001200
}
12011201
}
12021202
kfree(phba->sli4_hba.nvmet_ctx_info);
@@ -1211,14 +1211,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
12111211
union lpfc_wqe128 *wqe;
12121212
struct lpfc_nvmet_ctx_info *last_infop;
12131213
struct lpfc_nvmet_ctx_info *infop;
1214-
int i, j, idx;
1214+
int i, j, idx, cpu;
12151215

12161216
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
12171217
"6403 Allocate NVMET resources for %d XRIs\n",
12181218
phba->sli4_hba.nvmet_xri_cnt);
12191219

12201220
phba->sli4_hba.nvmet_ctx_info = kcalloc(
1221-
phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1221+
phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
12221222
sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
12231223
if (!phba->sli4_hba.nvmet_ctx_info) {
12241224
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1246,13 +1246,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
12461246
* of the IO completion. Thus a context that was allocated for MRQ A
12471247
* whose IO completed on CPU B will be freed to cpuB/mrqA.
12481248
*/
1249-
infop = phba->sli4_hba.nvmet_ctx_info;
1250-
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1249+
for_each_possible_cpu(i) {
12511250
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1251+
infop = lpfc_get_ctx_list(phba, i, j);
12521252
INIT_LIST_HEAD(&infop->nvmet_ctx_list);
12531253
spin_lock_init(&infop->nvmet_ctx_list_lock);
12541254
infop->nvmet_ctx_list_cnt = 0;
1255-
infop++;
12561255
}
12571256
}
12581257

@@ -1262,8 +1261,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
12621261
* MRQ 1 cycling thru CPUs 0 - X, and so on.
12631262
*/
12641263
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1265-
last_infop = lpfc_get_ctx_list(phba, 0, j);
1266-
for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
1264+
last_infop = lpfc_get_ctx_list(phba,
1265+
cpumask_first(cpu_present_mask),
1266+
j);
1267+
for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
12671268
infop = lpfc_get_ctx_list(phba, i, j);
12681269
infop->nvmet_ctx_next_cpu = last_infop;
12691270
last_infop = infop;
@@ -1274,6 +1275,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
12741275
* received command on a per xri basis.
12751276
*/
12761277
idx = 0;
1278+
cpu = cpumask_first(cpu_present_mask);
12771279
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
12781280
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
12791281
if (!ctx_buf) {
@@ -1327,19 +1329,26 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
13271329
* is MRQidx will be associated with CPUidx. This association
13281330
* can change on the fly.
13291331
*/
1330-
infop = lpfc_get_ctx_list(phba, idx, idx);
1332+
infop = lpfc_get_ctx_list(phba, cpu, idx);
13311333
spin_lock(&infop->nvmet_ctx_list_lock);
13321334
list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
13331335
infop->nvmet_ctx_list_cnt++;
13341336
spin_unlock(&infop->nvmet_ctx_list_lock);
13351337

13361338
/* Spread ctx structures evenly across all MRQs */
13371339
idx++;
1338-
if (idx >= phba->cfg_nvmet_mrq)
1340+
if (idx >= phba->cfg_nvmet_mrq) {
13391341
idx = 0;
1342+
cpu = cpumask_first(cpu_present_mask);
1343+
continue;
1344+
}
1345+
cpu = cpumask_next(cpu, cpu_present_mask);
1346+
if (cpu == nr_cpu_ids)
1347+
cpu = cpumask_first(cpu_present_mask);
1348+
13401349
}
13411350

1342-
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1351+
for_each_present_cpu(i) {
13431352
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
13441353
infop = lpfc_get_ctx_list(phba, i, j);
13451354
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
@@ -1839,7 +1848,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
18391848
else
18401849
get_infop = current_infop->nvmet_ctx_next_cpu;
18411850

1842-
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1851+
for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
18431852
if (get_infop == current_infop) {
18441853
get_infop = get_infop->nvmet_ctx_next_cpu;
18451854
continue;

drivers/scsi/lpfc/lpfc_sli4.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -890,7 +890,7 @@ struct lpfc_sli4_hba {
890890

891891
/* CPU to vector mapping information */
892892
struct lpfc_vector_map_info *cpu_map;
893-
uint16_t num_online_cpu;
893+
uint16_t num_possible_cpu;
894894
uint16_t num_present_cpu;
895895
uint16_t curr_disp_cpu;
896896
struct lpfc_eq_intr_info __percpu *eq_info;

0 commit comments

Comments
 (0)