
Commit 46a1449

aik authored and mpe committed
powerpc/powernv: Move npu struct from pnv_phb to pci_controller
The powernv PCI code stores NPU data in the pnv_phb struct. The latter is referenced by pci_controller::private_data. We are going to have NPU2 support in the pseries platform as well, but it does not store any private_data in the pci_controller struct; and even if it did, it would be a different data structure.

This makes npu a pointer and stores it one level higher, in the pci_controller struct.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent c10c21e · commit 46a1449
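In practice the change swaps a two-step lookup through the platform-private pnv_phb for a direct pointer on the generic controller ("hose"). A minimal before/after sketch of the access pattern, taken from the diff below, where npdev stands for any NVLink emulated PCI device:

	/* Before: NPU data embedded in the powernv-only pnv_phb, reached
	 * through the opaque private_data pointer. */
	struct pnv_phb *nphb = pci_bus_to_host(npdev->bus)->private_data;
	struct npu *npu = &nphb->npu;

	/* After: the descriptor hangs off the generic pci_controller, so a
	 * platform without a pnv_phb (e.g. pseries) can populate it too. */
	struct npu *npu = pci_bus_to_host(npdev->bus)->npu;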

4 files changed: +58 −35 lines

arch/powerpc/include/asm/pci-bridge.h
Lines changed: 1 addition & 0 deletions

@@ -129,6 +129,7 @@ struct pci_controller {
 #endif	/* CONFIG_PPC64 */
 
 	void *private_data;
+	struct npu *npu;
 };
 
 /* These are used for config access before all the PCI probing
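Note that pci-bridge.h gets by with an incomplete type: only a pointer is stored, so the header never needs the layout of struct npu, which stays private to npu-dma.c. A sketch of the resulting pattern (the forward declaration and comments are editorial, not from the header):

	struct npu;		/* opaque to generic code; defined in npu-dma.c */

	struct pci_controller {
		/* ... */
		void *private_data;	/* powernv points this at its pnv_phb */
		struct npu *npu;	/* set by pnv_npu2_init(), NULL otherwise */
	};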

arch/powerpc/platforms/powernv/npu-dma.c
Lines changed: 56 additions & 18 deletions

@@ -326,6 +326,25 @@ struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
 	return gpe;
 }
 
+/*
+ * NPU2 ATS
+ */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
+	int index;
+	__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+	unsigned int mmio_atsd_count;
+
+	/* Bitmask for MMIO register usage */
+	unsigned long mmio_atsd_usage;
+
+	/* Do we need to explicitly flush the nest mmu? */
+	bool nmmu_flush;
+};
+
 /* Maximum number of nvlinks per npu */
 #define NV_MAX_LINKS 6
 
@@ -477,7 +496,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
 	int i, j;
 	struct npu *npu;
 	struct pci_dev *npdev;
-	struct pnv_phb *nphb;
 
 	for (i = 0; i <= max_npu2_index; i++) {
 		mmio_atsd_reg[i].reg = -1;
@@ -492,8 +510,7 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
 		if (!npdev)
 			continue;
 
-		nphb = pci_bus_to_host(npdev->bus)->private_data;
-		npu = &nphb->npu;
+		npu = pci_bus_to_host(npdev->bus)->npu;
 		mmio_atsd_reg[i].npu = npu;
 		mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
 		while (mmio_atsd_reg[i].reg < 0) {
@@ -661,6 +678,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct npu_context *npu_context;
+	struct pci_controller *hose;
 
 	/*
 	 * At present we don't support GPUs connected to multiple NPUs and I'm
@@ -688,8 +706,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	nphb = pci_bus_to_host(npdev->bus)->private_data;
-	npu = &nphb->npu;
+	hose = pci_bus_to_host(npdev->bus);
+	nphb = hose->private_data;
+	npu = hose->npu;
 
 	/*
 	 * Setup the NPU context table for a particular GPU. These need to be
@@ -763,7 +782,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	 */
 	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
 
-	if (!nphb->npu.nmmu_flush) {
+	if (!npu->nmmu_flush) {
 		/*
 		 * If we're not explicitly flushing ourselves we need to mark
 		 * the thread for global flushes
@@ -801,15 +820,17 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
 	u32 nvlink_index;
+	struct pci_controller *hose;
 
 	if (WARN_ON(!npdev))
 		return;
 
 	if (!firmware_has_feature(FW_FEATURE_OPAL))
 		return;
 
-	nphb = pci_bus_to_host(npdev->bus)->private_data;
-	npu = &nphb->npu;
+	hose = pci_bus_to_host(npdev->bus);
+	nphb = hose->private_data;
+	npu = hose->npu;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
@@ -887,9 +908,15 @@ int pnv_npu2_init(struct pnv_phb *phb)
 	struct pci_dev *gpdev;
 	static int npu_index;
 	uint64_t rc = 0;
+	struct pci_controller *hose = phb->hose;
+	struct npu *npu;
+	int ret;
+
+	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+	if (!npu)
+		return -ENOMEM;
 
-	phb->npu.nmmu_flush =
-		of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
+	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
 	for_each_child_of_node(phb->hose->dn, dn) {
 		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
 		if (gpdev) {
@@ -903,18 +930,29 @@
 		}
 	}
 
-	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
 						i, &mmio_atsd); i++)
-		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+		npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
 
-	pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
-	phb->npu.mmio_atsd_count = i;
-	phb->npu.mmio_atsd_usage = 0;
+	pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+	npu->mmio_atsd_count = i;
+	npu->mmio_atsd_usage = 0;
 	npu_index++;
-	if (WARN_ON(npu_index >= NV_MAX_NPUS))
-		return -ENOSPC;
+	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+		ret = -ENOSPC;
+		goto fail_exit;
+	}
 	max_npu2_index = npu_index;
-	phb->npu.index = npu_index;
+	npu->index = npu_index;
+	hose->npu = npu;
 
 	return 0;
+
+fail_exit:
+	for (i = 0; i < npu->mmio_atsd_count; ++i)
+		iounmap(npu->mmio_atsd_regs[i]);
+
+	kfree(npu);
+
+	return ret;
 }
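The same pci_bus_to_host(...)->npu lookup now appears in several places above; a hypothetical helper (npdev_to_npu is an illustrative name, not part of this commit) shows the shape of the new access path:

	static struct npu *npdev_to_npu(struct pci_dev *npdev)
	{
		struct pci_controller *hose = pci_bus_to_host(npdev->bus);

		/* hose->npu is only set once pnv_npu2_init() has succeeded */
		return hose ? hose->npu : NULL;
	}

Note also that pnv_npu2_init() can now fail (allocation failure, or more NPUs than NV_MAX_NPUS), so its new error path unmaps any ATSD registers already ioremap()ed and frees the descriptor; the caller in pci-ioda.c checks the return value accordingly.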

arch/powerpc/platforms/powernv/pci-ioda.c
Lines changed: 1 addition & 1 deletion

@@ -1278,7 +1278,7 @@ static void pnv_pci_ioda_setup_PEs(void)
 		pnv_ioda_reserve_pe(phb, 0);
 		pnv_ioda_setup_npu_PEs(hose->bus);
 		if (phb->model == PNV_PHB_MODEL_NPU2)
-			pnv_npu2_init(phb);
+			WARN_ON_ONCE(pnv_npu2_init(phb));
 	}
 	if (phb->type == PNV_PHB_NPU_OCAPI) {
 		bus = hose->bus;

arch/powerpc/platforms/powernv/pci.h
Lines changed: 0 additions & 16 deletions

@@ -8,9 +8,6 @@
 
 struct pci_dn;
 
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-
 enum pnv_phb_type {
 	PNV_PHB_IODA1 = 0,
 	PNV_PHB_IODA2 = 1,
@@ -174,19 +171,6 @@ struct pnv_phb {
 	unsigned int		diag_data_size;
 	u8			*diag_data;
 
-	/* Nvlink2 data */
-	struct npu {
-		int index;
-		__be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
-		unsigned int mmio_atsd_count;
-
-		/* Bitmask for MMIO register usage */
-		unsigned long mmio_atsd_usage;
-
-		/* Do we need to explicitly flush the nest mmu? */
-		bool nmmu_flush;
-	} npu;
-
 	int p2p_target_count;
 };
