@@ -511,6 +511,9 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
 				continue;
 
 			npu = pci_bus_to_host(npdev->bus)->npu;
+			if (!npu)
+				continue;
+
 			mmio_atsd_reg[i].npu = npu;
 			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
 			while (mmio_atsd_reg[i].reg < 0) {
@@ -675,7 +678,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	u32 nvlink_index;
 	struct device_node *nvlink_dn;
 	struct mm_struct *mm = current->mm;
-	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct npu_context *npu_context;
 	struct pci_controller *hose;
@@ -686,13 +688,14 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	 */
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 
-	if (!firmware_has_feature(FW_FEATURE_OPAL))
-		return ERR_PTR(-ENODEV);
-
 	if (!npdev)
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
+	/* We only support DR/PR/HV in pnv_npu2_map_lpar_dev() */
+	if (flags & ~(MSR_DR | MSR_PR | MSR_HV))
+		return ERR_PTR(-EINVAL);
+
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
@@ -707,20 +710,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 	}
 
 	hose = pci_bus_to_host(npdev->bus);
-	nphb = hose->private_data;
 	npu = hose->npu;
-
-	/*
-	 * Setup the NPU context table for a particular GPU. These need to be
-	 * per-GPU as we need the tables to filter ATSDs when there are no
-	 * active contexts on a particular GPU. It is safe for these to be
-	 * called concurrently with destroy as the OPAL call takes appropriate
-	 * locks and refcounts on init/destroy.
-	 */
-	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
-				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
-	if (rc < 0)
-		return ERR_PTR(-ENOSPC);
+	if (!npu)
+		return ERR_PTR(-ENODEV);
 
 	/*
 	 * We store the npu pci device so we can more easily get at the
@@ -732,9 +724,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		if (npu_context->release_cb != cb ||
 			npu_context->priv != priv) {
 			spin_unlock(&npu_context_lock);
-			opal_npu_destroy_context(nphb->opal_id, mm->context.id,
-					PCI_DEVID(gpdev->bus->number,
-						gpdev->devfn));
 			return ERR_PTR(-EINVAL);
 		}
 
@@ -760,9 +749,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 
 	if (rc) {
 		kfree(npu_context);
-		opal_npu_destroy_context(nphb->opal_id, mm->context.id,
-				PCI_DEVID(gpdev->bus->number,
-					gpdev->devfn));
 		return ERR_PTR(rc);
 	}
 
@@ -815,7 +801,6 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 			struct pci_dev *gpdev)
 {
 	int removed;
-	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
@@ -825,19 +810,15 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 	if (WARN_ON(!npdev))
 		return;
 
-	if (!firmware_has_feature(FW_FEATURE_OPAL))
-		return;
-
 	hose = pci_bus_to_host(npdev->bus);
-	nphb = hose->private_data;
 	npu = hose->npu;
+	if (!npu)
+		return;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
 		return;
 	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
-	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
-			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
 	spin_lock(&npu_context_lock);
 	removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
 	spin_unlock(&npu_context_lock);
@@ -869,9 +850,6 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
869
850
/* mmap_sem should be held so the struct_mm must be present */
870
851
struct mm_struct * mm = context -> mm ;
871
852
872
- if (!firmware_has_feature (FW_FEATURE_OPAL ))
873
- return - ENODEV ;
874
-
875
853
WARN_ON (!rwsem_is_locked (& mm -> mmap_sem ));
876
854
877
855
for (i = 0 ; i < count ; i ++ ) {
@@ -900,15 +878,11 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
 }
 EXPORT_SYMBOL(pnv_npu2_handle_fault);
 
-int pnv_npu2_init(struct pnv_phb *phb)
+int pnv_npu2_init(struct pci_controller *hose)
 {
 	unsigned int i;
 	u64 mmio_atsd;
-	struct device_node *dn;
-	struct pci_dev *gpdev;
 	static int npu_index;
-	uint64_t rc = 0;
-	struct pci_controller *hose = phb->hose;
 	struct npu *npu;
 	int ret;
 
@@ -917,18 +891,6 @@ int pnv_npu2_init(struct pnv_phb *phb)
 		return -ENOMEM;
 
 	npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
-	for_each_child_of_node(phb->hose->dn, dn) {
-		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
-		if (gpdev) {
-			rc = opal_npu_map_lpar(phb->opal_id,
-				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
-				0, 0);
-			if (rc)
-				dev_err(&gpdev->dev,
-					"Error %lld mapping device to LPAR\n",
-					rc);
-		}
-	}
 
 	for (i = 0; !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
 							i, &mmio_atsd); i++)
@@ -956,3 +918,52 @@ int pnv_npu2_init(struct pnv_phb *phb)
 
 	return ret;
 }
+
+int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
+		unsigned long msr)
+{
+	int ret;
+	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
+	struct pci_controller *hose;
+	struct pnv_phb *nphb;
+
+	if (!npdev)
+		return -ENODEV;
+
+	hose = pci_bus_to_host(npdev->bus);
+	nphb = hose->private_data;
+
+	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
+			nphb->opal_id, lparid);
+	/*
+	 * Currently we only support radix and non-zero LPCR only makes sense
+	 * for hash tables so skiboot expects the LPCR parameter to be a zero.
+	 */
+	ret = opal_npu_map_lpar(nphb->opal_id,
+			PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid,
+			0 /* LPCR bits */);
+	if (ret) {
+		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
+		return ret;
+	}
+
+	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
+			nphb->opal_id, msr);
+	ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
+			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+	if (ret < 0)
+		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
+	else
+		ret = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
+
+void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
+{
+	struct pci_dev *gpdev;
+
+	list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
+		pnv_npu2_map_lpar_dev(gpdev, 0, msr);
+}
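
Below is a minimal sketch (not part of the diff above) of how a platform-setup path could consume the new helper. The wrapper function pnv_example_setup_gpu_pe() and the include paths are assumptions for illustration only; pnv_npu2_map_lpar(), struct pnv_ioda_pe and the MSR_DR | MSR_PR | MSR_HV flag set come from the patch itself, which moves the OPAL LPAR mapping out of pnv_npu2_init_context() and into a one-time setup step.

/*
 * Illustrative sketch: map every GPU on a GPU PE to the host partition
 * (lparid 0) once at boot, so later pnv_npu2_init_context() calls no
 * longer need to talk to OPAL themselves.
 */
#include <asm/reg.h>	/* MSR_DR, MSR_PR, MSR_HV */
#include "pci.h"	/* struct pnv_ioda_pe, pnv_npu2_map_lpar() (assumed location) */

static void pnv_example_setup_gpu_pe(struct pnv_ioda_pe *gpe)
{
	/*
	 * Only DR/PR/HV are meaningful here: pnv_npu2_init_context()
	 * now rejects any other MSR bit with -EINVAL.
	 */
	pnv_npu2_map_lpar(gpe, MSR_DR | MSR_PR | MSR_HV);
}

Per-device mapping is also available through pnv_npu2_map_lpar_dev(gpdev, lparid, msr), which is the helper the bus-level pnv_npu2_map_lpar() loops over.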