@@ -91,6 +91,14 @@ struct its_device {
	u32			device_id;
};

+static LIST_HEAD(its_nodes);
+static DEFINE_SPINLOCK(its_lock);
+static struct device_node *gic_root_node;
+static struct rdists *gic_rdists;
+
+#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
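+
+/*
+ * gic_data_rdist() resolves the current CPU's redistributor through
+ * the per-cpu pointer in gic_rdists (presumably handed over by the
+ * GICv3 core driver at init time). raw_cpu_ptr() is sufficient here
+ * because the callers below run on the very CPU whose redistributor
+ * is being initialised.
+ */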
+
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
@@ -689,3 +697,287 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)

	kfree(bitmap);
}
+
+/*
+ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_PROPBASE_SZ		SZ_64K
+#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)
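+
+/*
+ * Worked example: LPI_PENDBASE_SZ = 64K / 8 + 1K = 9K. The 64K
+ * potential LPIs need 64K / 8 = 8K of pending bits, and the 8192
+ * interrupt IDs below 8192 (SGIs/PPIs/SPIs) account for the extra
+ * 8192 / 8 = 1K.
+ */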
+
+/*
+ * This is how many bits of ID we need, including the useless ones.
+ */
+#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)
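+/*
+ * With the sizes above, LPI_NRBITS = ilog2(64K + 8K) = 16, since
+ * 2^16 = 64K <= 72K < 128K = 2^17; GICR_PROPBASER.IDbits then gets
+ * programmed with LPI_NRBITS - 1 = 15 in its_cpu_init_lpis().
+ */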
+
+#define LPI_PROP_DEFAULT_PRIO	0xa0
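+/* 0xa0 matches the default priority the kernel programs for SPIs/PPIs */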
+
+static int __init its_alloc_lpi_tables(void)
+{
+	phys_addr_t paddr;
+
+	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
+					    get_order(LPI_PROPBASE_SZ));
+	if (!gic_rdists->prop_page) {
+		pr_err("Failed to allocate PROPBASE\n");
+		return -ENOMEM;
+	}
+
+	paddr = page_to_phys(gic_rdists->prop_page);
+	pr_info("GIC: using LPI property table @%pa\n", &paddr);
+
+	/* Priority 0xa0, Group-1, disabled */
+	memset(page_address(gic_rdists->prop_page),
+	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+	       LPI_PROPBASE_SZ);
+
+	/* Make sure the GIC will observe the written configuration */
+	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+
+	return 0;
+}
+
+static const char *its_base_type_string[] = {
+	[GITS_BASER_TYPE_DEVICE]	= "Devices",
+	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
+	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
+	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
+	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
+	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
+	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
+};
+
+static void its_free_tables(struct its_node *its)
+{
+	int i;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		if (its->tables[i]) {
+			free_page((unsigned long)its->tables[i]);
+			its->tables[i] = NULL;
+		}
+	}
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+	int err;
+	int i;
+	int psz = PAGE_SIZE;
+	u64 shr = GITS_BASER_InnerShareable;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
+		u64 type = GITS_BASER_TYPE(val);
+		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
+		u64 tmp;
+		void *base;
+
+		if (type == GITS_BASER_TYPE_NONE)
+			continue;
+
+		/* We're lazy and only allocate a single page for now */
+		base = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!base) {
+			err = -ENOMEM;
+			goto out_free;
+		}
+
+		its->tables[i] = base;
+
+retry_baser:
+		val = (virt_to_phys(base) |
+		       (type << GITS_BASER_TYPE_SHIFT) |
+		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+		       GITS_BASER_WaWb |
+		       shr |
+		       GITS_BASER_VALID);
+
+		switch (psz) {
+		case SZ_4K:
+			val |= GITS_BASER_PAGE_SIZE_4K;
+			break;
+		case SZ_16K:
+			val |= GITS_BASER_PAGE_SIZE_16K;
+			break;
+		case SZ_64K:
+			val |= GITS_BASER_PAGE_SIZE_64K;
+			break;
+		}
+
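+		/*
+		 * GITS_BASER.Size is encoded as the number of psz-sized
+		 * pages minus one, so this exposes the PAGE_SIZE / psz
+		 * ITS pages contained in the single page allocated above.
+		 */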
+		val |= (PAGE_SIZE / psz) - 1;
+
+		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
+		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+
+		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+			/*
+			 * Shareability didn't stick. Just use
+			 * whatever the read reported, which is likely
+			 * to be the only thing this redistributor
+			 * supports.
+			 */
+			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+			goto retry_baser;
+		}
+
+		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+			/*
+			 * Page size didn't stick. Let's try a smaller
+			 * size and retry. If we reach 4K, then
+			 * something is horribly wrong...
+			 */
+			switch (psz) {
+			case SZ_16K:
+				psz = SZ_4K;
+				goto retry_baser;
+			case SZ_64K:
+				psz = SZ_16K;
+				goto retry_baser;
+			}
+		}
+
+		if (val != tmp) {
+			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
+			       its->msi_chip.of_node->full_name, i,
+			       (unsigned long) val, (unsigned long) tmp);
+			err = -ENXIO;
+			goto out_free;
+		}
+
+		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
+			(int)(PAGE_SIZE / entry_size),
+			its_base_type_string[type],
+			(unsigned long)virt_to_phys(base),
+			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+	}
+
+	return 0;
+
+out_free:
+	its_free_tables(its);
+
+	return err;
+}
+
+static int its_alloc_collections(struct its_node *its)
+{
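+	/*
+	 * One entry per possible CPU: its_cpu_init_collection() fills
+	 * in and maps entry [smp_processor_id()] as each CPU is
+	 * brought up.
+	 */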
+	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
+				   GFP_KERNEL);
+	if (!its->collections)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void its_cpu_init_lpis(void)
+{
+	void __iomem *rbase = gic_data_rdist_rd_base();
+	struct page *pend_page;
+	u64 val, tmp;
+
+	/* If we didn't allocate the pending table yet, do it now */
+	pend_page = gic_data_rdist()->pend_page;
+	if (!pend_page) {
+		phys_addr_t paddr;
+		/*
+		 * The pending pages have to be at least 64kB aligned,
+		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+		 */
+		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
+					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
+		if (!pend_page) {
+			pr_err("Failed to allocate PENDBASE for CPU%d\n",
+			       smp_processor_id());
+			return;
+		}
+
+		/* Make sure the GIC will observe the zero-ed page */
+		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+
+		paddr = page_to_phys(pend_page);
+		pr_info("CPU%d: using LPI pending table @%pa\n",
+			smp_processor_id(), &paddr);
+		gic_data_rdist()->pend_page = pend_page;
+	}
+
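+	/*
+	 * The base registers may only be changed while LPIs are off
+	 * (the architecture makes reprogramming them with EnableLPIs
+	 * set unpredictable), hence the disable/program/enable
+	 * sequence below.
+	 */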
+	/* Disable LPIs */
+	val = readl_relaxed(rbase + GICR_CTLR);
+	val &= ~GICR_CTLR_ENABLE_LPIS;
+	writel_relaxed(val, rbase + GICR_CTLR);
+
+	/*
+	 * Make sure any change to the table is observable by the GIC.
+	 */
+	dsb(sy);
+
+	/* set PROPBASE */
+	val = (page_to_phys(gic_rdists->prop_page) |
+	       GICR_PROPBASER_InnerShareable |
+	       GICR_PROPBASER_WaWb |
+	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
+
+	writeq_relaxed(val, rbase + GICR_PROPBASER);
+	tmp = readq_relaxed(rbase + GICR_PROPBASER);
+
+	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
+		pr_info_once("GIC: using cache flushing for LPI property table\n");
+		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
+	}
+
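+	/*
+	 * GICR_PENDBASER uses the same shareability/cacheability field
+	 * layout as GICR_PROPBASER, which is why the PROPBASER field
+	 * macros are reused here.
+	 */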
+	/* set PENDBASE */
+	val = (page_to_phys(pend_page) |
+	       GICR_PROPBASER_InnerShareable |
+	       GICR_PROPBASER_WaWb);
+
+	writeq_relaxed(val, rbase + GICR_PENDBASER);
+
+	/* Enable LPIs */
+	val = readl_relaxed(rbase + GICR_CTLR);
+	val |= GICR_CTLR_ENABLE_LPIS;
+	writel_relaxed(val, rbase + GICR_CTLR);
+
+	/* Make sure the GIC has seen the above */
+	dsb(sy);
+}
+
+static void its_cpu_init_collection(void)
+{
+	struct its_node *its;
+	int cpu;
+
+	spin_lock(&its_lock);
+	cpu = smp_processor_id();
+
+	list_for_each_entry(its, &its_nodes, entry) {
+		u64 target;
+
+		/*
+		 * We now have to bind each collection to its target
+		 * redistributor.
+		 */
+		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+			/*
+			 * This ITS wants the physical address of the
+			 * redistributor.
+			 */
+			target = gic_data_rdist()->phys_base;
+		} else {
+			/*
+			 * This ITS wants a linear CPU number.
+			 */
+			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+			target = GICR_TYPER_CPU_NUMBER(target);
+		}
+
+		/* Perform collection mapping */
+		its->collections[cpu].target_address = target;
+		its->collections[cpu].col_id = cpu;
+
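+		/*
+		 * MAPC binds collection 'cpu' to this redistributor,
+		 * and INVALL makes the ITS reload any configuration it
+		 * may have cached for interrupts in that collection.
+		 */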
+		its_send_mapc(its, &its->collections[cpu], 1);
+		its_send_invall(its, &its->collections[cpu]);
+	}
+
+	spin_unlock(&its_lock);
+}