@@ -71,7 +71,8 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
 /* meta feature for alternatives */
 static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+
 
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
@@ -626,6 +627,49 @@ u64 read_system_reg(u32 id)
 	return regp->sys_val;
 }
 
+/*
+ * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
+ * Read the system register on the current CPU
+ */
+static u64 __raw_read_system_reg(u32 sys_id)
+{
+	switch (sys_id) {
+	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
+	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
+	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
+	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
+	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
+	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
+	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
+	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
+	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
+	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
+	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
+	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
+	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR4_EL1);
+	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
+	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
+	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);
+
+	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
+	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
+	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
+	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
+	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
+	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
+	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
+	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
+	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);
+
+	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
+	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
+	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
+	default:
+		BUG();
+		return 0;
+	}
+}
+
 #include <linux/irqchip/arm-gic-v3.h>
 
 static bool
@@ -637,19 +681,24 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 }
 
 static bool
-has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	u64 val;
 
-	val = read_system_reg(entry->sys_reg);
+	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+	if (scope == SCOPE_SYSTEM)
+		val = read_system_reg(entry->sys_reg);
+	else
+		val = __raw_read_system_reg(entry->sys_reg);
+
 	return feature_matches(val, entry);
 }
 
-static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
+static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	bool has_sre;
 
-	if (!has_cpuid_feature(entry))
+	if (!has_cpuid_feature(entry, scope))
 		return false;
 
 	has_sre = gic_enable_sre();
@@ -660,7 +709,7 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
 	return has_sre;
 }
 
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
 {
 	u32 midr = read_cpuid_id();
 	u32 rv_min, rv_max;
@@ -672,7 +721,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
 	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
 }
 
-static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
 {
 	return is_kernel_in_hyp_mode();
 }
@@ -681,6 +730,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_useable_gicv3_cpuif,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
@@ -691,6 +741,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Privileged Access Never",
 		.capability = ARM64_HAS_PAN,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
@@ -703,6 +754,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "LSE atomic instructions",
 		.capability = ARM64_HAS_LSE_ATOMICS,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
@@ -713,12 +765,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Software prefetching using PRFM",
 		.capability = ARM64_HAS_NO_HW_PREFETCH,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_no_hw_prefetch,
 	},
 #ifdef CONFIG_ARM64_UAO
 	{
 		.desc = "User Access Override",
 		.capability = ARM64_HAS_UAO,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
 		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
@@ -729,17 +783,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #ifdef CONFIG_ARM64_PAN
 	{
 		.capability = ARM64_ALT_PAN_NOT_UAO,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = cpufeature_pan_not_uao,
 	},
 #endif /* CONFIG_ARM64_PAN */
 	{
 		.desc = "Virtualization Host Extensions",
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = runs_at_el2,
 	},
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
+		.def_scope = SCOPE_SYSTEM,
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
@@ -752,6 +809,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
 	{							\
 		.desc = #cap,					\
+		.def_scope = SCOPE_SYSTEM,			\
 		.matches = has_cpuid_feature,			\
 		.sys_reg = reg,					\
 		.field_pos = field,				\
@@ -834,15 +892,15 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 {
 	for (; hwcaps->matches; hwcaps++)
-		if (hwcaps->matches(hwcaps))
+		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
 			cap_set_elf_hwcap(hwcaps);
 }
 
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			    const char *info)
 {
 	for (; caps->matches; caps++) {
-		if (!caps->matches(caps))
+		if (!caps->matches(caps, caps->def_scope))
 			continue;
 
 		if (!cpus_have_cap(caps->capability) && caps->desc)
@@ -878,48 +936,6 @@ static inline void set_sys_caps_initialised(void)
 	sys_caps_initialised = true;
 }
 
-/*
- * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
- */
-static u64 __raw_read_system_reg(u32 sys_id)
-{
-	switch (sys_id) {
-	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
-	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
-	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
-	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
-	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
-	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
-	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
-	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
-	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
-	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
-	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
-	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
-	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR4_EL1);
-	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
-	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
-	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);
-
-	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
-	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
-	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
-	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
-	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
-	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
-	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
-	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
-	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);
-
-	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
-	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
-	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
-	default:
-		BUG();
-		return 0;
-	}
-}
-
 /*
  * Check for CPU features that are used in early boot
  * based on the Boot CPU value.
@@ -934,28 +950,25 @@ static void
 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 {
 
-	for (; caps->matches; caps++) {
-		if (!cpus_have_elf_hwcap(caps))
-			continue;
-		if (!feature_matches(__raw_read_system_reg(caps->sys_reg), caps)) {
+	for (; caps->matches; caps++)
+		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
 			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
 			cpu_die_early();
 		}
-	}
 }
 
 static void
 verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++) {
-		if (!cpus_have_cap(caps->capability) || !caps->sys_reg)
+		if (!cpus_have_cap(caps->capability))
 			continue;
 		/*
 		 * If the new CPU misses an advertised feature, we cannot proceed
 		 * further, park the cpu.
 		 */
-		if (!feature_matches(__raw_read_system_reg(caps->sys_reg), caps)) {
+		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
 			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
 			cpu_die_early();
@@ -1026,7 +1039,7 @@ void __init setup_cpu_features(void)
 }
 
 static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
 	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
 }
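
For context, the core change in this diff is that every capability's matches() callback now takes a scope argument: SCOPE_SYSTEM selects the sanitised, system-wide register view via read_system_reg(), while SCOPE_LOCAL_CPU selects the raw, per-CPU view via __raw_read_system_reg(), and each capability carries a default in .def_scope. The stand-alone C sketch below only models that dispatch pattern; the helper names (fake_sysreg_sanitised, fake_sysreg_raw_local, has_feature, cap_table) and the register values are hypothetical stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical scope values mirroring the patch's SCOPE_SYSTEM / SCOPE_LOCAL_CPU. */
enum scope { SCOPE_SYSTEM, SCOPE_LOCAL_CPU };

/* Toy stand-ins for the two register views; the values are made up. */
static uint64_t fake_sysreg_sanitised(void) { return 0x1; } /* system-wide safe value */
static uint64_t fake_sysreg_raw_local(void) { return 0x1; } /* this CPU's raw value */

struct capability {
	const char *desc;
	int def_scope;
	/* Same callback shape as after this patch: the entry plus a scope argument. */
	bool (*matches)(const struct capability *entry, int scope);
};

static bool has_feature(const struct capability *entry, int scope)
{
	uint64_t val = (scope == SCOPE_SYSTEM) ? fake_sysreg_sanitised()
					       : fake_sysreg_raw_local();
	(void)entry;
	return (val & 0x1) != 0; /* toy "feature present" check */
}

static const struct capability cap_table[] = {
	{ .desc = "toy feature", .def_scope = SCOPE_SYSTEM, .matches = has_feature },
	{ .matches = NULL }, /* sentinel: a NULL .matches terminates the walk */
};

int main(void)
{
	/* Boot-time pass: use each capability's default (system-wide) scope,
	 * the way update_cpu_capabilities()/setup_elf_hwcaps() use def_scope above. */
	for (const struct capability *c = cap_table; c->matches; c++)
		printf("%s (system scope): %d\n", c->desc, c->matches(c, c->def_scope));

	/* Hotplug-style verification: re-check against this CPU only, the way
	 * verify_local_cpu_features()/verify_local_elf_hwcaps() pass SCOPE_LOCAL_CPU. */
	for (const struct capability *c = cap_table; c->matches; c++)
		printf("%s (local scope):  %d\n", c->desc, c->matches(c, SCOPE_LOCAL_CPU));

	return 0;
}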