 #include <asm/sclp.h>
 #include <asm/cpacf.h>
 #include <asm/timex.h>
+#include <asm/ap.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -844,45 +845,67 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
 
 	kvm_s390_vcpu_block_all(kvm);
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
+	kvm_for_each_vcpu(i, vcpu, kvm) {
 		kvm_s390_vcpu_crypto_setup(vcpu);
+		/* recreate the shadow crycb by leaving the VSIE handler */
+		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
+	}
 
 	kvm_s390_vcpu_unblock_all(kvm);
 }
 
 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	if (!test_kvm_facility(kvm, 76))
-		return -EINVAL;
-
 	mutex_lock(&kvm->lock);
 	switch (attr->attr) {
 	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+		if (!test_kvm_facility(kvm, 76))
+			return -EINVAL;
 		get_random_bytes(
 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 		kvm->arch.crypto.aes_kw = 1;
 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+		if (!test_kvm_facility(kvm, 76))
+			return -EINVAL;
 		get_random_bytes(
 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 		kvm->arch.crypto.dea_kw = 1;
 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+		if (!test_kvm_facility(kvm, 76))
+			return -EINVAL;
 		kvm->arch.crypto.aes_kw = 0;
 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+		if (!test_kvm_facility(kvm, 76))
+			return -EINVAL;
 		kvm->arch.crypto.dea_kw = 0;
 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
 		break;
+	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+		if (!ap_instructions_available()) {
+			mutex_unlock(&kvm->lock);
+			return -EOPNOTSUPP;
+		}
+		kvm->arch.crypto.apie = 1;
+		break;
+	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+		if (!ap_instructions_available()) {
+			mutex_unlock(&kvm->lock);
+			return -EOPNOTSUPP;
+		}
+		kvm->arch.crypto.apie = 0;
+		break;
 	default:
 		mutex_unlock(&kvm->lock);
 		return -ENXIO;
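
The ENABLE/DISABLE_APIE attributes above are driven from userspace through the KVM_SET_DEVICE_ATTR ioctl on the VM file descriptor. A minimal sketch of that caller side, assuming headers that already carry these attributes and an illustrative, already-open vm_fd (none of this is part of the patch):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: ask KVM to interpret AP instructions for this guest. */
static int enable_apie(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = KVM_S390_VM_CRYPTO_ENABLE_APIE,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

On a host without the AP instructions, the handler above rejects this with -EOPNOTSUPP, which the caller would see as errno EOPNOTSUPP.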
@@ -1491,6 +1514,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 		ret = 0;
 		break;
+	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+		ret = ap_instructions_available() ? 0 : -ENXIO;
+		break;
 	default:
 		ret = -ENXIO;
 		break;
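
The has_attr change above is what backs a userspace probe for the new attributes before they are used; a companion sketch to the previous example, again with an assumed vm_fd:

/* Sketch only: non-zero if the APIE attributes can be toggled on this VM. */
static int apie_supported(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = KVM_S390_VM_CRYPTO_ENABLE_APIE,
	};

	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}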
@@ -1992,55 +2019,60 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return r;
 }
 
-static int kvm_s390_query_ap_config(u8 *config)
-{
-	u32 fcn_code = 0x04000000UL;
-	u32 cc = 0;
-
-	memset(config, 0, 128);
-	asm volatile(
-		"lgr 0,%1\n"
-		"lgr 2,%2\n"
-		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"0: ipm %0\n"
-		"srl %0,28\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+r" (cc)
-		: "r" (fcn_code), "r" (config)
-		: "cc", "0", "2", "memory"
-	);
-
-	return cc;
-}
-
 static int kvm_s390_apxa_installed(void)
 {
-	u8 config[128];
-	int cc;
-
-	if (test_facility(12)) {
-		cc = kvm_s390_query_ap_config(config);
+	struct ap_config_info info;
 
-		if (cc)
-			pr_err("PQAP(QCI) failed with cc=%d", cc);
-		else
-			return config[0] & 0x40;
+	if (ap_instructions_available()) {
+		if (ap_qci(&info) == 0)
+			return info.apxa;
 	}
 
 	return 0;
 }
 
+/*
+ * The format of the crypto control block (CRYCB) is specified in the 3 low
+ * order bits of the CRYCB designation (CRYCBD) field as follows:
+ * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
+ *	     AP extended addressing (APXA) facility are installed.
+ * Format 1: The APXA facility is not installed but the MSAX3 facility is.
+ * Format 2: Both the APXA and MSAX3 facilities are installed.
+ */
 static void kvm_s390_set_crycb_format(struct kvm *kvm)
 {
 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
 
+	/* Clear the CRYCB format bits - i.e., set format 0 by default */
+	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
+
+	/* Check whether MSAX3 is installed */
+	if (!test_kvm_facility(kvm, 76))
+		return;
+
 	if (kvm_s390_apxa_installed())
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
 	else
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+	mutex_lock(&kvm->lock);
+	kvm_s390_vcpu_block_all(kvm);
+
+	memset(&kvm->arch.crypto.crycb->apcb0, 0,
+	       sizeof(kvm->arch.crypto.crycb->apcb0));
+	memset(&kvm->arch.crypto.crycb->apcb1, 0,
+	       sizeof(kvm->arch.crypto.crycb->apcb1));
+
+	/* recreate the shadow crycb for each vcpu */
+	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+	kvm_s390_vcpu_unblock_all(kvm);
+	mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
+
 static u64 kvm_s390_get_initial_cpuid(void)
 {
 	struct cpuid cpuid;
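
To illustrate the format selection in kvm_s390_set_crycb_format() above: the CRYCB designation carries the block's address with the format encoded in its low-order bits, so masking with CRYCB_FORMAT_MASK falls back to format 0 and the OR operations upgrade it when MSAX3 (and APXA) are present. The constant values below are an assumption for illustration only; the authoritative definitions live in the s390 KVM headers:

/* Illustrative CRYCBD format bit values (assumed, not taken from this patch). */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0     0x00000000	/* neither MSAX3 nor APXA installed */
#define CRYCB_FORMAT1     0x00000001	/* MSAX3 installed, APXA not */
#define CRYCB_FORMAT2     0x00000003	/* both MSAX3 and APXA installed */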
@@ -2052,12 +2084,12 @@ static u64 kvm_s390_get_initial_cpuid(void)
 
 static void kvm_s390_crypto_init(struct kvm *kvm)
 {
-	if (!test_kvm_facility(kvm, 76))
-		return;
-
 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
 	kvm_s390_set_crycb_format(kvm);
 
+	if (!test_kvm_facility(kvm, 76))
+		return;
+
 	/* Enable AES/DEA protected key functions by default */
 	kvm->arch.crypto.aes_kw = 1;
 	kvm->arch.crypto.dea_kw = 1;
@@ -2583,17 +2615,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
-	if (!test_kvm_facility(vcpu->kvm, 76))
+	/*
+	 * If the AP instructions are not being interpreted and the MSAX3
+	 * facility is not configured for the guest, there is nothing to set up.
+	 */
+	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
 		return;
 
+	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+	vcpu->arch.sie_block->eca &= ~ECA_APIE;
 
+	if (vcpu->kvm->arch.crypto.apie)
+		vcpu->arch.sie_block->eca |= ECA_APIE;
+
+	/* Set up protected key support */
 	if (vcpu->kvm->arch.crypto.aes_kw)
 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
 	if (vcpu->kvm->arch.crypto.dea_kw)
 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
-
-	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 }
 
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
@@ -2770,18 +2810,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 	exit_sie(vcpu);
 }
 
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&vcpu->arch.sie_block->prog20) &
+	       (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+	kvm_s390_vsie_kick(vcpu);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -3198,6 +3245,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 
 	/* nothing to do, just clear the request */
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+	/* we left the vsie handler, nothing to do, just clear the request */
+	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
 
 	return 0;
 }