@@ -1594,8 +1594,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
-					       unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+					       unsigned long hva,
+					       unsigned long map_size)
 {
 	gpa_t gpa_start;
 	hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1611,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 
 	/*
 	 * Pages belonging to memslots that don't have the same alignment
-	 * within a PMD for userspace and IPA cannot be mapped with stage-2
-	 * PMD entries, because we'll end up mapping the wrong pages.
+	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
 	 *
 	 * Consider a layout like the following:
 	 *
 	 *    memslot->userspace_addr:
 	 *    +-----+--------------------+--------------------+---+
-	 *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
 	 *    +-----+--------------------+--------------------+---+
 	 *
 	 *    memslot->base_gfn << PAGE_SIZE:
 	 *      +---+--------------------+--------------------+-----+
-	 *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
 	 *      +---+--------------------+--------------------+-----+
 	 *
-	 * If we create those stage-2 PMDs, we'll end up with this incorrect
+	 * If we create those stage-2 blocks, we'll end up with this incorrect
 	 * mapping:
 	 *   d -> f
 	 *   e -> g
 	 *   f -> h
 	 */
-	if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
 		return false;
 
 	/*
 	 * Next, let's make sure we're not trying to map anything not covered
-	 * by the memslot. This means we have to prohibit PMD size mappings
-	 * for the beginning and end of a non-PMD aligned and non-PMD sized
+	 * by the memslot. This means we have to prohibit block size mappings
+	 * for the beginning and end of a non-block aligned and non-block sized
 	 * memory slot (illustrated by the head and tail parts of the
 	 * userspace view above containing pages 'abcde' and 'xyz',
 	 * respectively).
@@ -1646,8 +1647,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 	 * userspace_addr or the base_gfn, as both are equally aligned (per
 	 * the check above) and equally sized.
 	 */
-	return (hva & S2_PMD_MASK) >= uaddr_start &&
-	       (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+	return (hva & ~(map_size - 1)) >= uaddr_start &&
+	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
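The helper's two checks boil down to power-of-two mask arithmetic. As a standalone illustration (not the kernel code itself; the function name below is made up for the sketch), assuming map_size is a power of two such as PMD_SIZE or PUD_SIZE:

#include <stdbool.h>

/*
 * Sketch of the checks in fault_supports_stage2_huge_mapping():
 * (map_size - 1) masks out the offset within a block, and
 * ~(map_size - 1) rounds an address down to a block boundary.
 */
static bool huge_mapping_allowed(unsigned long uaddr_start,
				 unsigned long uaddr_end,
				 unsigned long gpa_start,
				 unsigned long hva,
				 unsigned long map_size)
{
	/* Userspace address and IPA must share the same offset within a block */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/* The whole block containing hva must lie inside the memslot */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

For example, with a 2MB map_size, a memslot whose userspace address is 2MB aligned but whose IPA starts 4KB into a 2MB block fails the first check; that skew is exactly the incorrect d -> f, e -> g association drawn in the comment above.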
@@ -1676,12 +1677,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
-	if (!fault_supports_stage2_pmd_mappings(memslot, hva))
-		force_pte = true;
-
-	if (logging_active)
-		force_pte = true;
-
 	/* Let's check if we will get back a huge page backed by hugetlbfs */
 	down_read(&current->mm->mmap_sem);
 	vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,18 +1687,22 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	vma_pagesize = vma_kernel_pagesize(vma);
+	if (logging_active ||
+	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+		force_pte = true;
+		vma_pagesize = PAGE_SIZE;
+	}
+
 	/*
 	 * The stage2 has a minimum of 2 level table (For arm64 see
 	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
 	 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
 	 * As for PUD huge maps, we must make sure that we have at least
 	 * 3 levels, i.e, PMD is not folded.
 	 */
-	if ((vma_pagesize == PMD_SIZE ||
-	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
-	    !force_pte) {
+	if (vma_pagesize == PMD_SIZE ||
+	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-	}
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
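Net effect of the user_mem_abort() changes: the decision to fall back to PTE mappings (dirty logging active, or a memslot layout that cannot take a block mapping) is now made once, up front, by clamping vma_pagesize to PAGE_SIZE, so the later PMD/PUD test no longer needs a separate !force_pte term. A rough sketch of the resulting size selection, using illustrative stand-in names rather than the kernel API:

#include <stdbool.h>

#define SZ_4K	(1UL << 12)	/* PAGE_SIZE on a 4K-page kernel */
#define SZ_2M	(1UL << 21)	/* PMD_SIZE */
#define SZ_1G	(1UL << 30)	/* PUD_SIZE */

/* Pick a stage-2 mapping size for a fault (illustration only). */
static unsigned long stage2_map_size(unsigned long vma_pagesize,
				     bool logging_active,
				     bool slot_supports_huge,
				     bool stage2_has_pmd)
{
	/* Dirty logging or an unsuitable memslot forces PTE mappings */
	if (logging_active || !slot_supports_huge)
		return SZ_4K;

	if (vma_pagesize == SZ_2M)
		return SZ_2M;	/* PMD block: stage 2 always has >= 2 levels */

	if (vma_pagesize == SZ_1G && stage2_has_pmd)
		return SZ_1G;	/* PUD block: needs a real PMD level (>= 3 levels) */

	return SZ_4K;
}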