
Commit 574c0cf

Merge tag 'kvm-ppc-next-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
Second PPC KVM update for 4.20. Two commits; one is an optimization for PCI pass-through, and the other disables nested HV-KVM on early POWER9 chips that need a particular hardware bug workaround.
2 parents: cbe3f89 + 6e301a8

5 files changed: 38 additions & 11 deletions
arch/powerpc/include/asm/iommu.h

Lines changed: 1 addition & 1 deletion

@@ -126,7 +126,7 @@ struct iommu_table {
 	int it_nid;
 };
 
-#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
 	((tbl)->it_ops->useraddrptr((tbl), (entry), false))
 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
 	((tbl)->it_ops->useraddrptr((tbl), (entry), true))
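
Note: the rename is semantic rather than functional. The third argument to it_ops->useraddrptr() selects whether a missing indirect table level may be allocated during the lookup, so "read-only" (RO) describes the macro's guarantee better than "real mode" (RM) did. A minimal user-space sketch of that contract, with an invented two-level layout that is not the kernel implementation:

/* Sketch only: invented layout and types, not the kernel's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct table_sketch {
	unsigned long it_level_size;	/* entries per level, a power of two */
	uint64_t *levels[16];		/* lazily allocated indirect levels */
};

static uint64_t *useraddrptr_sketch(struct table_sketch *tbl,
				    unsigned long entry, bool alloc)
{
	uint64_t **level = &tbl->levels[entry / tbl->it_level_size];

	if (!*level) {
		if (!alloc)
			return NULL;	/* RO lookup: report "never allocated" */
		*level = calloc(tbl->it_level_size, sizeof(**level));
	}
	return *level ? *level + (entry % tbl->it_level_size) : NULL;
}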

arch/powerpc/kvm/book3s_64_vio.c

Lines changed: 2 additions & 3 deletions

@@ -410,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
-		/* it_userspace allocation might be delayed */
-		return H_TOO_HARD;
+		return H_SUCCESS;
 
 	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
 	if (!mem)
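
Note: this is the behavioral half of the change. With the old read-write lookup, a NULL pua could mean the allocation had merely been deferred, so the only safe answer was H_TOO_HARD (retry in virtual mode). With the read-only lookup, NULL means the indirect level was never allocated, so no page was ever mapped through this entry and there is nothing to unpin. A stand-alone restatement of the new early exit (constant values are placeholders; the real ones come from the kernel headers):

/* Placeholder constants for illustration only. */
enum { H_SUCCESS_SKETCH = 0, H_TOO_HARD_SKETCH = 9999 };

static long mapped_dec_sketch(const unsigned long *pua)
{
	if (!pua)
		/*
		 * Read-only lookup returned NULL: the level was never
		 * allocated, hence nothing was mapped and nothing needs
		 * decrementing. Succeed instead of deferring.
		 */
		return H_SUCCESS_SKETCH;

	/* ... otherwise look up the registered memory and unpin it ... */
	return H_SUCCESS_SKETCH;
}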

arch/powerpc/kvm/book3s_64_vio_hv.c

Lines changed: 3 additions & 3 deletions

@@ -214,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
 
 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 			(*direction == DMA_BIDIRECTIONAL))) {
-		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 		/*
 		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
 		 * calling this so we still get here a valid UA.
@@ -240,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
 		/* it_userspace allocation might be delayed */
@@ -304,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 {
 	long ret;
 	unsigned long hpa = 0;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 	struct mm_iommu_table_group_mem_t *mem;
 
 	if (!pua)

arch/powerpc/kvm/book3s_hv.c

Lines changed: 11 additions & 2 deletions

@@ -4174,7 +4174,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
-		if (kvm->arch.threads_indep && kvm_is_radix(kvm))
+		/*
+		 * The early POWER9 chips that can't mix radix and HPT threads
+		 * on the same core also need the workaround for the problem
+		 * where the TLB would prefetch entries in the guest exit path
+		 * for radix guests using the guest PIDR value and LPID 0.
+		 * The workaround is in the old path (kvmppc_run_vcpu())
+		 * but not the new path (kvmhv_run_single_vcpu()).
+		 */
+		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+		    !no_mixing_hpt_and_radix)
 			r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
 						  vcpu->arch.vcore->lpcr);
 		else
@@ -5196,7 +5205,7 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 {
 	if (!nested)
 		return -EPERM;
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+	if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
 		return -ENODEV;
 
 	/* kvm == NULL means the caller is testing if the capability exists */
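
Note: both hunks key off no_mixing_hpt_and_radix, an existing flag set on the affected early POWER9 parts. The streamlined kvmhv_run_single_vcpu() entry lacks the TLB-prefetch workaround, so those chips are steered back to the old path; and since nested HV apparently relies on the streamlined entry (my reading of the second hunk), kvmhv_enable_nested() now refuses it there as well. A condensed user-space model of the two gates (the flag name and decision shape come from the diff; everything else is invented):

#include <stdbool.h>
#include <stdio.h>

static bool no_mixing_hpt_and_radix;	/* set on affected early POWER9 */

static const char *run_path_sketch(bool threads_indep, bool radix)
{
	/* The new path has no TLB-prefetch workaround, so affected chips
	 * must keep using the old one. */
	if (threads_indep && radix && !no_mixing_hpt_and_radix)
		return "kvmhv_run_single_vcpu";	/* new streamlined path */
	return "kvmppc_run_vcpu";		/* old path with workaround */
}

static int enable_nested_sketch(bool arch_300)
{
	if (!arch_300 || no_mixing_hpt_and_radix)
		return -19;	/* -ENODEV: nested HV unavailable */
	return 0;
}

int main(void)
{
	no_mixing_hpt_and_radix = true;		/* model an affected chip */
	printf("%s, nested: %d\n",
	       run_path_sketch(true, true), enable_nested_sketch(true));
	return 0;
}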

drivers/vfio/vfio_iommu_spapr_tce.c

Lines changed: 21 additions & 2 deletions

@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
-	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
 
 	if (!pua)
 		return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
 	unsigned long oldhpa;
 	long ret;
 	enum dma_data_direction direction;
+	unsigned long lastentry = entry + pages;
+
+	for ( ; entry < lastentry; ++entry) {
+		if (tbl->it_indirect_levels && tbl->it_userspace) {
+			/*
+			 * For multilevel tables, we can take a shortcut here
+			 * and skip some TCEs as we know that the userspace
+			 * addresses cache is a mirror of the real TCE table
+			 * and if it is missing some indirect levels, then
+			 * the hardware table does not have them allocated
+			 * either and therefore does not require updating.
+			 */
+			__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+					entry);
+			if (!pua) {
+				/* align to level_size which is power of two */
+				entry |= tbl->it_level_size - 1;
+				continue;
+			}
+		}
 
-	for ( ; pages; --pages, ++entry) {
 		cond_resched();
 
 		direction = DMA_NONE;
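
Note: the skip relies on it_level_size being a power of two. OR-ing (it_level_size - 1) into entry jumps to the last index covered by the unallocated level, and the loop's ++entry then lands on the first entry of the next level, so at most one read-only lookup is paid per missing level. A stand-alone demonstration with made-up numbers:

#include <stdio.h>

int main(void)
{
	const unsigned long it_level_size = 512;	/* 2^9 entries per level */
	unsigned long entry = 1000;			/* somewhere mid-level */

	entry |= it_level_size - 1;	/* 1000 -> 1023, last index of its level */
	++entry;			/* the loop increment: 1024, next level */
	printf("resumed at entry %lu\n", entry);
	return 0;
}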
