
Commit 30bda41

kvaneesh authored and mpe committed

powerpc/mm: Drop WIMG in favour of new constants
PowerISA 3.0 introduces two pte bits with the below meaning for radix:

  00 -> Normal Memory
  01 -> Strong Access Order (SAO)
  10 -> Non idempotent I/O (Cache inhibited and guarded)
  11 -> Tolerant I/O (Cache inhibited)

We drop the existing WIMG bits in the Linux page table in favour of the
above constants. We lose _PAGE_WRITETHRU with this conversion: the only
user of write-through was pgprot_cached_wthru(), used by
fbdev/controlfb.c (the Apple control display driver), which is PPC32-only.

With respect to _PAGE_COHERENCE, we have been marking hptes coherent for
some time now: htab_convert_pte_flags() always added HPTE_R_M.

NOTE: The KVM changes need closer review.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent: 72176dd

File tree: 9 files changed, +66 −90 lines
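To make the new encoding concrete before the per-file diffs: the cache-control field is two bits wide, and every helper touched by this commit keys off that field as a whole. Below is a stand-alone sketch, not kernel code — the _PAGE_* values are copied from the hash.h hunk that follows, while cache_mode() and main() are purely illustrative.

#include <stdio.h>

#define _PAGE_SAO            0x00010 /* 01: strong access order */
#define _PAGE_NON_IDEMPOTENT 0x00020 /* 10: non idempotent I/O */
#define _PAGE_TOLERANT       0x00030 /* 11: tolerant I/O */
#define _PAGE_CACHE_CTL      (_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

static const char *cache_mode(unsigned long pte)
{
	/* classify a pte by its two-bit cache-control field */
	switch (pte & _PAGE_CACHE_CTL) {
	case 0:                    return "normal memory";
	case _PAGE_SAO:            return "strong access order (SAO)";
	case _PAGE_NON_IDEMPOTENT: return "non idempotent I/O (CI + guarded)";
	case _PAGE_TOLERANT:       return "tolerant I/O (CI)";
	}
	return "unreachable";
}

int main(void)
{
	unsigned long modes[] = { 0, _PAGE_SAO, _PAGE_NON_IDEMPOTENT, _PAGE_TOLERANT };

	for (int i = 0; i < 4; i++)
		printf("%#06lx -> %s\n", modes[i], cache_mode(modes[i]));
	return 0;
}

Note that _PAGE_TOLERANT (0x30) is the bitwise OR of _PAGE_SAO and _PAGE_NON_IDEMPOTENT, which is why the kernel code in this commit always compares the whole field with == rather than testing individual bits.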

arch/powerpc/include/asm/book3s/64/hash.h (31 additions, 39 deletions)

@@ -21,11 +21,9 @@
 #define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
-#define _PAGE_GUARDED		0x00010 /* G: guarded (side-effect) page */
-/* M (memory coherence) is always set in the HPTE, so we don't need it here */
-#define _PAGE_COHERENT		0x0
-#define _PAGE_NO_CACHE		0x00020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU		0x00040 /* W: cache write-through */
+#define _PAGE_SAO		0x00010 /* Strong access order */
+#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
+#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
 #define _PAGE_DIRTY		0x00080 /* C: page changed */
 #define _PAGE_ACCESSED		0x00100 /* R: page referenced */
 #define _PAGE_SPECIAL		0x00400 /* software: special page */
@@ -43,7 +41,12 @@
 #define _PAGE_HASHPTE		(1ul << 61)	/* PTE has associated HPTE */
 #define _PAGE_PTE		(1ul << 62)	/* distinguishes PTEs from pointers */
 #define _PAGE_PRESENT		(1ul << 63)	/* pte contains a translation */
-
+/*
+ * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
+ * Instead of fixing all of them, add an alternate define which
+ * maps CI pte mapping.
+ */
+#define _PAGE_NO_CACHE		_PAGE_TOLERANT
 /*
  * We need to differentiate between explicit huge page and THP huge
  * page, since THP huge page also need to track real subpage details
@@ -126,9 +129,6 @@
 #define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
 
-/* Strong Access Ordering */
-#define _PAGE_SAO		(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
-
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE		0
 
@@ -147,10 +147,9 @@
 /*
  * Mask of bits returned by pte_pgprot()
  */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_4K_PFN | \
-			 _PAGE_PRIVILEGED | _PAGE_ACCESSED | _PAGE_READ |\
-			 _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
+#define PAGE_PROT_BITS	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
+			 _PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
+			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
 			 _PAGE_SOFT_DIRTY)
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
@@ -159,7 +158,7 @@
  * the processor might need it for DMA coherency.
  */
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
+#define _PAGE_BASE	(_PAGE_BASE_NC)
 
 /* Permission masks used to generate the __P and __S table,
  *
@@ -200,9 +199,9 @@
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
 #define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE)
+				 _PAGE_TOLERANT)
 #define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
-				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+				 _PAGE_NON_IDEMPOTENT)
 #define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
 #define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
@@ -509,52 +508,45 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	*ptep = pte;
 }
 
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-
-#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU)
+#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
 
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_NO_CACHE | _PAGE_GUARDED);
+			_PAGE_NON_IDEMPOTENT);
 }
 
 #define pgprot_noncached_wc pgprot_noncached_wc
 static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
 {
 	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_NO_CACHE);
+			_PAGE_TOLERANT);
 }
 
 #define pgprot_cached pgprot_cached
 static inline pgprot_t pgprot_cached(pgprot_t prot)
 {
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_COHERENT);
-}
-
-#define pgprot_cached_wthru pgprot_cached_wthru
-static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
-{
-	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
-			_PAGE_COHERENT | _PAGE_WRITETHRU);
-}
-
-#define pgprot_cached_noncoherent pgprot_cached_noncoherent
-static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
-{
-	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
 }
 
 #define pgprot_writecombine pgprot_writecombine
 static inline pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	return pgprot_noncached_wc(prot);
 }
+/*
+ * check a pte mapping have cache inhibited property
+ */
+static inline bool pte_ci(pte_t pte)
+{
+	unsigned long pte_v = pte_val(pte);
+
+	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
+	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+		return true;
+	return false;
+}
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
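Two details of the reworked header are easy to miss: each pgprot_* helper clears the whole _PAGE_CACHE_CTL field before installing a new mode, and pte_ci() compares the full field, so _PAGE_SAO — which shares bit 0x10 with _PAGE_TOLERANT — is not reported as cache inhibited. A minimal user-space model of that behaviour, with the kernel's pgprot_t/pte_t wrappers deliberately stubbed out as plain unsigned long:

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_SAO            0x00010
#define _PAGE_NON_IDEMPOTENT 0x00020
#define _PAGE_TOLERANT       0x00030
#define _PAGE_CACHE_CTL      (_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

/* models pgprot_noncached(): clear the whole field, then mark non idempotent */
static unsigned long noncached(unsigned long prot)
{
	return (prot & ~(unsigned long)_PAGE_CACHE_CTL) | _PAGE_NON_IDEMPOTENT;
}

/* models pte_ci(): true only for the two cache-inhibited encodings */
static bool ci(unsigned long pte)
{
	unsigned long ctl = pte & _PAGE_CACHE_CTL;

	return ctl == _PAGE_TOLERANT || ctl == _PAGE_NON_IDEMPOTENT;
}

int main(void)
{
	printf("%d\n", ci(noncached(_PAGE_SAO))); /* 1: old mode fully replaced */
	printf("%d\n", ci(_PAGE_SAO));            /* 0: SAO stays cacheable */
	return 0;
}

Without the clearing step, making an SAO mapping non-cacheable would OR 0x10 with 0x20 and silently produce 0x30, i.e. _PAGE_TOLERANT — a different mode than intended.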

arch/powerpc/include/asm/kvm_book3s_64.h (10 additions, 17 deletions)

@@ -276,19 +276,24 @@ static inline unsigned long hpte_make_readonly(unsigned long ptel)
 	return ptel;
 }
 
-static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
+static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
 {
-	unsigned int wimg = ptel & HPTE_R_WIMG;
+	unsigned int wimg = hptel & HPTE_R_WIMG;
 
 	/* Handle SAO */
 	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
 	    cpu_has_feature(CPU_FTR_ARCH_206))
 		wimg = HPTE_R_M;
 
-	if (!io_type)
+	if (!is_ci)
 		return wimg == HPTE_R_M;
-
-	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
+	/*
+	 * if host is mapped cache inhibited, make sure hptel also have
+	 * cache inhibited.
+	 */
+	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guest. ? */
+		return false;
+	return !!(wimg & HPTE_R_I);
 }
 
 /*
@@ -325,18 +330,6 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
 	return new_pte;
 }
 
-
-/* Return HPTE cache control bits corresponding to Linux pte bits */
-static inline unsigned long hpte_cache_bits(unsigned long pte_val)
-{
-#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
-	return pte_val & (HPTE_R_W | HPTE_R_I);
-#else
-	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
-		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
-#endif
-}
-
 static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
 {
 	if (key)
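To see what the new guest/host compatibility rule accepts and rejects, here is a self-contained model of hpte_cache_flags_ok() with the same control flow as the hunk above. The HPTE_R_* values are restated for illustration (they should match the hash-MMU header of this era, but treat them as an assumption), and cpu_has_feature(CPU_FTR_ARCH_206) is reduced to a boolean parameter:

#include <stdbool.h>
#include <stdio.h>

#define HPTE_R_W    0x40ul
#define HPTE_R_I    0x20ul
#define HPTE_R_M    0x10ul
#define HPTE_R_G    0x08ul
#define HPTE_R_WIMG (HPTE_R_W | HPTE_R_I | HPTE_R_M | HPTE_R_G)

static bool cache_flags_ok(unsigned long hptel, bool is_ci, bool has_sao)
{
	unsigned long wimg = hptel & HPTE_R_WIMG;

	/* SAO (WIM = 0b111) is treated as plain coherent memory on >= 2.06 */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) && has_sao)
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;  /* host cacheable: guest must be too */
	if (wimg & HPTE_R_W)              /* write-through never accepted here */
		return false;
	return !!(wimg & HPTE_R_I);       /* host CI: guest must set I */
}

int main(void)
{
	printf("%d\n", cache_flags_ok(HPTE_R_M, false, true));           /* 1 */
	printf("%d\n", cache_flags_ok(HPTE_R_I | HPTE_R_G, true, true)); /* 1 */
	printf("%d\n", cache_flags_ok(HPTE_R_I, false, true));           /* 0 */
	return 0;
}

In short: a cacheable host mapping demands guest WIMG == M exactly, while a cache-inhibited host mapping accepts I (with or without G) but — per the FIXME carried in the patch — still rejects any write-through setting.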

arch/powerpc/kvm/book3s_64_mmu_hv.c (5 additions, 6 deletions)

@@ -447,7 +447,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	struct revmap_entry *rev;
 	struct page *page, *pages[1];
 	long index, ret, npages;
-	unsigned long is_io;
+	bool is_ci;
 	unsigned int writing, write_ok;
 	struct vm_area_struct *vma;
 	unsigned long rcbits;
@@ -503,7 +503,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	smp_rmb();
 
 	ret = -EFAULT;
-	is_io = 0;
+	is_ci = false;
 	pfn = 0;
 	page = NULL;
 	pte_size = PAGE_SIZE;
@@ -521,7 +521,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			pfn = vma->vm_pgoff +
 				((hva - vma->vm_start) >> PAGE_SHIFT);
 			pte_size = psize;
-			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
 			write_ok = vma->vm_flags & VM_WRITE;
 		}
 		up_read(&current->mm->mmap_sem);
@@ -558,10 +558,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		goto out_put;
 
 	/* Check WIMG vs. the actual page we're accessing */
-	if (!hpte_cache_flags_ok(r, is_io)) {
-		if (is_io)
+	if (!hpte_cache_flags_ok(r, is_ci)) {
+		if (is_ci)
 			goto out_put;
-
 		/*
 		 * Allow guest to map emulated device memory as
 		 * uncacheable, but actually make it cacheable.

arch/powerpc/kvm/book3s_hv_rm_mmu.c (6 additions, 6 deletions)

@@ -175,7 +175,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
 	unsigned hpage_shift;
-	unsigned long is_io;
+	bool is_ci;
 	unsigned long *rmap;
 	pte_t *ptep;
 	unsigned int writing;
@@ -199,7 +199,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	gfn = gpa >> PAGE_SHIFT;
 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	pa = 0;
-	is_io = ~0ul;
+	is_ci = false;
 	rmap = NULL;
 	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
 		/* Emulated MMIO - mark this with key=31 */
@@ -250,7 +250,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		if (writing && !pte_write(pte))
 			/* make the actual HPTE be read-only */
 			ptel = hpte_make_readonly(ptel);
-		is_io = hpte_cache_bits(pte_val(pte));
+		is_ci = pte_ci(pte);
 		pa = pte_pfn(pte) << PAGE_SHIFT;
 		pa |= hva & (host_pte_size - 1);
 		pa |= gpa & ~PAGE_MASK;
@@ -267,9 +267,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	else
 		pteh |= HPTE_V_ABSENT;
 
-	/* Check WIMG */
-	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
-		if (is_io)
+	/*If we had host pte mapping then Check WIMG */
+	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
+		if (is_ci)
 			return H_PARAMETER;
 		/*
 		 * Allow guest to map emulated device memory as

arch/powerpc/mm/hash64_64k.c (1 addition, 1 deletion)

@@ -244,7 +244,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	 * If so, bail out and refault as a 4k page
 	 */
 	if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
-	    unlikely(old_pte & _PAGE_NO_CACHE))
+	    unlikely(pte_ci(pte)))
 		return 0;
 	/*
 	 * Try to lock the PTE, add ACCESSED and DIRTY if it was

arch/powerpc/mm/hash_utils_64.c (9 additions, 9 deletions)

@@ -192,12 +192,13 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 	/*
 	 * Add in WIG bits
 	 */
-	if (pteflags & _PAGE_WRITETHRU)
-		rflags |= HPTE_R_W;
-	if (pteflags & _PAGE_NO_CACHE)
+
+	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
 		rflags |= HPTE_R_I;
-	if (pteflags & _PAGE_GUARDED)
-		rflags |= HPTE_R_G;
+	if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+		rflags |= (HPTE_R_I | HPTE_R_G);
+	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+		rflags |= (HPTE_R_I | HPTE_R_W);
 
 	return rflags;
 }
@@ -1142,8 +1143,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	/* If this PTE is non-cacheable and we have restrictions on
 	 * using non cacheable large pages, then we switch to 4k
 	 */
-	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
-	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
 		if (user_region) {
 			demote_segment_4k(mm, ea);
 			psize = MMU_PAGE_4K;
@@ -1297,13 +1297,13 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
 	WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
-	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
+	/* If either _PAGE_4K_PFN or cache inhibited is set (and we are on
 	 * a 64K kernel), then we don't preload, hash_page() will take
 	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
-	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
+	if ((pte_val(*ptep) & _PAGE_4K_PFN) || pte_ci(*ptep))
		goto out_exit;
 #endif /* CONFIG_PPC_64K_PAGES */
 
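The htab_convert_pte_flags() hunk is the hash-side counterpart of the new encoding: each two-bit Linux mode expands to a WIG combination, with HPTE_R_M added unconditionally elsewhere in the same function. The sketch below (constants restated for illustration, outside the kernel) shows the mapping and why the exact-match compares matter — _PAGE_TOLERANT is a bit-superset of the other two modes, so plain '&' tests would misclassify it:

#include <stdio.h>

#define _PAGE_SAO            0x00010
#define _PAGE_NON_IDEMPOTENT 0x00020
#define _PAGE_TOLERANT       0x00030
#define _PAGE_CACHE_CTL      (_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define HPTE_R_G 0x08ul
#define HPTE_R_M 0x10ul /* added unconditionally by htab_convert_pte_flags() */
#define HPTE_R_I 0x20ul
#define HPTE_R_W 0x40ul

static unsigned long wig_bits(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* exact-match on the whole field, as in the hunk above */
	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
		rflags |= HPTE_R_I;
	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
		rflags |= (HPTE_R_I | HPTE_R_W);
	return rflags;
}

int main(void)
{
	printf("tolerant       -> %#lx\n", wig_bits(_PAGE_TOLERANT));       /* 0x20 */
	printf("non idempotent -> %#lx\n", wig_bits(_PAGE_NON_IDEMPOTENT)); /* 0x28 */
	printf("sao            -> %#lx\n", wig_bits(_PAGE_SAO));            /* 0x60 */
	return 0;
}

Combined with the always-set HPTE_R_M, the SAO case comes out as WIM = 0b111 — exactly the pattern that hpte_cache_flags_ok() special-cases above.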

arch/powerpc/mm/pgtable.c (4 additions, 4 deletions)

@@ -38,16 +38,16 @@ static inline int is_exec_fault(void)
 
 /* We only try to do i/d cache coherency on stuff that looks like
  * reasonably "normal" PTEs. We currently require a PTE to be present
- * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that
+ * and we avoid _PAGE_SPECIAL and cache inhibited pte. We also only do that
  * on userspace PTEs
  */
 static inline int pte_looks_normal(pte_t pte)
 {
 
 #if defined(CONFIG_PPC_BOOK3S_64)
-	if ((pte_val(pte) &
-	     (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE)) ==
-	    _PAGE_PRESENT) {
+	if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
+		if (pte_ci(pte))
+			return 0;
 		if (pte_user(pte))
 			return 1;
 	}

arch/powerpc/mm/pgtable_64.c (0 additions, 4 deletions)

@@ -167,10 +167,6 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 	if ((flags & _PAGE_PRESENT) == 0)
 		flags |= pgprot_val(PAGE_KERNEL);
 
-	/* Non-cacheable page cannot be coherent */
-	if (flags & _PAGE_NO_CACHE)
-		flags &= ~_PAGE_COHERENT;
-
 	/* We don't support the 4K PFN hack with ioremap */
 	if (flags & _PAGE_4K_PFN)
 		return NULL;

arch/powerpc/platforms/pseries/lpar.c (0 additions, 4 deletions)

@@ -152,10 +152,6 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	/* Exact = 0 */
 	flags = 0;
 
-	/* Make pHyp happy */
-	if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
-		hpte_r &= ~HPTE_R_M;
-
 	if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
 		flags |= H_COALESCE_CAND;
 
