
Commit e15a4fe

npiggin authored and mpe committed
powerpc/64s/hash: Add some SLB debugging tests
This adds CONFIG_DEBUG_VM checks to ensure:

- The kernel stack is in the SLB after it's flushed and bolted.
- We don't insert an SLB entry for an address that is already in the SLB.
- The kernel SLB miss handler does not take an SLB miss.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent: 94ee427
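These checks compile away entirely unless the kernel is built with CONFIG_DEBUG_VM, so a debug configuration is needed to exercise them. A minimal config fragment (the menuconfig location of the option can vary between kernel versions):

    # Enable the new SLB assertions (and the rest of the VM debug checks),
    # e.g. via menuconfig under "Kernel hacking", or directly in .config:
    CONFIG_DEBUG_VM=y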

File tree

2 files changed: +53 -3 lines changed


arch/powerpc/include/asm/paca.h (3 additions, 0 deletions)

@@ -115,6 +115,9 @@ struct paca_struct {
 	u16 vmalloc_sllp;
 	u8 slb_cache_ptr;
 	u8 stab_rr;			/* stab/slb round-robin counter */
+#ifdef CONFIG_DEBUG_VM
+	u8 in_kernel_slb_handler;
+#endif
 	u32 slb_used_bitmap;		/* Bitmaps for first 32 SLB entries. */
 	u32 slb_kern_bitmap;
 	u32 slb_cache[SLB_CACHE_ENTRIES];

arch/powerpc/mm/slb.c (50 additions, 3 deletions)
@@ -58,6 +58,30 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
+static void assert_slb_exists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+	unsigned long tmp;
+
+	WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+	WARN_ON(tmp == 0);
+#endif
+}
+
+static void assert_slb_notexists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+	unsigned long tmp;
+
+	WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+	WARN_ON(tmp != 0);
+#endif
+}
+
 static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     enum slb_index index)
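Both helpers probe the SLB with the slbfee. instruction, which copies the matching entry's VSID data into the target register and writes zero when no entry translates the given EA, so presence reduces to a zero test; the WARN_ON_ONCE(mfmsr() & MSR_EE) guards insist on interrupts being hard-disabled so the SLB cannot change between the probe and the check. A rough standalone sketch of the probe (hypothetical function name, same inline asm as above):

    /*
     * Hypothetical illustration of the slbfee. probe used by the helpers
     * above: returns true if some SLB entry currently translates ea.
     * Must be called with MSR[EE] clear, as in the real helpers.
     */
    static bool slb_entry_present(unsigned long ea)
    {
    	unsigned long tmp;
    
    	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
    	return tmp != 0;
    }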
@@ -90,6 +114,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);
 
+	assert_slb_notexists(ea);
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -111,6 +136,8 @@ void __slb_restore_bolted_realmode(void)
 		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
 		       "r" (be64_to_cpu(p->save_area[index].esid)));
 	}
+
+	assert_slb_exists(local_paca->kstack);
 }
 
 /*
@@ -158,6 +185,7 @@ void slb_flush_and_restore_bolted(void)
 		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
 			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
 		     : "memory");
+	assert_slb_exists(get_paca()->kstack);
 
 	get_paca()->slb_cache_ptr = 0;

@@ -410,9 +438,17 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			unsigned long slbie_data = 0;
 
 			for (i = 0; i < offset; i++) {
-				/* EA */
-				slbie_data = (unsigned long)
+				unsigned long ea;
+
+				ea = (unsigned long)
 					get_paca()->slb_cache[i] << SID_SHIFT;
+				/*
+				 * Could assert_slb_exists here, but hypervisor
+				 * or machine check could have come in and
+				 * removed the entry at this point.
+				 */
+
+				slbie_data = ea;
 				slbie_data |= user_segment_size(slbie_data)
 						<< SLBIE_SSIZE_SHIFT;
 				slbie_data |= SLBIE_C; /* user slbs have C=1 */
@@ -640,6 +676,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
+	assert_slb_notexists(ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
 	barrier();
@@ -740,7 +777,17 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
 	 * if they go via fast_exception_return too.
 	 */
 	if (id >= KERNEL_REGION_ID) {
-		return slb_allocate_kernel(ea, id);
+		long err;
+#ifdef CONFIG_DEBUG_VM
+		/* Catch recursive kernel SLB faults. */
+		BUG_ON(local_paca->in_kernel_slb_handler);
+		local_paca->in_kernel_slb_handler = 1;
+#endif
+		err = slb_allocate_kernel(ea, id);
+#ifdef CONFIG_DEBUG_VM
+		local_paca->in_kernel_slb_handler = 0;
+#endif
+		return err;
 	} else {
 		struct mm_struct *mm = current->mm;
 		long err;
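The kernel-region branch wraps slb_allocate_kernel() in a plain re-entrancy flag: BUG if the flag is already set, set it for the duration of the call, then clear it. Any SLB miss taken inside the handler itself would re-enter do_slb_fault() with the flag set and trip the BUG_ON rather than recursing until the stack is gone. A compilable userspace sketch of the same pattern (illustrative names; in the kernel the flag lives in the per-CPU paca and is only compiled in under CONFIG_DEBUG_VM):

    #include <assert.h>
    
    /* Stands in for local_paca->in_kernel_slb_handler. */
    static _Thread_local int in_kernel_slb_handler;
    
    /* Stand-in for the real slb_allocate_kernel(). */
    static long slb_allocate_kernel_stub(unsigned long ea)
    {
    	(void)ea;
    	return 0;
    }
    
    static long do_kernel_slb_fault(unsigned long ea)
    {
    	long err;
    
    	assert(!in_kernel_slb_handler);	/* catch recursive faults; BUG_ON() in the kernel */
    	in_kernel_slb_handler = 1;
    	err = slb_allocate_kernel_stub(ea);
    	in_kernel_slb_handler = 0;
    	return err;
    }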
