@@ -58,6 +58,30 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
+static void assert_slb_exists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+	unsigned long tmp;
+
+	WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+	WARN_ON(tmp == 0);
+#endif
+}
+
+static void assert_slb_notexists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+	unsigned long tmp;
+
+	WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+	WARN_ON(tmp != 0);
+#endif
+}
+
 static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     enum slb_index index)
@@ -90,6 +114,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);
 
+	assert_slb_notexists(ea);
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -111,6 +136,8 @@ void __slb_restore_bolted_realmode(void)
 			     : "r" (be64_to_cpu(p->save_area[index].vsid)),
 			       "r" (be64_to_cpu(p->save_area[index].esid)));
 	}
+
+	assert_slb_exists(local_paca->kstack);
 }
 
 /*
@@ -158,6 +185,7 @@ void slb_flush_and_restore_bolted(void)
		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
		     : "memory");
+	assert_slb_exists(get_paca()->kstack);
 
 	get_paca()->slb_cache_ptr = 0;
 
@@ -410,9 +438,17 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		unsigned long slbie_data = 0;
 
 		for (i = 0; i < offset; i++) {
-			/* EA */
-			slbie_data = (unsigned long)
+			unsigned long ea;
+
+			ea = (unsigned long)
 				get_paca()->slb_cache[i] << SID_SHIFT;
+			/*
+			 * Could assert_slb_exists here, but hypervisor
+			 * or machine check could have come in and
+			 * removed the entry at this point.
+			 */
+
+			slbie_data = ea;
 			slbie_data |= user_segment_size(slbie_data)
 					<< SLBIE_SSIZE_SHIFT;
 			slbie_data |= SLBIE_C; /* user slbs have C=1 */
@@ -640,6 +676,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
+	assert_slb_notexists(ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
 	barrier();
@@ -740,7 +777,17 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
 	 * if they go via fast_exception_return too.
 	 */
 	if (id >= KERNEL_REGION_ID) {
-		return slb_allocate_kernel(ea, id);
+		long err;
+#ifdef CONFIG_DEBUG_VM
+		/* Catch recursive kernel SLB faults. */
+		BUG_ON(local_paca->in_kernel_slb_handler);
+		local_paca->in_kernel_slb_handler = 1;
+#endif
+		err = slb_allocate_kernel(ea, id);
+#ifdef CONFIG_DEBUG_VM
+		local_paca->in_kernel_slb_handler = 0;
+#endif
+		return err;
 	} else {
 		struct mm_struct *mm = current->mm;
 		long err;
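
Note: the CONFIG_DEBUG_VM recursion check added to do_slb_fault() above reads and writes local_paca->in_kernel_slb_handler, but the definition of that flag is not part of the hunks shown here; presumably the rest of the patch adds it to struct paca_struct. A minimal sketch of what that could look like, where the field type and placement are assumptions and only the field name comes from the diff above:

/*
 * Sketch only, not taken from this diff: a per-CPU flag in the paca,
 * compiled in only for CONFIG_DEBUG_VM builds, so the recursion check
 * in do_slb_fault() has something to test and set.
 */
struct paca_struct {
	/* ... existing fields ... */
#ifdef CONFIG_DEBUG_VM
	u8 in_kernel_slb_handler;	/* nonzero while a kernel SLB fault is being handled */
#endif
	/* ... */
};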