@@ -141,3 +141,116 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
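+/*
+ * Walk the current task's page table and return the raw PTE value
+ * mapping vaddr, or 0 if there is no mapping at any level.
+ */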
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+	struct task_struct *task = get_current();
+	struct mm_struct *mm = task->mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (!mm)
+		mm = task->active_mm;
+	pgd = pgd_offset(mm, vaddr);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+	pmd = pmd_offset(pgd, vaddr);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
+	pte = pte_offset_map(pmd, vaddr);
+	if (!pte)
+		return 0;
+	return pte_val(*pte);
+}
+
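+/* Severity levels ORed into check_tlb_entry()'s return value. */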
+enum {
+	TLB_SUSPICIOUS	= 1,
+	TLB_INSANE	= 2,
+};
+
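+/* An inconsistent TLB entry is fatal; a merely suspicious one just warns. */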
+static void tlb_insane(void)
+{
+	BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+	WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or the
+ * PTE is marked as non-present. A non-present PTE for a page with non-zero
+ * refcount and zero mapcount is normal during a batched TLB flush. Zero
+ * refcount means that the page was freed prematurely. Non-zero mapcount is
+ * unusual, but does not necessarily mean an error, so it is only marked as
+ * suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+	unsigned tlbidx = w | (e << PAGE_SHIFT);
+	unsigned r0 = dtlb ?
+		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+	unsigned pte = get_pte_for_vaddr(vpn);
+	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+	unsigned tlb_asid = r0 & ASID_MASK;
+	bool kernel = tlb_asid == 1;
+	int rc = 0;
+
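+	/* A kernel-ASID entry must map a kernel address, and vice versa. */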
+	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+		       dtlb ? 'D' : 'I', w, e, vpn,
+		       kernel ? "kernel" : "user");
+		rc |= TLB_INSANE;
+	}
+
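+	/* For the current mm's ASID the translation must agree with the PTE. */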
+	if (tlb_asid == mm_asid) {
+		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+			read_itlb_translation(tlbidx);
+		if ((pte ^ r1) & PAGE_MASK) {
+			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+			if (pte == 0 || !pte_present(__pte(pte))) {
+				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+				pr_err("page refcount: %d, mapcount: %d\n",
+				       page_count(p),
+				       page_mapcount(p));
+				if (!page_count(p))
+					rc |= TLB_INSANE;
+				else if (page_mapped(p))
+					rc |= TLB_SUSPICIOUS;
+			} else {
+				rc |= TLB_INSANE;
+			}
+		}
+	}
+	return rc;
+}
+
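+/*
+ * Scan all auto-refill ways/entries of both the DTLB and the ITLB with
+ * interrupts disabled; BUG on insane entries, WARN on suspicious ones.
+ */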
+void check_tlb_sanity(void)
+{
+	unsigned long flags;
+	unsigned w, e;
+	int bug = 0;
+
+	local_irq_save(flags);
+	for (w = 0; w < DTLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, true);
+	for (w = 0; w < ITLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, false);
+	if (bug & TLB_INSANE)
+		tlb_insane();
+	if (bug & TLB_SUSPICIOUS)
+		tlb_suspicious();
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */
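For context, a minimal sketch of a call site, assuming only the check_tlb_sanity() declaration from this diff; the hook name below is hypothetical, and the checker is intended to run at a quiet point such as return to userspace:

/* Hypothetical hook -- illustrative only, not part of this commit. */
void check_tlb_sanity(void);

static inline void debug_tlb_checkpoint(void)
{
#ifdef CONFIG_DEBUG_TLB_SANITY
	check_tlb_sanity();	/* BUGs/WARNs if TLB and page tables disagree */
#endif
}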