Skip to content

Commit 94ee427

Browse files
npiggin authored and mpe committed
powerpc/64s/hash: Simplify slb_flush_and_rebolt()
slb_flush_and_rebolt() is misleading, it is called in virtual mode, so it can not possibly change the stack, so it should not be touching the shadow area. And since vmalloc is no longer bolted, it should not change any bolted mappings at all. Change the name to slb_flush_and_restore_bolted(), and have it just load the kernel stack from what's currently in the shadow SLB area. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent 5434ae7 commit 94ee427

File tree

5 files changed

+21
-35
lines changed

5 files changed

+21
-35
lines changed

arch/powerpc/include/asm/book3s/64/mmu-hash.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -503,7 +503,7 @@ struct slb_entry {
503503
};
504504

505505
extern void slb_initialize(void);
506-
extern void slb_flush_and_rebolt(void);
506+
void slb_flush_and_restore_bolted(void);
507507
void slb_flush_all_realmode(void);
508508
void __slb_restore_bolted_realmode(void);
509509
void slb_restore_bolted_realmode(void);

arch/powerpc/kernel/swsusp_asm64.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
262262

263263
addi r1,r1,-128
264264
#ifdef CONFIG_PPC_BOOK3S_64
265-
bl slb_flush_and_rebolt
265+
bl slb_flush_and_restore_bolted
266266
#endif
267267
bl do_after_copyback
268268
addi r1,r1,128

arch/powerpc/mm/hash_utils_64.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1125,7 +1125,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
11251125
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
11261126

11271127
copy_mm_to_paca(mm);
1128-
slb_flush_and_rebolt();
1128+
slb_flush_and_restore_bolted();
11291129
}
11301130
}
11311131
#endif /* CONFIG_PPC_64K_PAGES */
@@ -1197,7 +1197,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
11971197
if (user_region) {
11981198
if (psize != get_paca_psize(ea)) {
11991199
copy_mm_to_paca(mm);
1200-
slb_flush_and_rebolt();
1200+
slb_flush_and_restore_bolted();
12011201
}
12021202
} else if (get_paca()->vmalloc_sllp !=
12031203
mmu_psize_defs[mmu_vmalloc_psize].sllp) {

arch/powerpc/mm/slb.c

Lines changed: 16 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -115,8 +115,6 @@ void __slb_restore_bolted_realmode(void)
115115

116116
/*
117117
* Insert the bolted entries into an empty SLB.
118-
* This is not the same as rebolt because the bolted segments are not
119-
* changed, just loaded from the shadow area.
120118
*/
121119
void slb_restore_bolted_realmode(void)
122120
{
@@ -135,12 +133,15 @@ void slb_flush_all_realmode(void)
135133
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
136134
}
137135

138-
void slb_flush_and_rebolt(void)
136+
/*
137+
* This flushes non-bolted entries, it can be run in virtual mode. Must
138+
* be called with interrupts disabled.
139+
*/
140+
void slb_flush_and_restore_bolted(void)
139141
{
140-
/* If you change this make sure you change SLB_NUM_BOLTED
141-
* and PR KVM appropriately too. */
142-
unsigned long linear_llp, lflags;
143-
unsigned long ksp_esid_data, ksp_vsid_data;
142+
struct slb_shadow *p = get_slb_shadow();
143+
144+
BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
144145

145146
WARN_ON(!irqs_disabled());
146147

@@ -150,30 +151,12 @@ void slb_flush_and_rebolt(void)
150151
*/
151152
hard_irq_disable();
152153

153-
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
154-
lflags = SLB_VSID_KERNEL | linear_llp;
155-
156-
ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
157-
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
158-
ksp_esid_data &= ~SLB_ESID_V;
159-
ksp_vsid_data = 0;
160-
slb_shadow_clear(KSTACK_INDEX);
161-
} else {
162-
/* Update stack entry; others don't change */
163-
slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
164-
ksp_vsid_data =
165-
be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
166-
}
167-
168-
/* We need to do this all in asm, so we're sure we don't touch
169-
* the stack between the slbia and rebolting it. */
170154
asm volatile("isync\n"
171155
"slbia\n"
172-
/* Slot 1 - kernel stack */
173-
"slbmte %0,%1\n"
174-
"isync"
175-
:: "r"(ksp_vsid_data),
176-
"r"(ksp_esid_data)
156+
"slbmte %0, %1\n"
157+
"isync\n"
158+
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
159+
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
177160
: "memory");
178161

179162
get_paca()->slb_cache_ptr = 0;
@@ -254,7 +237,10 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
254237

255238
void slb_vmalloc_update(void)
256239
{
257-
slb_flush_and_rebolt();
240+
/*
241+
* vmalloc is not bolted, so just have to flush non-bolted.
242+
*/
243+
slb_flush_and_restore_bolted();
258244
}
259245

260246
static bool preload_hit(struct thread_info *ti, unsigned long esid)

arch/powerpc/mm/slice.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ static void slice_flush_segments(void *parm)
219219
copy_mm_to_paca(current->active_mm);
220220

221221
local_irq_save(flags);
222-
slb_flush_and_rebolt();
222+
slb_flush_and_restore_bolted();
223223
local_irq_restore(flags);
224224
#endif
225225
}

0 commit comments

Comments
 (0)