Skip to content

Commit caca285

Browse files
kvaneesh authored and mpe committed
powerpc/mm/radix: Use STD_MMU_64 to properly isolate hash related code
We also use MMU_FTR_RADIX to branch out from code path specific to hash. No functionality change. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent a8ed87c commit caca285

File tree

6 files changed

+52
-16
lines changed

6 files changed

+52
-16
lines changed

arch/powerpc/kernel/entry_64.S

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -529,7 +529,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
529529
std r6,PACACURRENT(r13) /* Set new 'current' */
530530

531531
ld r8,KSP(r4) /* new stack pointer */
532-
#ifdef CONFIG_PPC_BOOK3S
532+
#ifdef CONFIG_PPC_STD_MMU_64
533+
BEGIN_MMU_FTR_SECTION
534+
b 2f
535+
END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
533536
BEGIN_FTR_SECTION
534537
clrrdi r6,r8,28 /* get its ESID */
535538
clrrdi r9,r1,28 /* get current sp ESID */
@@ -575,7 +578,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
575578
slbmte r7,r0
576579
isync
577580
2:
578-
#endif /* !CONFIG_PPC_BOOK3S */
581+
#endif /* CONFIG_PPC_STD_MMU_64 */
579582

580583
CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
581584
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE

arch/powerpc/kernel/exceptions-64s.S

Lines changed: 23 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -939,7 +939,13 @@ data_access_common:
939939
ld r3,PACA_EXGEN+EX_DAR(r13)
940940
lwz r4,PACA_EXGEN+EX_DSISR(r13)
941941
li r5,0x300
942+
std r3,_DAR(r1)
943+
std r4,_DSISR(r1)
944+
BEGIN_MMU_FTR_SECTION
942945
b do_hash_page /* Try to handle as hpte fault */
946+
MMU_FTR_SECTION_ELSE
947+
b handle_page_fault
948+
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
943949

944950
.align 7
945951
.globl h_data_storage_common
@@ -964,7 +970,13 @@ instruction_access_common:
964970
ld r3,_NIP(r1)
965971
andis. r4,r12,0x5820
966972
li r5,0x400
973+
std r3,_DAR(r1)
974+
std r4,_DSISR(r1)
975+
BEGIN_MMU_FTR_SECTION
967976
b do_hash_page /* Try to handle as hpte fault */
977+
MMU_FTR_SECTION_ELSE
978+
b handle_page_fault
979+
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
968980

969981
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
970982

@@ -1375,16 +1387,21 @@ slb_miss_realmode:
13751387
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
13761388
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
13771389

1390+
#ifdef CONFIG_PPC_STD_MMU_64
1391+
BEGIN_MMU_FTR_SECTION
13781392
bl slb_allocate_realmode
1379-
1393+
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
1394+
#endif
13801395
/* All done -- return from exception. */
13811396

13821397
ld r10,PACA_EXSLB+EX_LR(r13)
13831398
ld r3,PACA_EXSLB+EX_R3(r13)
13841399
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
13851400

13861401
mtlr r10
1387-
1402+
BEGIN_MMU_FTR_SECTION
1403+
b 2f
1404+
END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
13881405
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
13891406
beq- 2f
13901407

@@ -1435,9 +1452,7 @@ power4_fixup_nap:
14351452
*/
14361453
.align 7
14371454
do_hash_page:
1438-
std r3,_DAR(r1)
1439-
std r4,_DSISR(r1)
1440-
1455+
#ifdef CONFIG_PPC_STD_MMU_64
14411456
andis. r0,r4,0xa410 /* weird error? */
14421457
bne- handle_page_fault /* if not, try to insert a HPTE */
14431458
andis. r0,r4,DSISR_DABRMATCH@h
@@ -1465,6 +1480,7 @@ do_hash_page:
14651480

14661481
/* Error */
14671482
blt- 13f
1483+
#endif /* CONFIG_PPC_STD_MMU_64 */
14681484

14691485
/* Here we have a page fault that hash_page can't handle. */
14701486
handle_page_fault:
@@ -1491,6 +1507,7 @@ handle_dabr_fault:
14911507
12: b ret_from_except_lite
14921508

14931509

1510+
#ifdef CONFIG_PPC_STD_MMU_64
14941511
/* We have a page fault that hash_page could handle but HV refused
14951512
* the PTE insertion
14961513
*/
@@ -1500,6 +1517,7 @@ handle_dabr_fault:
15001517
ld r4,_DAR(r1)
15011518
bl low_hash_fault
15021519
b ret_from_except
1520+
#endif
15031521

15041522
/*
15051523
* We come here as a result of a DSI at a point where we don't want

arch/powerpc/kernel/machine_kexec_64.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ int default_machine_kexec_prepare(struct kimage *image)
7676
* end of the blocked region (begin >= high). Use the
7777
* boolean identity !(a || b) === (!a && !b).
7878
*/
79+
#ifdef CONFIG_PPC_STD_MMU_64
7980
if (htab_address) {
8081
low = __pa(htab_address);
8182
high = low + htab_size_bytes;
@@ -88,6 +89,7 @@ int default_machine_kexec_prepare(struct kimage *image)
8889
return -ETXTBSY;
8990
}
9091
}
92+
#endif /* CONFIG_PPC_STD_MMU_64 */
9193

9294
/* We also should not overwrite the tce tables */
9395
for_each_node_by_type(node, "pci") {
@@ -381,7 +383,7 @@ void default_machine_kexec(struct kimage *image)
381383
/* NOTREACHED */
382384
}
383385

384-
#ifndef CONFIG_PPC_BOOK3E
386+
#ifdef CONFIG_PPC_STD_MMU_64
385387
/* Values we need to export to the second kernel via the device tree. */
386388
static unsigned long htab_base;
387389
static unsigned long htab_size;
@@ -428,4 +430,4 @@ static int __init export_htab_values(void)
428430
return 0;
429431
}
430432
late_initcall(export_htab_values);
431-
#endif /* !CONFIG_PPC_BOOK3E */
433+
#endif /* CONFIG_PPC_STD_MMU_64 */

arch/powerpc/kernel/mce_power.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ void __flush_tlb_power9(unsigned int action)
8080

8181

8282
/* flush SLBs and reload */
83+
#ifdef CONFIG_PPC_MMU_STD_64
8384
static void flush_and_reload_slb(void)
8485
{
8586
struct slb_shadow *slb;
@@ -113,6 +114,7 @@ static void flush_and_reload_slb(void)
113114
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
114115
}
115116
}
117+
#endif
116118

117119
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
118120
{
@@ -123,6 +125,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
123125
* reset the error bits whenever we handle them so that at the end
124126
* we can check whether we handled all of them or not.
125127
* */
128+
#ifdef CONFIG_PPC_MMU_STD_64
126129
if (dsisr & slb_error_bits) {
127130
flush_and_reload_slb();
128131
/* reset error bits */
@@ -134,6 +137,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
134137
/* reset error bits */
135138
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
136139
}
140+
#endif
137141
/* Any other errors we don't understand? */
138142
if (dsisr & 0xffffffffUL)
139143
handled = 0;
@@ -153,6 +157,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
153157
switch (P7_SRR1_MC_IFETCH(srr1)) {
154158
case 0:
155159
break;
160+
#ifdef CONFIG_PPC_MMU_STD_64
156161
case P7_SRR1_MC_IFETCH_SLB_PARITY:
157162
case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
158163
/* flush and reload SLBs for SLB errors. */
@@ -165,6 +170,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
165170
handled = 1;
166171
}
167172
break;
173+
#endif
168174
default:
169175
break;
170176
}
@@ -178,10 +184,12 @@ static long mce_handle_ierror_p7(uint64_t srr1)
178184

179185
handled = mce_handle_common_ierror(srr1);
180186

187+
#ifdef CONFIG_PPC_MMU_STD_64
181188
if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
182189
flush_and_reload_slb();
183190
handled = 1;
184191
}
192+
#endif
185193
return handled;
186194
}
187195

@@ -324,10 +332,12 @@ static long mce_handle_ierror_p8(uint64_t srr1)
324332

325333
handled = mce_handle_common_ierror(srr1);
326334

335+
#ifdef CONFIG_PPC_MMU_STD_64
327336
if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
328337
flush_and_reload_slb();
329338
handled = 1;
330339
}
340+
#endif
331341
return handled;
332342
}
333343

arch/powerpc/kernel/process.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1079,15 +1079,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
10791079
}
10801080
#endif /* CONFIG_PPC64 */
10811081

1082-
#ifdef CONFIG_PPC_BOOK3S_64
1082+
#ifdef CONFIG_PPC_STD_MMU_64
10831083
batch = this_cpu_ptr(&ppc64_tlb_batch);
10841084
if (batch->active) {
10851085
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
10861086
if (batch->index)
10871087
__flush_tlb_pending(batch);
10881088
batch->active = 0;
10891089
}
1090-
#endif /* CONFIG_PPC_BOOK3S_64 */
1090+
#endif /* CONFIG_PPC_STD_MMU_64 */
10911091

10921092
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
10931093
switch_booke_debug_regs(&new->thread.debug);
@@ -1133,7 +1133,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
11331133

11341134
last = _switch(old_thread, new_thread);
11351135

1136-
#ifdef CONFIG_PPC_BOOK3S_64
1136+
#ifdef CONFIG_PPC_STD_MMU_64
11371137
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
11381138
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
11391139
batch = this_cpu_ptr(&ppc64_tlb_batch);
@@ -1142,8 +1142,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
11421142

11431143
if (current_thread_info()->task->thread.regs)
11441144
restore_math(current_thread_info()->task->thread.regs);
1145-
1146-
#endif /* CONFIG_PPC_BOOK3S_64 */
1145+
#endif /* CONFIG_PPC_STD_MMU_64 */
11471146

11481147
return last;
11491148
}
@@ -1378,6 +1377,9 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
13781377
unsigned long sp_vsid;
13791378
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
13801379

1380+
if (radix_enabled())
1381+
return;
1382+
13811383
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
13821384
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
13831385
<< SLB_VSID_SHIFT_1T;
@@ -1926,7 +1928,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
19261928
* the heap, we can put it above 1TB so it is backed by a 1TB
19271929
* segment. Otherwise the heap will be in the bottom 1TB
19281930
* which always uses 256MB segments and this may result in a
1929-
* performance penalty.
1931+
* performance penalty. We don't need to worry about radix. For
1932+
* radix, mmu_highuser_ssize remains unchanged from 256MB.
19301933
*/
19311934
if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
19321935
base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);

arch/powerpc/xmon/xmon.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2913,7 +2913,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
29132913
printf("%s", after);
29142914
}
29152915

2916-
#ifdef CONFIG_PPC_BOOK3S_64
2916+
#ifdef CONFIG_PPC_STD_MMU_64
29172917
void dump_segments(void)
29182918
{
29192919
int i;

0 commit comments

Comments (0)