
Commit 14b3466

dgibson authored and paulusmack committed
[PATCH] Invert sense of SLB class bit
Currently, we set the class bit in kernel SLB entries, and clear it on user SLB entries. On POWER5, ERAT entries created in real mode have the class bit clear. So to avoid flushing kernel ERAT entries on each context switch, this patch inverts our usage of the class bit, setting it on user SLB entries and clearing it on kernel SLB entries.

Booted on POWER5 and G5.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
1 parent 0fdf0b8 commit 14b3466

4 files changed: +15 −12 lines


arch/ppc64/kernel/entry.S

Lines changed: 5 additions & 6 deletions
@@ -400,15 +400,14 @@ BEGIN_FTR_SECTION
 	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
 	cror	eq,4*cr1+eq,eq
 	beq	2f		/* if yes, don't slbie it */
-	oris	r0,r6,0x0800	/* set C (class) bit */
 
 	/* Bolt in the new stack SLB entry */
 	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
-	oris	r6,r6,(SLB_ESID_V)@h
-	ori	r6,r6,(SLB_NUM_BOLTED-1)@l
-	slbie	r0
-	slbie	r0		/* Workaround POWER5 < DD2.1 issue */
-	slbmte	r7,r6
+	oris	r0,r6,(SLB_ESID_V)@h
+	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
+	slbie	r6
+	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+	slbmte	r7,r0
 	isync
 
 2:

arch/ppc64/mm/hugetlbpage.c

Lines changed: 4 additions & 2 deletions
@@ -144,7 +144,8 @@ static void flush_low_segments(void *parm)
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
 		if (! (areas & (1U << i)))
 			continue;
-		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
+		asm volatile("slbie %0"
+			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");
@@ -164,7 +165,8 @@ static void flush_high_segments(void *parm)
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" ((i << HTLB_AREA_SHIFT) + (j << SID_SHIFT)));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
 
 	asm volatile("isync" : : : "memory");

arch/ppc64/mm/slb.c

Lines changed: 2 additions & 2 deletions
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	int i;
 	asm volatile("isync" : : : "memory");
 	for (i = 0; i < offset; i++) {
-		esid_data = (unsigned long)get_paca()->slb_cache[i]
-			<< SID_SHIFT;
+		esid_data = ((unsigned long)get_paca()->slb_cache[i]
+			<< SID_SHIFT) | SLBIE_C;
 		asm volatile("slbie %0" : : "r" (esid_data));
 	}
 	asm volatile("isync" : : : "memory");

include/asm-ppc64/mmu.h

Lines changed: 4 additions & 2 deletions
@@ -54,8 +54,10 @@ extern char initial_stab[];
 #define SLB_VSID_C	ASM_CONST(0x0000000000000080) /* class */
 #define SLB_VSID_LS	ASM_CONST(0x0000000000000070) /* size of largepage */
 
-#define SLB_VSID_KERNEL	(SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS)
+#define SLB_VSID_KERNEL	(SLB_VSID_KP)
+#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C		(0x08000000)
 
 /*
  * Hash table
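
For illustration only (not part of the patch): a minimal C sketch of how an slbie operand for a user segment is built once the class bit lives on user entries, mirroring the switch_slb() loop above. The helper name slbie_user_esid is hypothetical; SLBIE_C and SID_SHIFT come from include/asm-ppc64/mmu.h as modified by this commit.

#include <asm/mmu.h>		/* SLBIE_C, SID_SHIFT */

/*
 * Hypothetical helper: invalidate one *user* SLB entry, given the
 * segment index cached in paca->slb_cache[].  After this patch, user
 * SLB entries carry the class bit, so the slbie operand must have
 * SLBIE_C set; kernel entries (e.g. the bolted stack entry flushed in
 * entry.S) are invalidated with the class bit clear.
 */
static inline void slbie_user_esid(unsigned long esid)
{
	unsigned long esid_data = (esid << SID_SHIFT) | SLBIE_C;

	asm volatile("slbie %0" : : "r" (esid_data));
}

With such a helper, the loop body in switch_slb() would read roughly as slbie_user_esid(get_paca()->slb_cache[i]); the patch open-codes the same expression instead.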
