Commit e1ba1c9

Merge tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux
Pull RISC-V cleanups and ABI fixes from Palmer Dabbelt:
 "This contains a handful of small cleanups that are a result of feedback
  that didn't make it into our original patch set, either because the
  feedback hadn't been given yet, I missed the original emails, or we
  weren't ready to submit the changes yet.

  I've been maintaining the various cleanup patch sets I have as their own
  branches, which I then merged together and signed. Each merge commit has
  a short summary of the changes, and each branch is based on your latest
  tag (4.15-rc1, in this case). If this isn't the right way to do this then
  feel free to suggest something else, but it seems sane to me.

  Here's a short summary of the changes, roughly in order of how
  interesting they are:

   - libgcc.h has been moved from include/lib, where it's the only member,
     to include/linux. This is meant to avoid tab completion conflicts.

   - VDSO entries for clock_get/gettimeofday/getcpu have been added. These
     are simple syscalls now, but we want to let glibc use them from the
     start so we can make them faster later.

   - A VDSO entry for instruction cache flushing has been added so
     userspace can flush the instruction cache.

   - The VDSO symbol versions for __vdso_cmpxchg{32,64} have been removed,
     as those VDSO entries don't actually exist.

   - __io_writes has been corrected to respect the given type.

   - A new READ_ONCE in arch_spin_is_locked().

   - __test_and_op_bit_ord() is now actually ordered.

   - Various small fixes throughout the tree to enable allmodconfig to
     build cleanly.

   - Removal of some dead code in our atomic support headers.

   - Improvements to various comments in our atomic support headers"

* tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux: (23 commits)
  RISC-V: __io_writes should respect the length argument
  move libgcc.h to include/linux
  RISC-V: Clean up an unused include
  RISC-V: Allow userspace to flush the instruction cache
  RISC-V: Flush I$ when making a dirty page executable
  RISC-V: Add missing include
  RISC-V: Use define for get_cycles like other architectures
  RISC-V: Provide stub of setup_profiling_timer()
  RISC-V: Export some expected symbols for modules
  RISC-V: move empty_zero_page definition to C and export it
  RISC-V: io.h: type fixes for warnings
  RISC-V: use RISCV_{INT,SHORT} instead of {INT,SHORT} for asm macros
  RISC-V: use generic serial.h
  RISC-V: remove spin_unlock_wait()
  RISC-V: `sfence.vma` orderes the instruction cache
  RISC-V: Add READ_ONCE in arch_spin_is_locked()
  RISC-V: __test_and_op_bit_ord should be strongly ordered
  RISC-V: Remove smb_mb__{before,after}_spinlock()
  RISC-V: Remove __smp_bp__{before,after}_atomic
  RISC-V: Comment on why {,cmp}xchg is ordered how it is
  ...
2 parents 4b1967c + 3b62de2 commit e1ba1c9

40 files changed: +498 and −146 lines

arch/riscv/include/asm/Kbuild

Lines changed: 1 addition & 0 deletions
@@ -40,6 +40,7 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h

arch/riscv/include/asm/asm.h

Lines changed: 6 additions & 6 deletions
@@ -58,17 +58,17 @@
 #endif

 #if (__SIZEOF_INT__ == 4)
-#define INT		__ASM_STR(.word)
-#define SZINT		__ASM_STR(4)
-#define LGINT		__ASM_STR(2)
+#define RISCV_INT	__ASM_STR(.word)
+#define RISCV_SZINT	__ASM_STR(4)
+#define RISCV_LGINT	__ASM_STR(2)
 #else
 #error "Unexpected __SIZEOF_INT__"
 #endif

 #if (__SIZEOF_SHORT__ == 2)
-#define SHORT		__ASM_STR(.half)
-#define SZSHORT	__ASM_STR(2)
-#define LGSHORT	__ASM_STR(1)
+#define RISCV_SHORT	__ASM_STR(.half)
+#define RISCV_SZSHORT	__ASM_STR(2)
+#define RISCV_LGSHORT	__ASM_STR(1)
 #else
 #error "Unexpected __SIZEOF_SHORT__"
 #endif

arch/riscv/include/asm/atomic.h

Lines changed: 54 additions & 49 deletions
@@ -50,30 +50,30 @@ static __always_inline void atomic64_set(atomic64_t *v, long i)
  * have the AQ or RL bits set. These don't return anything, so there's only
  * one version to worry about.
  */
-#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix)		\
-static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
-{									\
-	__asm__ __volatile__ (						\
-		"amo" #asm_op "." #asm_type " zero, %1, %0"		\
-		: "+A" (v->counter)					\
-		: "r" (I)						\
-		: "memory");						\
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)	\
+{									\
+	__asm__ __volatile__ (						\
+		"amo" #asm_op "." #asm_type " zero, %1, %0"		\
+		: "+A" (v->counter)					\
+		: "r" (I)						\
+		: "memory");						\
 }

 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_OP (op, asm_op, c_op, I, w, int,   )
+#define ATOMIC_OPS(op, asm_op, I)					\
+        ATOMIC_OP (op, asm_op, I, w, int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_OP (op, asm_op, c_op, I, w, int,   )			\
-        ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I)					\
+        ATOMIC_OP (op, asm_op, I, w, int,   )				\
+        ATOMIC_OP (op, asm_op, I, d, long, 64)
 #endif

-ATOMIC_OPS(add, add, +,  i)
-ATOMIC_OPS(sub, add, +, -i)
-ATOMIC_OPS(and, and, &,  i)
-ATOMIC_OPS( or,  or, |,  i)
-ATOMIC_OPS(xor, xor, ^,  i)
+ATOMIC_OPS(add, add,  i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and,  i)
+ATOMIC_OPS( or,  or,  i)
+ATOMIC_OPS(xor, xor,  i)

 #undef ATOMIC_OP
 #undef ATOMIC_OPS
@@ -83,7 +83,7 @@ ATOMIC_OPS(xor, xor, ^, i)
  * There's two flavors of these: the arithmatic ops have both fetch and return
  * versions, while the logical ops only have fetch versions.
  */
-#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix)	\
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix)	\
 static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v)	\
 {									\
 	register c_type ret;						\
@@ -103,13 +103,13 @@ static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, ato

 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)			\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   )	\
+        ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int,   )	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)			\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   )	\
+        ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int,   )	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   )	\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64)	\
+        ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64)	\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
 #endif

@@ -126,28 +126,28 @@ ATOMIC_OPS(sub, add, +, -i, .aqrl, )
 #undef ATOMIC_OPS

 #ifdef CONFIG_GENERIC_ATOMIC64
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)			\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int,   )
 #else
-#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or)			\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   )	\
-        ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#define ATOMIC_OPS(op, asm_op, I, asm_or, c_or)				\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int,   )	\
+        ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
 #endif

-ATOMIC_OPS(and, and, &, i,      , _relaxed)
-ATOMIC_OPS(and, and, &, i, .aq  , _acquire)
-ATOMIC_OPS(and, and, &, i, .rl  , _release)
-ATOMIC_OPS(and, and, &, i, .aqrl,         )
+ATOMIC_OPS(and, and, i,      , _relaxed)
+ATOMIC_OPS(and, and, i, .aq  , _acquire)
+ATOMIC_OPS(and, and, i, .rl  , _release)
+ATOMIC_OPS(and, and, i, .aqrl,         )

-ATOMIC_OPS( or,  or, |, i,      , _relaxed)
-ATOMIC_OPS( or,  or, |, i, .aq  , _acquire)
-ATOMIC_OPS( or,  or, |, i, .rl  , _release)
-ATOMIC_OPS( or,  or, |, i, .aqrl,         )
+ATOMIC_OPS( or,  or, i,      , _relaxed)
+ATOMIC_OPS( or,  or, i, .aq  , _acquire)
+ATOMIC_OPS( or,  or, i, .rl  , _release)
+ATOMIC_OPS( or,  or, i, .aqrl,         )

-ATOMIC_OPS(xor, xor, ^, i,      , _relaxed)
-ATOMIC_OPS(xor, xor, ^, i, .aq  , _acquire)
-ATOMIC_OPS(xor, xor, ^, i, .rl  , _release)
-ATOMIC_OPS(xor, xor, ^, i, .aqrl,         )
+ATOMIC_OPS(xor, xor, i,      , _relaxed)
+ATOMIC_OPS(xor, xor, i, .aq  , _acquire)
+ATOMIC_OPS(xor, xor, i, .rl  , _release)
+ATOMIC_OPS(xor, xor, i, .aqrl,         )

 #undef ATOMIC_OPS

@@ -182,13 +182,13 @@ ATOMIC_OPS(add_negative, add, <, 0)
 #undef ATOMIC_OP
 #undef ATOMIC_OPS

-#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix)			\
+#define ATOMIC_OP(op, func_op, I, c_type, prefix)			\
 static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v)	\
 {									\
 	atomic##prefix##_##func_op(I, v);				\
 }

-#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix)		\
+#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)			\
 static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)	\
 {									\
 	return atomic##prefix##_fetch_##func_op(I, v);			\
@@ -202,16 +202,16 @@ static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t

 #ifdef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_OP       (op, asm_op, c_op, I, int,   )			\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, int,   )			\
+        ATOMIC_OP       (op, asm_op, I, int,   )			\
+        ATOMIC_FETCH_OP (op, asm_op, I, int,   )			\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   )
 #else
 #define ATOMIC_OPS(op, asm_op, c_op, I)					\
-        ATOMIC_OP       (op, asm_op, c_op, I, int,   )			\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, int,   )			\
+        ATOMIC_OP       (op, asm_op, I, int,   )			\
+        ATOMIC_FETCH_OP (op, asm_op, I, int,   )			\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   )			\
-        ATOMIC_OP       (op, asm_op, c_op, I, long, 64)			\
-        ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64)			\
+        ATOMIC_OP       (op, asm_op, I, long, 64)			\
+        ATOMIC_FETCH_OP (op, asm_op, I, long, 64)			\
         ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
 #endif

@@ -300,8 +300,13 @@ static __always_inline long atomic64_inc_not_zero(atomic64_t *v)

 /*
  * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
- * {cmp,}xchg and the operations that return, so they need a barrier. We just
- * use the other implementations directly.
+ * {cmp,}xchg and the operations that return, so they need a barrier.
+ */
+/*
+ * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+ * assigning the same barrier to both the LR and SC operations, but that might
+ * not make any sense. We're waiting on a memory model specification to
+ * determine exactly what the right thing to do is here.
  */
 #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or)			\
 static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n)	\
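The new FIXME refers to how the ordering suffix is applied in the cmpxchg path: the same suffix ends up on both the load-reserved and the store-conditional. Below is a minimal sketch of such an LR/SC compare-and-exchange loop, written for this note rather than copied from the kernel's __cmpxchg() macro; the choice of .aqrl on both instructions mirrors the behaviour the comment questions.

static inline int cmpxchg32_sketch(volatile int *p, int old, int new)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w.aqrl %0, %2\n"	/* load-reserved, ordering suffix on the LR */
		"	bne	%0, %3, 1f\n"	/* value differs: no store, exit the loop */
		"	sc.w.aqrl %1, %4, %2\n"	/* store-conditional, same suffix on the SC */
		"	bnez	%1, 0b\n"	/* reservation lost: retry */
		"1:\n"
		: "=&r" (prev), "=&r" (rc), "+A" (*p)
		: "r" (old), "r" (new)
		: "memory");

	return prev;	/* caller compares against 'old' to detect success */
}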

arch/riscv/include/asm/barrier.h

Lines changed: 0 additions & 23 deletions
@@ -38,29 +38,6 @@
 #define smp_rmb()	RISCV_FENCE(r,r)
 #define smp_wmb()	RISCV_FENCE(w,w)

-/*
- * These fences exist to enforce ordering around the relaxed AMOs. The
- * documentation defines that
- * "
- *     atomic_fetch_add();
- *   is equivalent to:
- *     smp_mb__before_atomic();
- *     atomic_fetch_add_relaxed();
- *     smp_mb__after_atomic();
- * "
- * So we emit full fences on both sides.
- */
-#define __smb_mb__before_atomic()	smp_mb()
-#define __smb_mb__after_atomic()	smp_mb()
-
-/*
- * These barriers prevent accesses performed outside a spinlock from being moved
- * inside a spinlock. Since RISC-V sets the aq/rl bits on our spinlock only
- * enforce release consistency, we need full fences here.
- */
-#define smb_mb__before_spinlock()	smp_mb()
-#define smb_mb__after_spinlock()	smp_mb()
-
 #include <asm-generic/barrier.h>

 #endif /* __ASSEMBLY__ */
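The removed comment quoted the rule from the kernel's atomic_t documentation; since the misspelled __smb_mb__* names were never referenced and asm-generic/barrier.h already supplies full-barrier defaults, the local copies were dead code. For reference, the equivalence the comment cited looks like this in generic kernel code (a restatement of the documented rule, not code from this tree):

static atomic_t counter = ATOMIC_INIT(0);

static void fully_ordered(void)
{
	atomic_fetch_add(1, &counter);		/* fully ordered RMW */
}

static void equivalent_form(void)
{
	smp_mb__before_atomic();		/* full fence before ... */
	atomic_fetch_add_relaxed(1, &counter);	/* ... a relaxed RMW ... */
	smp_mb__after_atomic();			/* ... full fence after */
}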

arch/riscv/include/asm/bitops.h

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@
 		: "memory");

 #define __test_and_op_bit(op, mod, nr, addr)			\
-	__test_and_op_bit_ord(op, mod, nr, addr, )
+	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
 #define __op_bit(op, mod, nr, addr)				\
 	__op_bit_ord(op, mod, nr, addr, )
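__test_and_op_bit() backs test_and_set_bit() and friends, which the kernel requires to be fully ordered; with an empty ordering argument the macro emitted a relaxed AMO. Roughly, on a 64-bit kernel the generated instruction changes as shown below (hand-written for this note, not taken from the header):

	# before the fix: relaxed atomic OR, no ordering guarantee
	amoor.d      a5, a1, (a0)
	# after the fix: acquire + release, i.e. fully ordered
	amoor.d.aqrl a5, a1, (a0)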

arch/riscv/include/asm/bug.h

Lines changed: 3 additions & 3 deletions
@@ -27,8 +27,8 @@
 typedef u32 bug_insn_t;

 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
-#define __BUG_ENTRY_ADDR	INT " 1b - 2b"
-#define __BUG_ENTRY_FILE	INT " %0 - 2b"
+#define __BUG_ENTRY_ADDR	RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE	RISCV_INT " %0 - 2b"
 #else
 #define __BUG_ENTRY_ADDR	RISCV_PTR " 1b"
 #define __BUG_ENTRY_FILE	RISCV_PTR " %0"
@@ -38,7 +38,7 @@ typedef u32 bug_insn_t;
 #define __BUG_ENTRY			\
 	__BUG_ENTRY_ADDR "\n\t"		\
 	__BUG_ENTRY_FILE "\n\t"		\
-	SHORT " %1"
+	RISCV_SHORT " %1"
 #else
 #define __BUG_ENTRY			\
 	__BUG_ENTRY_ADDR

arch/riscv/include/asm/cacheflush.h

Lines changed: 26 additions & 4 deletions
@@ -18,22 +18,44 @@

 #undef flush_icache_range
 #undef flush_icache_user_range
+#undef flush_dcache_page

 static inline void local_flush_icache_all(void)
 {
 	asm volatile ("fence.i" ::: "memory");
 }

+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
 #ifndef CONFIG_SMP

-#define flush_icache_range(start, end) local_flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()

 #else /* CONFIG_SMP */

-#define flush_icache_range(start, end) sbi_remote_fence_i(0)
-#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+#define flush_icache_all() sbi_remote_fence_i(0)
+void flush_icache_mm(struct mm_struct *mm, bool local);

 #endif /* CONFIG_SMP */

+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL	1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL	(SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
 #endif /* _ASM_RISCV_CACHEFLUSH_H */
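These flag bits are the kernel side of the userspace instruction-cache flush interface mentioned in the merge description. From userspace the portable way to reach it is the compiler's cache-clear builtin; the sketch below assumes a JIT-style use case and that __builtin___clear_cache() is routed to the new syscall/VDSO entry on RISC-V Linux, which is an expectation rather than something this page states.

#include <string.h>
#include <sys/mman.h>

typedef int (*ret42_fn)(void);

/* Emit "li a0, 42; ret" into an executable buffer and make it runnable. */
static ret42_fn emit_ret42(void)
{
	static const unsigned int insns[] = {
		0x02a00513,	/* addi a0, zero, 42 */
		0x00008067,	/* jalr zero, 0(ra), i.e. ret */
	};
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return NULL;
	memcpy(buf, insns, sizeof(insns));

	/* Without this, the hart may still fetch stale instructions for 'buf'. */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(insns));

	return (ret42_fn)buf;
}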

arch/riscv/include/asm/io.h

Lines changed: 10 additions & 8 deletions
@@ -19,6 +19,8 @@
 #ifndef _ASM_RISCV_IO_H
 #define _ASM_RISCV_IO_H

+#include <linux/types.h>
+
 #ifdef CONFIG_MMU

 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -32,7 +34,7 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 #define ioremap_wc(addr, size) ioremap((addr), (size))
 #define ioremap_wt(addr, size) ioremap((addr), (size))

-extern void iounmap(void __iomem *addr);
+extern void iounmap(volatile void __iomem *addr);

 #endif /* CONFIG_MMU */

@@ -250,7 +252,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 		const ctype *buf = buffer;				\
									\
 		do {							\
-			__raw_writeq(*buf++, addr);			\
+			__raw_write ## len(*buf++, addr);		\
 		} while (--count);					\
 	}								\
 	afence;								\
@@ -266,9 +268,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar())
 __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
 __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
 __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
-#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)

 __io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
 __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -280,9 +282,9 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
 __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
 __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)

 #ifdef CONFIG_64BIT
 __io_reads_ins(reads, u64, q, __io_br(), __io_ar())
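The first hunk above is the "__io_writes should respect the length argument" fix from the shortlog: the string-I/O writers previously stored every element with __raw_writeq() regardless of the element size. Hand-expanding the corrected macro for the byte case gives roughly the following (an approximation for illustration; the real header's parameter names and fence macros differ slightly):

static inline void __writesb(volatile void __iomem *addr,
			     const void *buffer, unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			/* was __raw_writeq(*buf++, addr) before the fix */
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
	/* the macro's "afence" argument (a write-side barrier) expands here */
}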

arch/riscv/include/asm/mmu.h

Lines changed: 4 additions & 0 deletions
@@ -19,6 +19,10 @@

 typedef struct {
 	void *vdso;
+#ifdef CONFIG_SMP
+	/* A local icache flush is needed before user execution can resume. */
+	cpumask_t icache_stale_mask;
+#endif
 } mm_context_t;

 #endif /* __ASSEMBLY__ */
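The new cpumask records which harts still need a local fence.i before they run user code from this mm. One plausible consumer, inferred from the field's comment rather than from this diff, is a deferred-flush check on the context-switch / return-to-user path:

/* Sketch only: flush locally iff this CPU was marked stale for 'mm'. */
static inline void flush_icache_deferred_sketch(struct mm_struct *mm,
						unsigned int cpu)
{
#ifdef CONFIG_SMP
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		local_flush_icache_all();	/* "fence.i" on this hart */
	}
#endif
}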
