
Commit 92b0729

aegl authored and Ingo Molnar committed
x86/mm, x86/mce: Add memcpy_mcsafe()
Make use of the EXTABLE_FAULT exception table entries to write a kernel
copy routine that doesn't crash the system if it encounters a machine
check. Prime use case for this is to copy from large arrays of
non-volatile memory used as storage.

We have to use an unrolled copy loop for now because current hardware
implementations treat a machine check in "rep mov" as fatal. When that
is fixed we can simplify.

Return type is a "bool". True means that we copied OK, false means that
it didn't.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@gmail.com>
Link: http://lkml.kernel.org/r/a44e1055efc2d2a9473307b22c91caa437aa3f8b.1456439214.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
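For context: an _ASM_EXTABLE_FAULT() entry ties one annotated instruction to a recovery address, so the machine check handler can land the CPU on a fixup instead of panicking. A simplified C model of that lookup (illustrative only; the real x86 table stores 32-bit relative offsets plus a handler class, not absolute pointers):

/* Simplified model of an exception table entry (hypothetical types) */
struct ex_entry_model {
	unsigned long insn;	/* address of the load that may fault */
	unsigned long fixup;	/* where to resume, e.g. a fail label */
};

/* On a recoverable machine check, conceptually: find the entry whose
 * insn matches the faulting RIP and redirect execution to its fixup. */
static unsigned long ex_fixup_model(const struct ex_entry_model *tbl,
				    unsigned int n, unsigned long rip)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].insn == rip)
			return tbl[i].fixup;
	return 0;	/* no entry: the machine check remains fatal */
}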
1 parent ea2ca36 commit 92b0729

File tree

3 files changed: +132 additions, -0 deletions


arch/x86/include/asm/string_64.h

Lines changed: 13 additions & 0 deletions
@@ -78,6 +78,19 @@ int strcmp(const char *cs, const char *ct);
 #define memset(s, c, n) __memset(s, c, n)
 #endif
 
+/**
+ * memcpy_mcsafe - copy memory with indication if a machine check happened
+ *
+ * @dst:	destination address
+ * @src:	source address
+ * @cnt:	number of bytes to copy
+ *
+ * Low level memory copy function that catches machine checks
+ *
+ * Return true for success, false for fail
+ */
+bool memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_STRING_64_H */
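A caller treats a false return as a machine check consumed during the copy, not as a kernel crash. A minimal caller-side sketch (the pmem_read() wrapper and its -EIO policy are assumptions for illustration, not part of this commit):

/* Hypothetical wrapper; only memcpy_mcsafe() comes from this commit */
static int pmem_read(void *dst, const void *pmem_src, size_t len)
{
	if (!memcpy_mcsafe(dst, pmem_src, len))
		return -EIO;	/* machine check caught mid-copy */
	return 0;		/* all bytes copied */
}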

arch/x86/kernel/x8664_ksyms_64.c

Lines changed: 2 additions & 0 deletions
@@ -37,6 +37,8 @@ EXPORT_SYMBOL(__copy_user_nocache);
 EXPORT_SYMBOL(_copy_from_user);
 EXPORT_SYMBOL(_copy_to_user);
 
+EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
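Because the export is EXPORT_SYMBOL_GPL, only GPL-compatible modules (the intended NVDIMM/pmem drivers among them) can link against it. A sketch of the module side, with every name other than memcpy_mcsafe invented for illustration:

#include <linux/module.h>
#include <linux/string.h>	/* declares memcpy_mcsafe() on x86-64 */

static void *demo_src;		/* would point into non-volatile memory */

static int __init mcsafe_demo_init(void)
{
	char buf[64];

	if (demo_src && !memcpy_mcsafe(buf, demo_src, sizeof(buf)))
		return -EIO;	/* machine check during the copy */
	return 0;
}
module_init(mcsafe_demo_init);

MODULE_LICENSE("GPL");		/* required to use a GPL-only symbol */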

arch/x86/lib/memcpy_64.S

Lines changed: 117 additions & 0 deletions
@@ -177,3 +177,120 @@ ENTRY(memcpy_orig)
 .Lend:
 	retq
 ENDPROC(memcpy_orig)
+
+#ifndef CONFIG_UML
+/*
+ * memcpy_mcsafe - memory copy with machine check exception handling
+ * Note that we only catch machine checks when reading the source addresses.
+ * Writes to target are posted and don't generate machine checks.
+ */
+ENTRY(memcpy_mcsafe)
+	cmpl $8, %edx
+	/* Less than 8 bytes? Go to byte copy loop */
+	jb .L_no_whole_words
+
+	/* Check for bad alignment of source */
+	testl $7, %esi
+	/* Already aligned */
+	jz .L_8byte_aligned
+
+	/* Copy one byte at a time until source is 8-byte aligned */
+	movl %esi, %ecx
+	andl $7, %ecx
+	subl $8, %ecx
+	negl %ecx
+	subl %ecx, %edx
+.L_copy_leading_bytes:
+	movb (%rsi), %al
+	movb %al, (%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz .L_copy_leading_bytes
+
+.L_8byte_aligned:
+	/* Figure out how many whole cache lines (64-bytes) to copy */
+	movl %edx, %ecx
+	andl $63, %edx
+	shrl $6, %ecx
+	jz .L_no_whole_cache_lines
+
+	/* Loop copying whole cache lines */
+.L_cache_w0: movq (%rsi), %r8
+.L_cache_w1: movq 1*8(%rsi), %r9
+.L_cache_w2: movq 2*8(%rsi), %r10
+.L_cache_w3: movq 3*8(%rsi), %r11
+	movq %r8, (%rdi)
+	movq %r9, 1*8(%rdi)
+	movq %r10, 2*8(%rdi)
+	movq %r11, 3*8(%rdi)
+.L_cache_w4: movq 4*8(%rsi), %r8
+.L_cache_w5: movq 5*8(%rsi), %r9
+.L_cache_w6: movq 6*8(%rsi), %r10
+.L_cache_w7: movq 7*8(%rsi), %r11
+	movq %r8, 4*8(%rdi)
+	movq %r9, 5*8(%rdi)
+	movq %r10, 6*8(%rdi)
+	movq %r11, 7*8(%rdi)
+	leaq 64(%rsi), %rsi
+	leaq 64(%rdi), %rdi
+	decl %ecx
+	jnz .L_cache_w0
+
+	/* Are there any trailing 8-byte words? */
+.L_no_whole_cache_lines:
+	movl %edx, %ecx
+	andl $7, %edx
+	shrl $3, %ecx
+	jz .L_no_whole_words
+
+	/* Copy trailing words */
+.L_copy_trailing_words:
+	movq (%rsi), %r8
+	movq %r8, (%rdi)
+	leaq 8(%rsi), %rsi
+	leaq 8(%rdi), %rdi
+	decl %ecx
+	jnz .L_copy_trailing_words
+
+	/* Any trailing bytes? */
+.L_no_whole_words:
+	andl %edx, %edx
+	jz .L_done_memcpy_trap
+
+	/* Copy trailing bytes */
+	movl %edx, %ecx
+.L_copy_trailing_bytes:
+	movb (%rsi), %al
+	movb %al, (%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz .L_copy_trailing_bytes
+
+	/* Copy successful. Return true */
+.L_done_memcpy_trap:
+	movl $1, %eax
+	ret
+ENDPROC(memcpy_mcsafe)
+
+	.section .fixup, "ax"
+	/* Return false for any failure */
+.L_memcpy_mcsafe_fail:
+	xorl %eax, %eax
+	ret
+
+	.previous
+
+	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
+#endif
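Read back into C, the schedule above is: copy 8 - (src & 7) leading bytes when the source is misaligned (e.g. src & 7 == 3 gives 5 leading bytes), then whole 64-byte cache lines via eight annotated quadword loads, then whole 8-byte words, then any stray bytes. A rough, hypothetical C rendering of that schedule only (the function name is invented; the asm exists precisely because each source load needs an exception-table annotation, which C cannot express):

#include <linux/types.h>	/* bool, u64 */

static bool memcpy_mcsafe_c_model(void *dst, const void *src, size_t cnt)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t n;

	/* Leading bytes until the source is 8-byte aligned */
	if (cnt >= 8 && ((unsigned long)s & 7)) {
		n = 8 - ((unsigned long)s & 7);
		cnt -= n;
		while (n--)
			*d++ = *s++;
	}

	/* Whole 64-byte cache lines: eight 8-byte reads per iteration */
	for (n = cnt >> 6; n; n--, s += 64, d += 64) {
		int w;

		for (w = 0; w < 8; w++)
			((u64 *)d)[w] = ((const u64 *)s)[w];
	}
	cnt &= 63;

	/* Trailing whole 8-byte words */
	for (n = cnt >> 3; n; n--, s += 8, d += 8)
		*(u64 *)d = *(const u64 *)s;
	cnt &= 7;

	/* Trailing bytes */
	while (cnt--)
		*d++ = *s++;

	return true;	/* the asm fixup path returns false instead */
}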
