Skip to content

Commit 494b516

Browse files
anadav authored and Ingo Molnar committed
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
As described in: 77b0bf5: ("kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs") GCC's inlining heuristics are broken with common asm() patterns used in kernel code, resulting in the effective disabling of inlining. The workaround is to set an assembly macro and call it from the inline assembly block. As a result GCC considers the inline assembly block as a single instruction. (Which it isn't, but that's the best we can get.) In this patch we wrap the paravirt call section tricks in a macro, to hide it from GCC. The effect of the patch is a more aggressive inlining, which also causes a size increase of kernel. text data bss dec hex filename 18147336 10226688 2957312 31331336 1de1408 ./vmlinux before 18162555 10226288 2957312 31346155 1de4deb ./vmlinux after (+14819) The number of static text symbols (non-inlined functions) goes down: Before: 40053 After: 39942 (-111) [ mingo: Rewrote the changelog. ] Tested-by: Kees Cook <keescook@chromium.org> Signed-off-by: Nadav Amit <namit@vmware.com> Reviewed-by: Juergen Gross <jgross@suse.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Alok Kataria <akataria@vmware.com> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: virtualization@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20181003213100.189959-8-namit@vmware.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent f81f8ad commit 494b516

File tree

2 files changed

+28
-29
lines changed

2 files changed

+28
-29
lines changed

arch/x86/include/asm/paravirt_types.h

Lines changed: 27 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -346,23 +346,11 @@ extern struct pv_lock_ops pv_lock_ops;
346346
#define paravirt_clobber(clobber) \
347347
[paravirt_clobber] "i" (clobber)
348348

349-
/*
350-
* Generate some code, and mark it as patchable by the
351-
* apply_paravirt() alternate instruction patcher.
352-
*/
353-
#define _paravirt_alt(insn_string, type, clobber) \
354-
"771:\n\t" insn_string "\n" "772:\n" \
355-
".pushsection .parainstructions,\"a\"\n" \
356-
_ASM_ALIGN "\n" \
357-
_ASM_PTR " 771b\n" \
358-
" .byte " type "\n" \
359-
" .byte 772b-771b\n" \
360-
" .short " clobber "\n" \
361-
".popsection\n"
362-
363349
/* Generate patchable code, with the default asm parameters. */
364-
#define paravirt_alt(insn_string) \
365-
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
350+
#define paravirt_call \
351+
"PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
352+
" clobber=\"%c[paravirt_clobber]\"" \
353+
" pv_opptr=\"%c[paravirt_opptr]\";"
366354

367355
/* Simple instruction patching code. */
368356
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -390,16 +378,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
390378

391379
int paravirt_disable_iospace(void);
392380

393-
/*
394-
* This generates an indirect call based on the operation type number.
395-
* The type number, computed in PARAVIRT_PATCH, is derived from the
396-
* offset into the paravirt_patch_template structure, and can therefore be
397-
* freely converted back into a structure offset.
398-
*/
399-
#define PARAVIRT_CALL \
400-
ANNOTATE_RETPOLINE_SAFE \
401-
"call *%c[paravirt_opptr];"
402-
403381
/*
404382
* These macros are intended to wrap calls through one of the paravirt
405383
* ops structs, so that they can be later identified and patched at
@@ -537,7 +515,7 @@ int paravirt_disable_iospace(void);
537515
/* since this condition will never hold */ \
538516
if (sizeof(rettype) > sizeof(unsigned long)) { \
539517
asm volatile(pre \
540-
paravirt_alt(PARAVIRT_CALL) \
518+
paravirt_call \
541519
post \
542520
: call_clbr, ASM_CALL_CONSTRAINT \
543521
: paravirt_type(op), \
@@ -547,7 +525,7 @@ int paravirt_disable_iospace(void);
547525
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
548526
} else { \
549527
asm volatile(pre \
550-
paravirt_alt(PARAVIRT_CALL) \
528+
paravirt_call \
551529
post \
552530
: call_clbr, ASM_CALL_CONSTRAINT \
553531
: paravirt_type(op), \
@@ -574,7 +552,7 @@ int paravirt_disable_iospace(void);
574552
PVOP_VCALL_ARGS; \
575553
PVOP_TEST_NULL(op); \
576554
asm volatile(pre \
577-
paravirt_alt(PARAVIRT_CALL) \
555+
paravirt_call \
578556
post \
579557
: call_clbr, ASM_CALL_CONSTRAINT \
580558
: paravirt_type(op), \
@@ -694,6 +672,26 @@ struct paravirt_patch_site {
694672
extern struct paravirt_patch_site __parainstructions[],
695673
__parainstructions_end[];
696674

675+
#else /* __ASSEMBLY__ */
676+
677+
/*
678+
* This generates an indirect call based on the operation type number.
679+
* The type number, computed in PARAVIRT_PATCH, is derived from the
680+
* offset into the paravirt_patch_template structure, and can therefore be
681+
* freely converted back into a structure offset.
682+
*/
683+
.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
684+
771: ANNOTATE_RETPOLINE_SAFE
685+
call *\pv_opptr
686+
772: .pushsection .parainstructions,"a"
687+
_ASM_ALIGN
688+
_ASM_PTR 771b
689+
.byte \type
690+
.byte 772b-771b
691+
.short \clobber
692+
.popsection
693+
.endm
694+
697695
#endif /* __ASSEMBLY__ */
698696

699697
#endif /* _ASM_X86_PARAVIRT_TYPES_H */

arch/x86/kernel/macros.S

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,3 +10,4 @@
1010
#include <asm/refcount.h>
1111
#include <asm/alternative-asm.h>
1212
#include <asm/bug.h>
13+
#include <asm/paravirt.h>

0 commit comments

Comments (0)