
Commit 6f121e5

amluto authored and H. Peter Anvin committed
x86, vdso: Reimplement vdso.so preparation in build-time C
Currently, vdso.so files are prepared and analyzed by a combination of objcopy, nm, some linker script tricks, and some simple ELF parsers in the kernel. Replace all of that with plain C code that runs at build time.

All five vdso images now generate .c files that are compiled and linked in to the kernel image.

This should cause only one userspace-visible change: the loaded vDSO images are stripped more heavily than they used to be. Everything outside the loadable segment is dropped. In particular, this causes the section table and section name strings to be missing. This should be fine: real dynamic loaders don't load or inspect these tables anyway. The result is roughly equivalent to eu-strip's --strip-sections option.

The purpose of this change is to enable the vvar and hpet mappings to be moved to the page following the vDSO load segment. Currently, it is possible for the section table to extend into the page after the load segment, so, if we map it, it risks overlapping the vvar or hpet page. This happens whenever the load segment is just under a multiple of PAGE_SIZE.

The only real subtlety here is that the old code had a C file with inline assembler that did 'call VDSO32_vsyscall' and a linker script that defined 'VDSO32_vsyscall = __kernel_vsyscall'. This most likely worked by accident: the linker script entry defines a symbol associated with an address as opposed to an alias for the real dynamic symbol __kernel_vsyscall. That caused ld to relocate the reference at link time instead of leaving an interposable dynamic relocation. Since the VDSO32_vsyscall hack is no longer needed, I now use 'call __kernel_vsyscall', and I added -Bsymbolic to make it work. vdso2c will generate an error and abort the build if the resulting image contains any dynamic relocations, so we won't silently generate bad vdso images. (Dynamic relocations are a problem because nothing will even attempt to relocate the vdso.)

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/2c4fcf45524162a34d87fdda1eb046b2a5cecee7.1399317206.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
1 parent cfda7bb commit 6f121e5
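Editor's note: for orientation, a generated vdso-image-*.c file pairs the raw bytes of the stripped vdso.so with the struct vdso_image that this commit declares in arch/x86/include/asm/vdso.h (see the diff below). The following is only an illustrative sketch, not actual vdso2c output; the bytes, sizes, and offsets are invented placeholders.

/*
 * Illustrative sketch only -- not the output of the real vdso2c tool.
 * Shows the general shape of a generated vdso-image-*.c file.
 * All bytes, sizes, and symbol offsets below are made up.
 */
#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/vdso.h>

static const char raw_data[8192] __page_aligned_data = {
	0x7f, 0x45, 0x4c, 0x46,		/* ELF magic of the stripped image ... */
};

static struct page *pages[2];		/* one entry per PAGE_SIZE of raw_data */

const struct vdso_image vdso_image_64 = {
	.data	= (void *)raw_data,
	.size	= sizeof(raw_data),	/* always a multiple of PAGE_SIZE */
	.pages	= pages,
	/* the sym_* offsets would be filled in for the 32-bit images */
};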

File tree: 18 files changed, +400 −260 lines

arch/x86/ia32/ia32_signal.c

Lines changed: 4 additions & 4 deletions

@@ -383,8 +383,8 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
 	} else {
 		/* Return stub is in 32bit vsyscall page */
 		if (current->mm->context.vdso)
-			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-						 sigreturn);
+			restorer = current->mm->context.vdso +
+				selected_vdso32->sym___kernel_sigreturn;
 		else
 			restorer = &frame->retcode;
 	}
@@ -462,8 +462,8 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
 	else
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-					 rt_sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_rt_sigreturn;
 	put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
 	/*

arch/x86/include/asm/elf.h

Lines changed: 4 additions & 3 deletions

@@ -299,15 +299,15 @@ do { \
 do { \
 	if (vdso64_enabled) \
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, \
-			    (unsigned long)current->mm->context.vdso); \
+			    (unsigned long __force)current->mm->context.vdso); \
 } while (0)
 
 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
 #define ARCH_DLINFO_X32 \
 do { \
 	if (vdso64_enabled) \
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, \
-			    (unsigned long)current->mm->context.vdso); \
+			    (unsigned long __force)current->mm->context.vdso); \
 } while (0)
 
 #define AT_SYSINFO 32
@@ -325,7 +325,8 @@ else \
 #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
 
 #define VDSO_ENTRY \
-	((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+	((unsigned long)current->mm->context.vdso + \
+	 selected_vdso32->sym___kernel_vsyscall)
 
 struct linux_binprm;

arch/x86/include/asm/mmu.h

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ typedef struct {
 #endif
 
 	struct mutex lock;
-	void *vdso;
+	void __user *vdso;
 } mm_context_t;
 
 #ifdef CONFIG_SMP
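Editor's note: because context.vdso is now a __user pointer, the elf.h and init_64.c hunks in this commit add __force to their casts. A minimal illustration of why (this helper is not part of the commit), assuming sparse address-space checking:

/*
 * Illustration only: with sparse, converting a __user pointer to a
 * plain integer warns that the cast removes the __user address space;
 * __force marks the conversion as intentional.
 */
#include <linux/compiler.h>	/* __user, __force annotations */

static unsigned long vdso_base(void __user *vdso)
{
	return (unsigned long __force)vdso;
}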

arch/x86/include/asm/vdso.h

Lines changed: 25 additions & 45 deletions

@@ -3,63 +3,43 @@
 
 #include <asm/page_types.h>
 #include <linux/linkage.h>
+#include <linux/init.h>
 
-#ifdef __ASSEMBLER__
+#ifndef __ASSEMBLER__
 
-#define DEFINE_VDSO_IMAGE(symname, filename) \
-	__PAGE_ALIGNED_DATA ; \
-	.globl symname##_start, symname##_end ; \
-	.align PAGE_SIZE ; \
-	symname##_start: ; \
-	.incbin filename ; \
-	symname##_end: ; \
-	.align PAGE_SIZE /* extra data here leaks to userspace. */ ; \
-\
-.previous ; \
-\
-	.globl symname##_pages ; \
-	.bss ; \
-	.align 8 ; \
-	.type symname##_pages, @object ; \
-	symname##_pages: ; \
-	.zero (symname##_end - symname##_start + PAGE_SIZE - 1) / PAGE_SIZE * (BITS_PER_LONG / 8) ; \
-	.size symname##_pages, .-symname##_pages
+struct vdso_image {
+	void *data;
+	unsigned long size; /* Always a multiple of PAGE_SIZE */
+	struct page **pages; /* Big enough for data/size page pointers */
 
-#else
+	unsigned long alt, alt_len;
 
-#define DECLARE_VDSO_IMAGE(symname) \
-	extern char symname##_start[], symname##_end[]; \
-	extern struct page *symname##_pages[]
+	unsigned long sym_VDSO32_NOTE_MASK;
+	unsigned long sym___kernel_sigreturn;
+	unsigned long sym___kernel_rt_sigreturn;
+	unsigned long sym___kernel_vsyscall;
+	unsigned long sym_VDSO32_SYSENTER_RETURN;
+};
 
-#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+#ifdef CONFIG_X86_64
+extern const struct vdso_image vdso_image_64;
+#endif
 
-#include <asm/vdso32.h>
+#ifdef CONFIG_X86_X32
+extern const struct vdso_image vdso_image_x32;
+#endif
 
-DECLARE_VDSO_IMAGE(vdso32_int80);
+#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_int80;
 #ifdef CONFIG_COMPAT
-DECLARE_VDSO_IMAGE(vdso32_syscall);
+extern const struct vdso_image vdso_image_32_syscall;
 #endif
-DECLARE_VDSO_IMAGE(vdso32_sysenter);
+extern const struct vdso_image vdso_image_32_sysenter;
 
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO32_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO32_SYMBOL(base, name) \
-({ \
-	extern const char VDSO32_##name[]; \
-	(void __user *)(VDSO32_##name + (unsigned long)(base)); \
-})
+extern const struct vdso_image *selected_vdso32;
 #endif
 
-/*
- * These symbols are defined with the addresses in the vsyscall page.
- * See vsyscall-sigreturn.S.
- */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
-
-void __init patch_vdso32(void *vdso, size_t len);
+extern void __init init_vdso_image(const struct vdso_image *image);
 
 #endif /* __ASSEMBLER__ */

arch/x86/kernel/signal.c

Lines changed: 4 additions & 2 deletions

@@ -298,7 +298,8 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
 	}
 
 	if (current->mm->context.vdso)
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_sigreturn;
 	else
 		restorer = &frame->retcode;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -361,7 +362,8 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 		/* Set up to return from userspace. */
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_sigreturn;
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
 		put_user_ex(restorer, &frame->pretcode);

arch/x86/mm/init_64.c

Lines changed: 2 additions & 1 deletion

@@ -1223,7 +1223,8 @@ int in_gate_area_no_mm(unsigned long addr)
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+	if (vma->vm_mm && vma->vm_start ==
+	    (long __force)vma->vm_mm->context.vdso)
 		return "[vdso]";
 	if (vma == &gate_vma)
 		return "[vsyscall]";

arch/x86/vdso/.gitignore

Lines changed: 2 additions & 3 deletions

@@ -1,8 +1,7 @@
 vdso.lds
-vdso-syms.lds
 vdsox32.lds
-vdsox32-syms.lds
-vdso32-syms.lds
 vdso32-syscall-syms.lds
 vdso32-sysenter-syms.lds
 vdso32-int80-syms.lds
+vdso-image-*.c
+vdso2c

arch/x86/vdso/Makefile

Lines changed: 32 additions & 58 deletions

@@ -24,15 +24,30 @@ vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
 
 # files to link into kernel
 obj-y += vma.o
-obj-$(VDSO64-y) += vdso.o
-obj-$(VDSOX32-y) += vdsox32.o
-obj-$(VDSO32-y) += vdso32.o vdso32-setup.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y) += 64
+vdso_img-$(VDSOX32-y) += x32
+vdso_img-$(VDSO32-y) += 32-int80
+vdso_img-$(CONFIG_COMPAT) += 32-syscall
+vdso_img-$(VDSO32-y) += 32-sysenter
+
+obj-$(VDSO32-y) += vdso32-setup.o
 
 vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
 
 $(obj)/vdso.o: $(obj)/vdso.so
 
-targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c)
 
 export CPPFLAGS_vdso.lds += -P -C
 
@@ -41,14 +56,18 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
 	-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
 	$(DISABLE_LTO)
 
-$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
-
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,vdso)
 
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
-	$(call if_changed,objcopy)
+hostprogs-y += vdso2c
+
+quiet_cmd_vdso2c = VDSO2C $@
+define cmd_vdso2c
+	$(obj)/vdso2c $< $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
+	$(call if_changed,vdso2c)
 
 #
 # Don't omit frame pointers for ease of userspace debugging, but do
@@ -68,22 +87,6 @@ CFLAGS_REMOVE_vclock_gettime.o = -pg
 CFLAGS_REMOVE_vgetcpu.o = -pg
 CFLAGS_REMOVE_vvar.o = -pg
 
-targets += vdso-syms.lds
-obj-$(VDSO64-y) += vdso-syms.lds
-
-#
-# Match symbols in the DSO that look like VDSO*; produce a file of constants.
-#
-sed-vdsosym := -e 's/^00*/0/' \
-	-e 's/^\([0-9a-fA-F]*\) . \(VDSO[a-zA-Z0-9_]*\)$$/\2 = 0x\1;/p'
-quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
-	$(NM) $< | LC_ALL=C sed -n $(sed-vdsosym) | LC_ALL=C sort > $@
-endef
-
-$(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
-	$(call if_changed,vdsosym)
-
 #
 # X32 processes use x32 vDSO to access 64bit kernel data.
 #
@@ -94,9 +97,6 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
 # so that it can reach 64bit address space with 64bit pointers.
 #
 
-targets += vdsox32-syms.lds
-obj-$(VDSOX32-y) += vdsox32-syms.lds
-
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
 	-Wl,-soname=linux-vdso.so.1 \
@@ -113,17 +113,14 @@ quiet_cmd_x32 = X32 $@
 $(obj)/%-x32.o: $(obj)/%.o FORCE
 	$(call if_changed,x32)
 
-targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y)
-
-$(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so
+targets += vdsox32.lds $(vobjx32s-y)
 
 $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
 	$(call if_changed,vdso)
 
 #
 # Build multiple 32-bit vDSO images to choose from at boot time.
 #
-obj-$(VDSO32-y) += vdso32-syms.lds
 vdso32.so-$(VDSO32-y) += int80
 vdso32.so-$(CONFIG_COMPAT) += syscall
 vdso32.so-$(VDSO32-y) += sysenter
@@ -138,10 +135,8 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
-targets += $(vdso32-images) $(vdso32-images:=.dbg)
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-
-extra-y += $(vdso32-images)
+targets += vdso32/vclock_gettime.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
@@ -166,27 +161,6 @@ $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
 	$(obj)/vdso32/%.o
 	$(call if_changed,vdso)
 
-# Make vdso32-*-syms.lds from each image, and then make sure they match.
-# The only difference should be that some do not define VDSO32_SYSENTER_RETURN.
-
-targets += vdso32-syms.lds $(vdso32.so-y:%=vdso32-%-syms.lds)
-
-quiet_cmd_vdso32sym = VDSOSYM $@
-define cmd_vdso32sym
-	if LC_ALL=C sort -u $(filter-out FORCE,$^) > $(@D)/.tmp_$(@F) && \
-	$(foreach H,$(filter-out FORCE,$^),\
-	if grep -q VDSO32_SYSENTER_RETURN $H; \
-	then diff -u $(@D)/.tmp_$(@F) $H; \
-	else sed /VDSO32_SYSENTER_RETURN/d $(@D)/.tmp_$(@F) | \
-	diff -u - $H; fi &&) : ;\
-	then mv -f $(@D)/.tmp_$(@F) $@; \
-	else rm -f $(@D)/.tmp_$(@F); exit 1; \
-	fi
-endef
-
-$(obj)/vdso32-syms.lds: $(vdso32.so-y:%=$(obj)/vdso32-%-syms.lds) FORCE
-	$(call if_changed,vdso32sym)
-
 #
 # The DSO images are built using a special linker script.
 #
@@ -197,7 +171,7 @@ quiet_cmd_vdso = VDSO $@
 	sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-	$(LTO_CFLAGS)
+	-Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
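Editor's note: the rules above run the new vdso2c host tool on each vdso*.so.dbg to generate the matching vdso-image-*.c. The commit message also says vdso2c aborts the build if the image still contains dynamic relocations. A rough sketch of such a check, assuming a DT_NULL-terminated view of the image's PT_DYNAMIC entries (this is not the actual vdso2c source):

/*
 * Hypothetical sketch: reject a vDSO image that still needs dynamic
 * relocations, since nothing will ever relocate the vDSO at runtime.
 */
#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

static void fail(const char *msg)
{
	fprintf(stderr, "Error: %s\n", msg);
	exit(1);
}

static void check_no_dynamic_relocs(const Elf64_Dyn *dyn)
{
	for (; dyn->d_tag != DT_NULL; dyn++) {
		switch (dyn->d_tag) {
		case DT_REL:
		case DT_RELA:
		case DT_RELSZ:
		case DT_RELASZ:
		case DT_JMPREL:
		case DT_PLTRELSZ:
			/* A leftover dynamic relocation would silently
			 * produce a broken image; abort the build instead. */
			if (dyn->d_un.d_val != 0)
				fail("vdso image contains dynamic relocations");
			break;
		default:
			break;
		}
	}
}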

arch/x86/vdso/vclock_gettime.c

Lines changed: 2 additions & 2 deletions

@@ -154,7 +154,7 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	asm(
 		"mov %%ebx, %%edx \n"
 		"mov %2, %%ebx \n"
-		"call VDSO32_vsyscall \n"
+		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
 		: "=a" (ret)
 		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
@@ -169,7 +169,7 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 	asm(
 		"mov %%ebx, %%edx \n"
 		"mov %2, %%ebx \n"
-		"call VDSO32_vsyscall \n"
+		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
 		: "=a" (ret)
 		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)

arch/x86/vdso/vdso.S

Lines changed: 0 additions & 3 deletions
This file was deleted.
