Commit 4f72adc

x86/vdso: Simplify the invalid vclock case
The code flow for the vclocks is convoluted as it requires the vclocks which can be invalidated separately from the vsyscall_gtod_data sequence to store the fact in a separate variable. That's inefficient.

Restructure the code so the vclock readout returns cycles and the conversion to nanoseconds is handled at the call site.

If the clock gets invalidated or vclock is already VCLOCK_NONE, return U64_MAX as the cycle value, which is invalid for all clocks, and leave the sequence loop immediately in that case by calling the fallback function directly.

This allows to remove the gettimeofday fallback as it now uses the clock_gettime() fallback and does the nanoseconds to microseconds conversion in the same way as it does when the vclock is functional. It does not make a difference whether the division by 1000 happens in the kernel fallback or in userspace.

Generates way better code and gains a few cycles back.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Rickard <matt@softrans.com.au>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: devel@linuxdriverproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Juergen Gross <jgross@suse.com>
Link: https://lkml.kernel.org/r/20180917130707.657928937@linutronix.de
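For reference, the ns-to-µs equivalence the last paragraph relies on can be illustrated with a minimal userspace sketch (ordinary C, not the vDSO code itself): filling a timespec through the clock_gettime() path and dividing the nanoseconds by 1000 yields the same timeval that the removed gettimeofday fallback would have produced.

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	/* Take the clock_gettime() path, as the reworked __vdso_gettimeofday() does. */
	clock_gettime(CLOCK_REALTIME, &ts);

	/* Convert ns -> us in userspace; the kernel fallback would do the same division. */
	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = ts.tv_nsec / 1000;

	printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}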
1 parent f3e8393 commit 4f72adc

File tree

1 file changed: +21 -61 lines changed


arch/x86/entry/vdso/vclock_gettime.c

Lines changed: 21 additions & 61 deletions
@@ -49,17 +49,6 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
-	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
-	     "memory", "rcx", "r11");
-	return ret;
-}
-
-
 #else
 
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
@@ -77,21 +66,6 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 	return ret;
 }
 
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-	long ret;
-
-	asm (
-		"mov %%ebx, %%edx \n"
-		"mov %[tv], %%ebx \n"
-		"call __kernel_vsyscall \n"
-		"mov %%edx, %%ebx \n"
-		: "=a" (ret), "=m" (*tv), "=m" (*tz)
-		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
-		: "memory", "edx");
-	return ret;
-}
-
 #endif
 
 #ifdef CONFIG_PARAVIRT_CLOCK
@@ -100,7 +74,7 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace u64 vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(void)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
 	u64 ret;
@@ -132,10 +106,8 @@ static notrace u64 vread_pvclock(int *mode)
 	do {
 		version = pvclock_read_begin(pvti);
 
-		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
-			*mode = VCLOCK_NONE;
-			return 0;
-		}
+		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
+			return U64_MAX;
 
 		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
 	} while (pvclock_read_retry(pvti, version));
@@ -150,17 +122,12 @@ static notrace u64 vread_pvclock(int *mode)
 }
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-static notrace u64 vread_hvclock(int *mode)
+static notrace u64 vread_hvclock(void)
 {
 	const struct ms_hyperv_tsc_page *tsc_pg =
 		(const struct ms_hyperv_tsc_page *)&hvclock_page;
-	u64 current_tick = hv_read_tsc_page(tsc_pg);
-
-	if (current_tick != U64_MAX)
-		return current_tick;
 
-	*mode = VCLOCK_NONE;
-	return 0;
+	return hv_read_tsc_page(tsc_pg);
 }
 #endif
 
@@ -184,47 +151,42 @@ notrace static u64 vread_tsc(void)
 	return last;
 }
 
-notrace static inline u64 vgetsns(int *mode)
+notrace static inline u64 vgetcyc(int mode)
 {
-	u64 v;
-	cycles_t cycles;
-
-	if (gtod->vclock_mode == VCLOCK_TSC)
-		cycles = vread_tsc();
+	if (mode == VCLOCK_TSC)
+		return vread_tsc();
 #ifdef CONFIG_PARAVIRT_CLOCK
-	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
-		cycles = vread_pvclock(mode);
+	else if (mode == VCLOCK_PVCLOCK)
+		return vread_pvclock();
 #endif
 #ifdef CONFIG_HYPERV_TSCPAGE
-	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
-		cycles = vread_hvclock(mode);
+	else if (mode == VCLOCK_HVCLOCK)
+		return vread_hvclock();
 #endif
-	else
-		return 0;
-	v = cycles - gtod->cycle_last;
-	return v * gtod->mult;
+	return U64_MAX;
 }
 
 notrace static int do_hres(clockid_t clk, struct timespec *ts)
 {
 	struct vgtod_ts *base = &gtod->basetime[clk];
 	unsigned int seq;
-	int mode;
-	u64 ns;
+	u64 cycles, ns;
 
 	do {
 		seq = gtod_read_begin(gtod);
-		mode = gtod->vclock_mode;
 		ts->tv_sec = base->sec;
 		ns = base->nsec;
-		ns += vgetsns(&mode);
+		cycles = vgetcyc(gtod->vclock_mode);
+		if (unlikely((s64)cycles < 0))
+			return vdso_fallback_gettime(clk, ts);
+		ns += (cycles - gtod->cycle_last) * gtod->mult;
 		ns >>= gtod->shift;
 	} while (unlikely(gtod_read_retry(gtod, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 	ts->tv_nsec = ns;
 
-	return mode;
+	return 0;
 }
 
 notrace static void do_coarse(clockid_t clk, struct timespec *ts)
@@ -253,8 +215,7 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 	 */
 	msk = 1U << clock;
 	if (likely(msk & VGTOD_HRES)) {
-		if (do_hres(clock, ts) != VCLOCK_NONE)
-			return 0;
+		return do_hres(clock, ts);
 	} else if (msk & VGTOD_COARSE) {
 		do_coarse(clock, ts);
 		return 0;
@@ -270,8 +231,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 	if (likely(tv != NULL)) {
 		struct timespec *ts = (struct timespec *) tv;
 
-		if (unlikely(do_hres(CLOCK_REALTIME, ts) == VCLOCK_NONE))
-			return vdso_fallback_gtod(tv, tz);
+		do_hres(CLOCK_REALTIME, ts);
 		tv->tv_usec /= 1000;
 	}
 	if (unlikely(tz != NULL)) {
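A note on the in-band error value: the patch works because the readout never legitimately produces a cycle value with the top bit set in practice, so do_hres() can detect the U64_MAX marker with a single signed comparison. Below is a small standalone sketch of that check; vgetcyc_demo and INVALID_CYCLES are hypothetical stand-ins, not kernel identifiers.

#include <stdint.h>
#include <stdio.h>

/* Same numeric value as the kernel's U64_MAX. */
#define INVALID_CYCLES UINT64_MAX

/* Hypothetical stand-in for vgetcyc(): returns cycles, or UINT64_MAX when the vclock is unusable. */
static uint64_t vgetcyc_demo(int usable)
{
	return usable ? 123456789ULL : INVALID_CYCLES;
}

int main(void)
{
	uint64_t cycles = vgetcyc_demo(0);

	/* Mirrors the do_hres() test: any value with the top bit set is negative as s64. */
	if ((int64_t)cycles < 0)
		printf("invalid vclock -> vdso_fallback_gettime()\n");
	else
		printf("cycles = %llu\n", (unsigned long long)cycles);

	return 0;
}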
