Skip to content

Commit 3e89bf3

Browse files
committed
x86/vdso: Move cycle_last handling into the caller
Dereferencing gtod->cycle_last all over the place and doing the cycles < last comparison in the vclock read functions generates horrible code. Doing it at the call site is much better and gains a few cycles both for TSC and pvclock. Caveat: This adds the comparison to the hyperv vclock as well, but I have no way to test that. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Andy Lutomirski <luto@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Matt Rickard <matt@softrans.com.au> Cc: Stephen Boyd <sboyd@kernel.org> Cc: John Stultz <john.stultz@linaro.org> Cc: Florian Weimer <fweimer@redhat.com> Cc: "K. Y. Srinivasan" <kys@microsoft.com> Cc: Vitaly Kuznetsov <vkuznets@redhat.com> Cc: devel@linuxdriverproject.org Cc: virtualization@lists.linux-foundation.org Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Juergen Gross <jgross@suse.com> Link: https://lkml.kernel.org/r/20180917130707.741440803@linutronix.de
1 parent 4f72adc commit 3e89bf3

File tree

1 file changed

+7
-32
lines changed

1 file changed

+7
-32
lines changed

arch/x86/entry/vdso/vclock_gettime.c

Lines changed: 7 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -77,9 +77,8 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
7777
static notrace u64 vread_pvclock(void)
7878
{
7979
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
80-
u64 ret;
81-
u64 last;
8280
u32 version;
81+
u64 ret;
8382

8483
/*
8584
* Note: The kernel and hypervisor must guarantee that cpu ID
@@ -112,13 +111,7 @@ static notrace u64 vread_pvclock(void)
112111
ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
113112
} while (pvclock_read_retry(pvti, version));
114113

115-
/* refer to vread_tsc() comment for rationale */
116-
last = gtod->cycle_last;
117-
118-
if (likely(ret >= last))
119-
return ret;
120-
121-
return last;
114+
return ret;
122115
}
123116
#endif
124117
#ifdef CONFIG_HYPERV_TSCPAGE
@@ -131,30 +124,10 @@ static notrace u64 vread_hvclock(void)
131124
}
132125
#endif
133126

134-
notrace static u64 vread_tsc(void)
135-
{
136-
u64 ret = (u64)rdtsc_ordered();
137-
u64 last = gtod->cycle_last;
138-
139-
if (likely(ret >= last))
140-
return ret;
141-
142-
/*
143-
* GCC likes to generate cmov here, but this branch is extremely
144-
* predictable (it's just a function of time and the likely is
145-
* very likely) and there's a data dependence, so force GCC
146-
* to generate a branch instead. I don't barrier() because
147-
* we don't actually need a barrier, and if this function
148-
* ever gets inlined it will generate worse code.
149-
*/
150-
asm volatile ("");
151-
return last;
152-
}
153-
154127
notrace static inline u64 vgetcyc(int mode)
155128
{
156129
if (mode == VCLOCK_TSC)
157-
return vread_tsc();
130+
return (u64)rdtsc_ordered();
158131
#ifdef CONFIG_PARAVIRT_CLOCK
159132
else if (mode == VCLOCK_PVCLOCK)
160133
return vread_pvclock();
@@ -169,17 +142,19 @@ notrace static inline u64 vgetcyc(int mode)
169142
notrace static int do_hres(clockid_t clk, struct timespec *ts)
170143
{
171144
struct vgtod_ts *base = &gtod->basetime[clk];
145+
u64 cycles, last, ns;
172146
unsigned int seq;
173-
u64 cycles, ns;
174147

175148
do {
176149
seq = gtod_read_begin(gtod);
177150
ts->tv_sec = base->sec;
178151
ns = base->nsec;
152+
last = gtod->cycle_last;
179153
cycles = vgetcyc(gtod->vclock_mode);
180154
if (unlikely((s64)cycles < 0))
181155
return vdso_fallback_gettime(clk, ts);
182-
ns += (cycles - gtod->cycle_last) * gtod->mult;
156+
if (cycles > last)
157+
ns += (cycles - last) * gtod->mult;
183158
ns >>= gtod->shift;
184159
} while (unlikely(gtod_read_retry(gtod, seq)));
185160

0 commit comments

Comments
 (0)