@@ -90,33 +90,34 @@ notrace static __always_inline u64 vread_tick(void)
 {
        u64 ret;
 
-       __asm__ __volatile__("1:\n\t"
-                            "rd %%tick, %0\n\t"
-                            ".pushsection .tick_patch, \"a\"\n\t"
-                            ".word 1b - ., 1f - .\n\t"
-                            ".popsection\n\t"
-                            ".pushsection .tick_patch_replacement, \"ax\"\n\t"
-                            "1:\n\t"
-                            "rd %%asr24, %0\n\t"
-                            ".popsection\n"
-                            : "=r" (ret));
+       __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+       return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+       u64 ret;
+
+       __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
        return ret;
 }
 #else
 notrace static __always_inline u64 vread_tick(void)
 {
        register unsigned long long ret asm("o4");
 
-       __asm__ __volatile__("1:\n\t"
-                            "rd %%tick, %L0\n\t"
-                            "srlx %L0, 32, %H0\n\t"
-                            ".pushsection .tick_patch, \"a\"\n\t"
-                            ".word 1b - ., 1f - .\n\t"
-                            ".popsection\n\t"
-                            ".pushsection .tick_patch_replacement, \"ax\"\n\t"
-                            "1:\n\t"
-                            "rd %%asr24, %L0\n\t"
-                            ".popsection\n"
+       __asm__ __volatile__("rd %%tick, %L0\n\t"
+                            "srlx %L0, 32, %H0"
+                            : "=r" (ret));
+       return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+       register unsigned long long ret asm("o4");
+
+       __asm__ __volatile__("rd %%asr24, %L0\n\t"
+                            "srlx %L0, 32, %H0"
                             : "=r" (ret));
        return ret;
 }
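The removed .tick_patch machinery rewrote the `rd %tick` instruction in place once the kernel knew a STICK register was available. With this change the vDSO instead carries parallel `_stick` readers, so the choice reduces to picking an entry point rather than patching instructions. A minimal sketch of that idea, with illustrative names that are not part of the patch:

```c
#include <stdint.h>

/* Stand-ins for the readers above; on SPARC these would be the
 * inline-asm "rd %tick" and "rd %asr24" reads. */
static uint64_t read_tick(void)  { return 0; /* rd %tick  */ }
static uint64_t read_stick(void) { return 0; /* rd %asr24 */ }

/* Hypothetical one-time selection: bind the counter reader when the
 * CPU capability is known, instead of rewriting code at runtime. */
static uint64_t (*vread_counter)(void) = read_tick;

static void select_counter(int have_stick)
{
        if (have_stick)
                vread_counter = read_stick;
}
```

In the actual patch the selection presumably happens on the kernel side when the vDSO is set up, which is why both families of functions coexist in the image.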
@@ -132,6 +133,16 @@ notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
        return v * vvar->clock.mult;
 }
 
+notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
+{
+       u64 v;
+       u64 cycles;
+
+       cycles = vread_tick_stick();
+       v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+       return v * vvar->clock.mult;
+}
+
 notrace static __always_inline int do_realtime(struct vvar_data *vvar,
                                               struct timespec *ts)
 {
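`vgetsns_stick()` mirrors `vgetsns()`: mask the elapsed cycle delta against `clock.mask`, scale by the fixed-point multiplier `clock.mult`, and let the caller shift the product down by `clock.shift`. A self-contained sketch of that arithmetic, with made-up clocksource values:

```c
#include <stdint.h>
#include <stdio.h>

/* ns = ((cycles - cycle_last) & mask) * mult >> shift */
static uint64_t cycles_to_ns(uint64_t cycles, uint64_t cycle_last,
                             uint64_t mask, uint32_t mult, uint32_t shift)
{
        uint64_t delta = (cycles - cycle_last) & mask; /* tolerate wrap */

        return (delta * mult) >> shift;
}

int main(void)
{
        /* Illustrative 1 GHz counter: mult = 2^24 with shift = 24 makes
         * one cycle exactly one nanosecond, so 1000 cycles -> 1000 ns. */
        printf("%llu\n", (unsigned long long)
               cycles_to_ns(1001000, 1000000, ~0ULL, 1u << 24, 24));
        return 0;
}
```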
@@ -152,6 +163,26 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
        return 0;
 }
 
+notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
+                                                     struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->wall_time_sec;
+               ns = vvar->wall_time_snsec;
+               ns += vgetsns_stick(vvar);
+               ns >>= vvar->clock.shift;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+
+       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+       ts->tv_nsec = ns;
+
+       return 0;
+}
+
 notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
                                                struct timespec *ts)
 {
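`do_realtime_stick()` (and `do_monotonic_stick()` below) use the classic seqcount reader pattern, assuming `vvar_read_begin()`/`vvar_read_retry()` behave like the generic seqlock helpers: wait out a writer (odd sequence number) and retry if the sequence moved during the read. A generic, self-contained sketch of that protocol:

```c
#include <stdatomic.h>

struct snapshot {
        long long sec;
        unsigned long long nsec;
};

struct timedata {
        atomic_uint seq;        /* odd while a writer is mid-update */
        struct snapshot data;
};

/* Reader side: take a consistent copy without taking a lock. */
static struct snapshot read_consistent(struct timedata *td)
{
        struct snapshot s;
        unsigned int begin;

        do {
                /* spin past any in-flight writer */
                while ((begin = atomic_load_explicit(&td->seq,
                                        memory_order_acquire)) & 1u)
                        ;
                s = td->data;
        } while (atomic_load_explicit(&td->seq,
                         memory_order_acquire) != begin);
        return s;
}
```

Locklessness is the whole point here: the vDSO runs in userspace and cannot take a kernel lock, so retrying a cheap read is the only safe way to see a consistent `wall_time_sec`/`wall_time_snsec` pair.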
@@ -172,6 +203,26 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
        return 0;
 }
 
+notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
+                                                      struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+
+       do {
+               seq = vvar_read_begin(vvar);
+               ts->tv_sec = vvar->monotonic_time_sec;
+               ns = vvar->monotonic_time_snsec;
+               ns += vgetsns_stick(vvar);
+               ns >>= vvar->clock.shift;
+       } while (unlikely(vvar_read_retry(vvar, seq)));
+
+       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+       ts->tv_nsec = ns;
+
+       return 0;
+}
+
 notrace static int do_realtime_coarse(struct vvar_data *vvar,
                                      struct timespec *ts)
 {
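Both `_stick` helpers finish by folding whole seconds out of the shifted nanosecond count. In the kernel, `__iter_div_u64_rem()` does that division by repeated subtraction, on the bet that the quotient here is almost always 0 or 1; a sketch of equivalent behavior:

```c
#include <stdint.h>

#define NSEC_PER_SEC 1000000000u

/* Iterative div/mod in the spirit of __iter_div_u64_rem(): cheap when
 * the quotient is tiny, which holds for a freshly scaled ns value. */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                 uint64_t *remainder)
{
        uint32_t ret = 0;

        while (dividend >= divisor) {
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

/* Used as in the patch: tv_sec += iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 * afterwards ns holds the sub-second remainder for tv_nsec. */
```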
@@ -227,6 +278,31 @@
 clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
 
+notrace int
+__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
+{
+       struct vvar_data *vvd = get_vvar_data();
+
+       switch (clock) {
+       case CLOCK_REALTIME:
+               if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+                       break;
+               return do_realtime_stick(vvd, ts);
+       case CLOCK_MONOTONIC:
+               if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+                       break;
+               return do_monotonic_stick(vvd, ts);
+       case CLOCK_REALTIME_COARSE:
+               return do_realtime_coarse(vvd, ts);
+       case CLOCK_MONOTONIC_COARSE:
+               return do_monotonic_coarse(vvd, ts);
+       }
+       /*
+        * Unknown clock ID? Fall back to the syscall.
+        */
+       return vdso_fallback_gettime(clock, ts);
+}
+
 notrace int
 __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
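`__vdso_clock_gettime_stick()` is the dispatch twin of `__vdso_clock_gettime()`, routing the hot clocks through the `_stick` helpers and punting everything else (unknown clock IDs, or `VCLOCK_NONE`) to the real syscall. Callers never name these functions directly; glibc resolves `clock_gettime()` through the vDSO it finds via `AT_SYSINFO_EHDR`, so ordinary code is simply:

```c
#include <stdio.h>
#include <time.h>

/* Plain usage: when the vDSO fast path applies, this call never enters
 * the kernel. Which vDSO variant is exposed is the kernel's decision. */
int main(void)
{
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
```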
@@ -262,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 int
 gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));
+
+notrace int
+__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
+{
+       struct vvar_data *vvd = get_vvar_data();
+
+       if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+               if (likely(tv != NULL)) {
+                       union tstv_t {
+                               struct timespec ts;
+                               struct timeval tv;
+                       } *tstv = (union tstv_t *) tv;
+                       do_realtime_stick(vvd, &tstv->ts);
+                       /*
+                        * Assign before dividing to ensure that the division is
+                        * done in the type of tv_usec, not tv_nsec.
+                        *
+                        * There cannot be > 1 billion usec in a second:
+                        * do_realtime() has already distributed such overflow
+                        * into tv_sec. So we can assign it to an int safely.
+                        */
+                       tstv->tv.tv_usec = tstv->ts.tv_nsec;
+                       tstv->tv.tv_usec /= 1000;
+               }
+               if (unlikely(tz != NULL)) {
+                       /* Avoid memcpy. Some old compilers fail to inline it */
+                       tz->tz_minuteswest = vvd->tz_minuteswest;
+                       tz->tz_dsttime = vvd->tz_dsttime;
+               }
+               return 0;
+       }
+       return vdso_fallback_gettimeofday(tv, tz);
+}
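The gettimeofday variant converts the `timespec` filled in by `do_realtime_stick()` to a `timeval` in place: on Linux both structs begin with the seconds field, so only the sub-second member needs narrowing from nanoseconds to microseconds, and assigning before dividing forces the division into `tv_usec`'s type, as the in-code comment explains. A standalone sketch of the same trick, assuming the usual `{ time_t, long }` layout for both structs:

```c
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

union tstv {
        struct timespec ts;
        struct timeval  tv;
};

/* Convert in place: tv_sec already overlaps ts.tv_sec, so only the
 * sub-second field changes. tv_nsec < 1e9, so the result fits. */
static void ts_to_tv(union tstv *x)
{
        x->tv.tv_usec = x->ts.tv_nsec; /* assign in tv_usec's type */
        x->tv.tv_usec /= 1000;         /* ns -> us */
}

int main(void)
{
        union tstv x = { .ts = { .tv_sec = 5, .tv_nsec = 123456789 } };

        ts_to_tv(&x);
        printf("%ld.%06ld\n", (long)x.tv.tv_sec, (long)x.tv.tv_usec);
        return 0;
}
```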