@@ -38,6 +38,18 @@
 
 static unsigned long clocktick __read_mostly;   /* timer cycles per tick */
 
+#ifndef CONFIG_64BIT
+/*
+ * The processor-internal cycle counter (Control Register 16) is used as the
+ * time source for the sched_clock() function.  This register is 64 bits wide
+ * on a 64-bit kernel and 32 bits on a 32-bit kernel.  Since sched_clock()
+ * always requires a 64-bit counter, on a 32-bit kernel we emulate the upper
+ * 32 bits with a per-cpu variable which we increment every time the counter
+ * wraps around (which happens roughly every 4 seconds).
+ */
+static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
+#endif
+
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
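
For illustration, a minimal userspace sketch of the widening scheme the comment above describes. Everything here is a hypothetical stand-in: fake_cr16 plays the 32-bit CR16, read_cycles32() plays mfctl(16), and high_32_bits plays the per-cpu variable. The wrap check is also folded into the read path here, whereas the patch performs it in timer_interrupt(), which runs many times per wrap period:

    /* build: cc -o widen widen.c */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_cr16;      /* stand-in for the hardware counter */
    static uint32_t high_32_bits;   /* stand-in for cr16_high_32_bits */
    static uint32_t last_seen;      /* last raw value observed */

    static uint32_t read_cycles32(void)
    {
            return fake_cr16;       /* stand-in for mfctl(16) */
    }

    static uint64_t read_cycles64(void)
    {
            uint32_t now = read_cycles32();

            /* A raw value below the previous one means the 32-bit
             * counter wrapped since the last read. */
            if (now < last_seen)
                    high_32_bits++;
            last_seen = now;

            return ((uint64_t)high_32_bits << 32) | now;
    }

    int main(void)
    {
            fake_cr16 = 0xfffffff0u;
            printf("0x%016llx\n", (unsigned long long)read_cycles64());
            fake_cr16 = 0x10u;      /* simulate a wrap-around */
            printf("0x%016llx\n", (unsigned long long)read_cycles64());
            return 0;
    }

This prints 0x00000000fffffff0 followed by 0x0000000100000010, i.e. the widened value keeps increasing across the wrap.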
@@ -108,6 +120,12 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
         */
        mtctl(next_tick, 16);
 
+#if !defined(CONFIG_64BIT)
+       /* check for overflow on a 32-bit kernel (every ~4 seconds). */
+       if (unlikely(next_tick < now))
+               this_cpu_inc(cr16_high_32_bits);
+#endif
+
        /* Skip one clocktick on purpose if we missed next_tick.
         * The new CR16 must be "later" than current CR16 otherwise
         * itimer would not fire until CR16 wrapped - e.g 4 seconds
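
The test next_tick < now works because the deadline is computed with unsigned modular arithmetic: once the addition passes 2^32, the numerically "later" value compares lower. A worked example with assumed values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t now = 0xfffffff0u;             /* CR16 near the top */
            uint32_t clocktick = 0x40u;             /* assumed cycles per tick */
            uint32_t next_tick = now + clocktick;   /* wraps to 0x30 */

            /* The wrapped deadline compares lower than the current
             * counter value, the exact condition the hunk above tests. */
            assert(next_tick < now);
            return 0;
    }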
@@ -219,6 +237,12 @@ void __init start_cpu_itimer(void)
        unsigned int cpu = smp_processor_id();
        unsigned long next_tick = mfctl(16) + clocktick;
 
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+       /* With multiple 64-bit CPUs online, the CR16s are not synchronized. */
+       if (cpu != 0)
+               clear_sched_clock_stable();
+#endif
+
        mtctl(next_tick, 16);           /* kick off Interval Timer (CR16) */
 
        per_cpu(cpu_data, cpu).it_value = next_tick;
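
A short note on why the hunk above is needed: sched_clock() is always read on the local CPU, so when each CPU's CR16 started counting at a different moment, timestamps taken on different CPUs are not directly comparable and time can appear to jump backwards when a task migrates. Clearing the stable flag makes the scheduler core route the clock through its unstable-clock filtering path instead of trusting it as globally monotonic.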
@@ -246,15 +270,52 @@ void read_persistent_clock(struct timespec *ts)
        }
 }
 
+
+/*
+ * sched_clock() framework
+ */
+
+static u32 cyc2ns_mul __read_mostly;
+static u32 cyc2ns_shift __read_mostly;
+
+u64 sched_clock(void)
+{
+       u64 now;
+
+       /* Get current cycle counter (Control Register 16). */
+#ifdef CONFIG_64BIT
+       now = mfctl(16);
+#else
+       now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
+#endif
+
+       /* return the value in ns (cycles_2_ns) */
+       return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+}
+
+
+/*
+ * timer interrupt and sched_clock() initialization
+ */
+
 void __init time_init(void)
 {
        unsigned long current_cr16_khz;
 
+       current_cr16_khz = PAGE0->mem_10msec/10;        /* kHz */
        clocktick = (100 * PAGE0->mem_10msec) / HZ;
 
+       /* calculate mult/shift values for cr16 */
+       clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
+                              NSEC_PER_MSEC, 0);
+
+#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+       /* At bootup only one 64-bit CPU is online and cr16 is "stable" */
+       set_sched_clock_stable();
+#endif
+
        start_cpu_itimer();     /* get CPU 0 started */
 
        /* register at clocksource framework */
-       current_cr16_khz = PAGE0->mem_10msec/10;        /* kHz */
        clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
 }
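
Since current_cr16_khz is cycles per millisecond and NSEC_PER_MSEC is nanoseconds per millisecond, clocks_calc_mult_shift() picks mult and shift so that mult / 2^shift approximates nanoseconds per cycle, letting sched_clock() convert with a single multiply and shift. A simplified userspace sketch under assumed values: calc_mult_shift() is a hypothetical stand-in for the kernel helper, and the plain 64-bit multiply stands in for mul_u64_u32_shr(), which the kernel uses to avoid intermediate overflow on large cycle counts:

    #include <stdint.h>
    #include <stdio.h>

    /* Pick the largest shift that keeps mult in 32 bits, so that
     * x_ns = (x_cycles * mult) >> shift. */
    static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                                uint32_t from_khz, uint32_t to_ns_per_ms)
    {
            uint32_t s;

            for (s = 32; s > 0; s--) {
                    uint64_t m = ((uint64_t)to_ns_per_ms << s) / from_khz;

                    if (m <= 0xffffffffu) {
                            *mult = (uint32_t)m;
                            *shift = s;
                            return;
                    }
            }
            *mult = to_ns_per_ms / from_khz;
            *shift = 0;
    }

    int main(void)
    {
            uint32_t mult, shift;
            uint64_t cycles = 250000000ull;  /* one second at an assumed 250 MHz */

            calc_mult_shift(&mult, &shift, 250000 /* kHz */,
                            1000000 /* NSEC_PER_MSEC */);
            printf("mult=%u shift=%u ns=%llu\n", mult, shift,
                   (unsigned long long)((cycles * mult) >> shift));
            return 0;
    }

At the assumed 250 MHz this comes out to exactly 4 ns per cycle, so one second's worth of cycles maps to 1,000,000,000 ns.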