Skip to content

Commit 85c3d2d

Browse files
bebarino authored and johnstultz-work committed
sched_clock: Use seqcount instead of rolling our own
We're going to increase the cyc value to 64 bits in the near future. Doing that is going to break the custom seqcount implementation in the sched_clock code because 64 bit numbers aren't guaranteed to be atomic. Replace the cyc_copy with a seqcount to avoid this problem. Cc: Russell King <linux@arm.linux.org.uk> Acked-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Stephen Boyd <sboyd@codeaurora.org> Signed-off-by: John Stultz <john.stultz@linaro.org>
1 parent 87d8b9e commit 85c3d2d

File tree

1 file changed

+8
-19
lines changed

1 file changed

+8
-19
lines changed

kernel/time/sched_clock.c

Lines changed: 8 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -14,11 +14,12 @@
1414
#include <linux/syscore_ops.h>
1515
#include <linux/timer.h>
1616
#include <linux/sched_clock.h>
17+
#include <linux/seqlock.h>
1718

1819
struct clock_data {
1920
u64 epoch_ns;
2021
u32 epoch_cyc;
21-
u32 epoch_cyc_copy;
22+
seqcount_t seq;
2223
unsigned long rate;
2324
u32 mult;
2425
u32 shift;
@@ -54,23 +55,16 @@ static unsigned long long notrace sched_clock_32(void)
5455
u64 epoch_ns;
5556
u32 epoch_cyc;
5657
u32 cyc;
58+
unsigned long seq;
5759

5860
if (cd.suspended)
5961
return cd.epoch_ns;
6062

61-
/*
62-
* Load the epoch_cyc and epoch_ns atomically. We do this by
63-
* ensuring that we always write epoch_cyc, epoch_ns and
64-
* epoch_cyc_copy in strict order, and read them in strict order.
65-
* If epoch_cyc and epoch_cyc_copy are not equal, then we're in
66-
* the middle of an update, and we should repeat the load.
67-
*/
6863
do {
64+
seq = read_seqcount_begin(&cd.seq);
6965
epoch_cyc = cd.epoch_cyc;
70-
smp_rmb();
7166
epoch_ns = cd.epoch_ns;
72-
smp_rmb();
73-
} while (epoch_cyc != cd.epoch_cyc_copy);
67+
} while (read_seqcount_retry(&cd.seq, seq));
7468

7569
cyc = read_sched_clock();
7670
cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -90,16 +84,12 @@ static void notrace update_sched_clock(void)
9084
ns = cd.epoch_ns +
9185
cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
9286
cd.mult, cd.shift);
93-
/*
94-
* Write epoch_cyc and epoch_ns in a way that the update is
95-
* detectable in cyc_to_fixed_sched_clock().
96-
*/
87+
9788
raw_local_irq_save(flags);
98-
cd.epoch_cyc_copy = cyc;
99-
smp_wmb();
89+
write_seqcount_begin(&cd.seq);
10090
cd.epoch_ns = ns;
101-
smp_wmb();
10291
cd.epoch_cyc = cyc;
92+
write_seqcount_end(&cd.seq);
10393
raw_local_irq_restore(flags);
10494
}
10595

@@ -195,7 +185,6 @@ static int sched_clock_suspend(void)
195185
static void sched_clock_resume(void)
196186
{
197187
cd.epoch_cyc = read_sched_clock();
198-
cd.epoch_cyc_copy = cd.epoch_cyc;
199188
cd.suspended = false;
200189
}
201190

0 commit comments

Comments (0)