Skip to content

Commit 84c7afc

Browse files
osandov
authored and axboe committed
block: use ktime_get_ns() instead of sched_clock() for cfq and bfq
cfq and bfq have some internal fields that use sched_clock() which can trivially use ktime_get_ns() instead. Their timestamp fields in struct request can also use ktime_get_ns(), which resolves the 8 year old comment added by commit 28f4197 ("block: disable preemption before using sched_clock()"). Signed-off-by: Omar Sandoval <osandov@fb.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 544ccc8 commit 84c7afc

File tree

4 files changed

+57
-63
lines changed

4 files changed

+57
-63
lines changed

block/bfq-cgroup.c

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -55,13 +55,13 @@ BFQG_FLAG_FNS(empty)
5555
/* This should be called with the scheduler lock held. */
5656
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
5757
{
58-
unsigned long long now;
58+
u64 now;
5959

6060
if (!bfqg_stats_waiting(stats))
6161
return;
6262

63-
now = sched_clock();
64-
if (time_after64(now, stats->start_group_wait_time))
63+
now = ktime_get_ns();
64+
if (now > stats->start_group_wait_time)
6565
blkg_stat_add(&stats->group_wait_time,
6666
now - stats->start_group_wait_time);
6767
bfqg_stats_clear_waiting(stats);
@@ -77,20 +77,20 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
7777
return;
7878
if (bfqg == curr_bfqg)
7979
return;
80-
stats->start_group_wait_time = sched_clock();
80+
stats->start_group_wait_time = ktime_get_ns();
8181
bfqg_stats_mark_waiting(stats);
8282
}
8383

8484
/* This should be called with the scheduler lock held. */
8585
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
8686
{
87-
unsigned long long now;
87+
u64 now;
8888

8989
if (!bfqg_stats_empty(stats))
9090
return;
9191

92-
now = sched_clock();
93-
if (time_after64(now, stats->start_empty_time))
92+
now = ktime_get_ns();
93+
if (now > stats->start_empty_time)
9494
blkg_stat_add(&stats->empty_time,
9595
now - stats->start_empty_time);
9696
bfqg_stats_clear_empty(stats);
@@ -116,7 +116,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
116116
if (bfqg_stats_empty(stats))
117117
return;
118118

119-
stats->start_empty_time = sched_clock();
119+
stats->start_empty_time = ktime_get_ns();
120120
bfqg_stats_mark_empty(stats);
121121
}
122122

@@ -125,9 +125,9 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
125125
struct bfqg_stats *stats = &bfqg->stats;
126126

127127
if (bfqg_stats_idling(stats)) {
128-
unsigned long long now = sched_clock();
128+
u64 now = ktime_get_ns();
129129

130-
if (time_after64(now, stats->start_idle_time))
130+
if (now > stats->start_idle_time)
131131
blkg_stat_add(&stats->idle_time,
132132
now - stats->start_idle_time);
133133
bfqg_stats_clear_idling(stats);
@@ -138,7 +138,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
138138
{
139139
struct bfqg_stats *stats = &bfqg->stats;
140140

141-
stats->start_idle_time = sched_clock();
141+
stats->start_idle_time = ktime_get_ns();
142142
bfqg_stats_mark_idling(stats);
143143
}
144144

@@ -171,18 +171,18 @@ void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
171171
blkg_rwstat_add(&bfqg->stats.merged, op, 1);
172172
}
173173

174-
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
175-
uint64_t io_start_time, unsigned int op)
174+
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
175+
u64 io_start_time_ns, unsigned int op)
176176
{
177177
struct bfqg_stats *stats = &bfqg->stats;
178-
unsigned long long now = sched_clock();
178+
u64 now = ktime_get_ns();
179179

180-
if (time_after64(now, io_start_time))
180+
if (now > io_start_time_ns)
181181
blkg_rwstat_add(&stats->service_time, op,
182-
now - io_start_time);
183-
if (time_after64(io_start_time, start_time))
182+
now - io_start_time_ns);
183+
if (io_start_time_ns > start_time_ns)
184184
blkg_rwstat_add(&stats->wait_time, op,
185-
io_start_time - start_time);
185+
io_start_time_ns - start_time_ns);
186186
}
187187

188188
#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
@@ -191,8 +191,8 @@ void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
191191
unsigned int op) { }
192192
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
193193
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
194-
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
195-
uint64_t io_start_time, unsigned int op) { }
194+
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
195+
u64 io_start_time_ns, unsigned int op) { }
196196
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
197197
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
198198
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }

block/bfq-iosched.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -732,9 +732,9 @@ struct bfqg_stats {
732732
/* total time with empty current active q with other requests queued */
733733
struct blkg_stat empty_time;
734734
/* fields after this shouldn't be cleared on stat reset */
735-
uint64_t start_group_wait_time;
736-
uint64_t start_idle_time;
737-
uint64_t start_empty_time;
735+
u64 start_group_wait_time;
736+
u64 start_idle_time;
737+
u64 start_empty_time;
738738
uint16_t flags;
739739
#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
740740
};
@@ -856,8 +856,8 @@ void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
856856
unsigned int op);
857857
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
858858
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
859-
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
860-
uint64_t io_start_time, unsigned int op);
859+
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
860+
u64 io_start_time_ns, unsigned int op);
861861
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
862862
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
863863
void bfqg_stats_update_idle_time(struct bfq_group *bfqg);

block/cfq-iosched.c

Lines changed: 26 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -210,9 +210,9 @@ struct cfqg_stats {
210210
/* total time with empty current active q with other requests queued */
211211
struct blkg_stat empty_time;
212212
/* fields after this shouldn't be cleared on stat reset */
213-
uint64_t start_group_wait_time;
214-
uint64_t start_idle_time;
215-
uint64_t start_empty_time;
213+
u64 start_group_wait_time;
214+
u64 start_idle_time;
215+
u64 start_empty_time;
216216
uint16_t flags;
217217
#endif /* CONFIG_DEBUG_BLK_CGROUP */
218218
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -491,13 +491,13 @@ CFQG_FLAG_FNS(empty)
491491
/* This should be called with the queue_lock held. */
492492
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
493493
{
494-
unsigned long long now;
494+
u64 now;
495495

496496
if (!cfqg_stats_waiting(stats))
497497
return;
498498

499-
now = sched_clock();
500-
if (time_after64(now, stats->start_group_wait_time))
499+
now = ktime_get_ns();
500+
if (now > stats->start_group_wait_time)
501501
blkg_stat_add(&stats->group_wait_time,
502502
now - stats->start_group_wait_time);
503503
cfqg_stats_clear_waiting(stats);
@@ -513,20 +513,20 @@ static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
513513
return;
514514
if (cfqg == curr_cfqg)
515515
return;
516-
stats->start_group_wait_time = sched_clock();
516+
stats->start_group_wait_time = ktime_get_ns();
517517
cfqg_stats_mark_waiting(stats);
518518
}
519519

520520
/* This should be called with the queue_lock held. */
521521
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
522522
{
523-
unsigned long long now;
523+
u64 now;
524524

525525
if (!cfqg_stats_empty(stats))
526526
return;
527527

528-
now = sched_clock();
529-
if (time_after64(now, stats->start_empty_time))
528+
now = ktime_get_ns();
529+
if (now > stats->start_empty_time)
530530
blkg_stat_add(&stats->empty_time,
531531
now - stats->start_empty_time);
532532
cfqg_stats_clear_empty(stats);
@@ -552,7 +552,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
552552
if (cfqg_stats_empty(stats))
553553
return;
554554

555-
stats->start_empty_time = sched_clock();
555+
stats->start_empty_time = ktime_get_ns();
556556
cfqg_stats_mark_empty(stats);
557557
}
558558

@@ -561,9 +561,9 @@ static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
561561
struct cfqg_stats *stats = &cfqg->stats;
562562

563563
if (cfqg_stats_idling(stats)) {
564-
unsigned long long now = sched_clock();
564+
u64 now = ktime_get_ns();
565565

566-
if (time_after64(now, stats->start_idle_time))
566+
if (now > stats->start_idle_time)
567567
blkg_stat_add(&stats->idle_time,
568568
now - stats->start_idle_time);
569569
cfqg_stats_clear_idling(stats);
@@ -576,7 +576,7 @@ static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
576576

577577
BUG_ON(cfqg_stats_idling(stats));
578578

579-
stats->start_idle_time = sched_clock();
579+
stats->start_idle_time = ktime_get_ns();
580580
cfqg_stats_mark_idling(stats);
581581
}
582582

@@ -701,17 +701,19 @@ static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
701701
}
702702

703703
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
704-
uint64_t start_time, uint64_t io_start_time,
705-
unsigned int op)
704+
u64 start_time_ns,
705+
u64 io_start_time_ns,
706+
unsigned int op)
706707
{
707708
struct cfqg_stats *stats = &cfqg->stats;
708-
unsigned long long now = sched_clock();
709+
u64 now = ktime_get_ns();
709710

710-
if (time_after64(now, io_start_time))
711-
blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
712-
if (time_after64(io_start_time, start_time))
711+
if (now > io_start_time_ns)
712+
blkg_rwstat_add(&stats->service_time, op,
713+
now - io_start_time_ns);
714+
if (io_start_time_ns > start_time_ns)
713715
blkg_rwstat_add(&stats->wait_time, op,
714-
io_start_time - start_time);
716+
io_start_time_ns - start_time_ns);
715717
}
716718

717719
/* @stats = 0 */
@@ -797,8 +799,9 @@ static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
797799
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
798800
unsigned int op) { }
799801
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
800-
uint64_t start_time, uint64_t io_start_time,
801-
unsigned int op) { }
802+
u64 start_time_ns,
803+
u64 io_start_time_ns,
804+
unsigned int op) { }
802805

803806
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
804807

include/linux/blkdev.h

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1799,42 +1799,33 @@ int kblockd_schedule_work_on(int cpu, struct work_struct *work);
17991799
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
18001800

18011801
#ifdef CONFIG_BLK_CGROUP
1802-
/*
1803-
* This should not be using sched_clock(). A real patch is in progress
1804-
* to fix this up, until that is in place we need to disable preemption
1805-
* around sched_clock() in this function and set_io_start_time_ns().
1806-
*/
18071802
static inline void set_start_time_ns(struct request *req)
18081803
{
1809-
preempt_disable();
1810-
req->cgroup_start_time_ns = sched_clock();
1811-
preempt_enable();
1804+
req->cgroup_start_time_ns = ktime_get_ns();
18121805
}
18131806

18141807
static inline void set_io_start_time_ns(struct request *req)
18151808
{
1816-
preempt_disable();
1817-
req->cgroup_io_start_time_ns = sched_clock();
1818-
preempt_enable();
1809+
req->cgroup_io_start_time_ns = ktime_get_ns();
18191810
}
18201811

1821-
static inline uint64_t rq_start_time_ns(struct request *req)
1812+
static inline u64 rq_start_time_ns(struct request *req)
18221813
{
18231814
return req->cgroup_start_time_ns;
18241815
}
18251816

1826-
static inline uint64_t rq_io_start_time_ns(struct request *req)
1817+
static inline u64 rq_io_start_time_ns(struct request *req)
18271818
{
18281819
return req->cgroup_io_start_time_ns;
18291820
}
18301821
#else
18311822
static inline void set_start_time_ns(struct request *req) {}
18321823
static inline void set_io_start_time_ns(struct request *req) {}
1833-
static inline uint64_t rq_start_time_ns(struct request *req)
1824+
static inline u64 rq_start_time_ns(struct request *req)
18341825
{
18351826
return 0;
18361827
}
1837-
static inline uint64_t rq_io_start_time_ns(struct request *req)
1828+
static inline u64 rq_io_start_time_ns(struct request *req)
18381829
{
18391830
return 0;
18401831
}

0 commit comments

Comments (0)