@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT       12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1565,6 +1563,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_request(cfqq);
 	cfq_clear_cfqq_wait_busy(cfqq);
 
+	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance.  If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 	return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance.  If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
 
@@ -3510,7 +3493,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 		/*
 		 * If the queue was seeky for too long, break it apart.
 		 */
-		if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 			cfqq = split_cfqq(cic, cfqq);
 			if (!cfqq)
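
For context, a minimal userspace sketch of the seekiness test this patch now applies at slice-expiry time. The simplified struct cfq_queue, the plain int flags, the helper name slice_expired_check(), and the sample seek_mean value are illustrative assumptions, not part of the commit; only the two macros are copied from the diff above.

#include <stdio.h>
#include <stdint.h>

/* Macros from the diff above; seek_mean is assumed to be in 512-byte
 * sectors, so the threshold of 8 * 1024 sectors is roughly 4 MiB. */
#define CFQQ_SEEK_THR		8 * 1024
#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)

/* Hypothetical stand-in for the few cfq_queue fields the check touches. */
struct cfq_queue {
	uint64_t seek_mean;	/* mean seek distance, in sectors */
	int coop;		/* queue is shared between processes */
	int split_coop;		/* shared queue should be broken apart */
};

/* Mirrors the new check in __cfq_slice_expired(): once per slice, a
 * shared (coop) queue that has turned seeky is flagged for splitting. */
static void slice_expired_check(struct cfq_queue *cfqq)
{
	if (cfqq->coop && CFQQ_SEEKY(cfqq))
		cfqq->split_coop = 1;
}

int main(void)
{
	struct cfq_queue q = { .seek_mean = 16 * 1024, .coop = 1, .split_coop = 0 };

	slice_expired_check(&q);
	printf("seek_mean=%llu sectors, split_coop=%d\n",
	       (unsigned long long)q.seek_mean, q.split_coop);
	return 0;
}

The net effect of the diff is that the decision moves from cfq_update_io_seektime(), which waited for the CFQQ_COOP_TOUT (HZ, i.e. one second) timeout via seeky_start, to __cfq_slice_expired(), so a shared queue that goes seeky is marked for splitting after at most one slice and actually split on the next cfq_set_request().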