
Commit 76b1e9b

Eliezer Tamir authored and davem330 committed
net/fs: change busy poll time accounting
Suggested by Linus:

Changed time accounting for busy-poll:
- Make it microsecond based.
- Use unsigned longs.
- Revert back to use time_after instead of time_in_range.

Reorder poll/select busy loop conditions:
- Clear busy_flag after one time we can't busy-poll.
- Only init busy_end if we actually are going to busy-poll.

Added one more missing need_resched() test.

Signed-off-by: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent cbf5500 commit 76b1e9b
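Background on the "unsigned longs + time_after" point: time_after() compares with wrap-safe unsigned arithmetic, so a deadline computed close to a counter wraparound still expires correctly. Below is a small user-space sketch; the macro is a simplified copy of the kernel's <linux/jiffies.h> definition (the in-kernel version adds type checks), and the particular clock values are arbitrary.

#include <stdio.h>

/* Simplified form of the kernel's time_after(): true when a is later than b.
 * The subtraction is done in unsigned arithmetic and cast to signed long,
 * which keeps the comparison correct across wraparound. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long now = (unsigned long)-5;  /* clock just before it wraps */
        unsigned long end = now + 10;           /* deadline lands after the wrap */

        printf("expired before wrap? %d\n", time_after(now, end)); /* 0: not yet */

        now += 20;                              /* clock has wrapped past the deadline */
        printf("expired after wrap?  %d\n", time_after(now, end)); /* 1: expired */
        return 0;
}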

File tree

2 files changed: 38 additions, 48 deletions


fs/select.c

Lines changed: 20 additions & 11 deletions
@@ -403,8 +403,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
         int retval, i, timed_out = 0;
         unsigned long slack = 0;
         unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-        u64 busy_start = busy_loop_start_time(busy_flag);
-        u64 busy_end = busy_loop_end_time();
+        unsigned long busy_end = 0;
 
         rcu_read_lock();
         retval = max_select_fd(n, fds);
@@ -506,9 +505,15 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                 }
 
                 /* only if found POLL_BUSY_LOOP sockets && not out of time */
-                if (!need_resched() && can_busy_loop &&
-                    busy_loop_range(busy_start, busy_end))
-                        continue;
+                if (can_busy_loop && !need_resched()) {
+                        if (!busy_end) {
+                                busy_end = busy_loop_end_time();
+                                continue;
+                        }
+                        if (!busy_loop_timeout(busy_end))
+                                continue;
+                }
+                busy_flag = 0;
 
                 /*
                  * If this is the first loop and we have a timeout
@@ -780,9 +785,7 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
         int timed_out = 0, count = 0;
         unsigned long slack = 0;
         unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-        u64 busy_start = busy_loop_start_time(busy_flag);
-        u64 busy_end = busy_loop_end_time();
-
+        unsigned long busy_end = 0;
 
         /* Optimise the no-wait case */
         if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -834,9 +837,15 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
                         break;
 
                 /* only if found POLL_BUSY_LOOP sockets && not out of time */
-                if (!need_resched() && can_busy_loop &&
-                    busy_loop_range(busy_start, busy_end))
-                        continue;
+                if (can_busy_loop && !need_resched()) {
+                        if (!busy_end) {
+                                busy_end = busy_loop_end_time();
+                                continue;
+                        }
+                        if (!busy_loop_timeout(busy_end))
+                                continue;
+                }
+                busy_flag = 0;
 
                 /*
                  * If this is the first loop and we have a timeout
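The reordered conditions in do_select() and do_poll() are easiest to see in isolation. Below is a self-contained user-space sketch of the same control flow; fake_clock_us, the 50 us budget, and the 10 us per-pass cost are made-up stand-ins rather than kernel API, and the need_resched()/signal checks are left out. It illustrates the two points of the change: busy_end is only armed on the first pass that actually busy-polls (so the clock is never read when busy polling never happens), and busy_flag is cleared for good once the budget expires, so later iterations fall through to the normal sleeping path.

#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_clock_us;             /* stand-in for a microsecond clock */

static unsigned long busy_loop_end_time(void)
{
        return fake_clock_us + 50;              /* pretend busy-poll budget: 50 us */
}

static bool busy_loop_timeout(unsigned long end_time)
{
        return (long)(end_time - fake_clock_us) < 0;    /* i.e. time_after(now, end) */
}

int main(void)
{
        unsigned int busy_flag = 1;             /* stand-in for POLL_BUSY_LOOP */
        unsigned long busy_end = 0;             /* 0 means "deadline not armed yet" */
        bool can_busy_loop = true;
        int i;

        for (i = 0; i < 10; i++) {
                fake_clock_us += 10;            /* each pass "costs" 10 us */

                if (can_busy_loop) {
                        if (!busy_end) {        /* first busy pass: arm the deadline */
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;       /* still within the budget */
                }
                busy_flag = 0;                  /* give up busy polling permanently */
                printf("stopped busy polling after pass %d, busy_flag=%u\n",
                       i, busy_flag);
                break;
        }
        return 0;
}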

include/net/ll_poll.h

Lines changed: 18 additions & 37 deletions
@@ -47,45 +47,32 @@ static inline bool net_busy_loop_on(void)
  * we only care that the average is bounded
  */
 #ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_sched_clock(void)
+static inline u64 busy_loop_us_clock(void)
 {
         u64 rc;
 
         preempt_disable_notrace();
         rc = sched_clock();
         preempt_enable_no_resched_notrace();
 
-        return rc;
+        return rc >> 10;
 }
 #else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_sched_clock(void)
+static inline u64 busy_loop_us_clock(void)
 {
-        return sched_clock();
+        return sched_clock() >> 10;
 }
 #endif /* CONFIG_DEBUG_PREEMPT */
 
-/* we don't mind a ~2.5% imprecision so <<10 instead of *1000
- * sk->sk_ll_usec is a u_int so this can't overflow
- */
-static inline u64 sk_busy_loop_end_time(struct sock *sk)
+static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 {
-        return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10;
+        return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
 }
 
-/* in poll/select we use the global sysctl_net_ll_poll value
- * only call sched_clock() if enabled
- */
-static inline u64 busy_loop_end_time(void)
-{
-        return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10;
-}
-
-/* if flag is not set we don't need to know the time
- * so we want to avoid a potentially expensive sched_clock()
- */
-static inline u64 busy_loop_start_time(unsigned int flag)
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline unsigned long busy_loop_end_time(void)
 {
-        return flag ? busy_loop_sched_clock() : 0;
+        return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
 }
 
 static inline bool sk_can_busy_loop(struct sock *sk)
@@ -94,21 +81,20 @@ static inline bool sk_can_busy_loop(struct sock *sk)
                !need_resched() && !signal_pending(current);
 }
 
-/* careful! time_in_range64 will evaluate now twice */
-static inline bool busy_loop_range(u64 start_time, u64 run_time)
+
+static inline bool busy_loop_timeout(unsigned long end_time)
 {
-        u64 now = busy_loop_sched_clock();
+        unsigned long now = busy_loop_us_clock();
 
-        return time_in_range64(now, start_time, start_time + run_time);
+        return time_after(now, end_time);
 }
 
 /* when used in sock_poll() nonblock is known at compile time to be true
  * so the loop and end_time will be optimized out
  */
 static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 {
-        u64 start_time = busy_loop_start_time(!nonblock);
-        u64 end_time = sk_busy_loop_end_time(sk);
+        unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
         const struct net_device_ops *ops;
         struct napi_struct *napi;
         int rc = false;
@@ -139,7 +125,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
                                          LINUX_MIB_LOWLATENCYRXPACKETS, rc);
 
         } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
-                 busy_loop_range(start_time, end_time));
+                 !need_resched() && !busy_loop_timeout(end_time));
 
         rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
@@ -165,12 +151,7 @@ static inline unsigned long net_busy_loop_on(void)
         return 0;
 }
 
-static inline u64 busy_loop_start_time(unsigned int flag)
-{
-        return 0;
-}
-
-static inline u64 busy_loop_end_time(void)
+static inline unsigned long busy_loop_end_time(void)
 {
         return 0;
 }
@@ -193,9 +174,9 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }
 
-static inline bool busy_loop_range(u64 start_time, u64 run_time)
+static inline bool busy_loop_timeout(unsigned long end_time)
 {
-        return false;
+        return true;
 }
 
 #endif /* CONFIG_NET_LL_RX_POLL */
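A note on the microsecond conversion above: sched_clock() counts nanoseconds, and busy_loop_us_clock() shifts right by 10, dividing by 1024 rather than 1000, which is what the removed comment meant by "~2.5% imprecision so <<10 instead of *1000". The quick user-space check below shows the actual error (about 2.34%) for an arbitrary 3-second value; it is plain arithmetic, not kernel code.

#include <stdio.h>

int main(void)
{
        unsigned long long ns = 3000000000ULL;          /* 3 s expressed in nanoseconds */
        unsigned long long exact_us  = ns / 1000;       /* true microseconds */
        unsigned long long approx_us = ns >> 10;        /* the >>10 approximation */

        printf("exact %llu us, approx %llu us, error %.2f%%\n",
               exact_us, approx_us,
               100.0 * (double)(exact_us - approx_us) / (double)exact_us); /* ~2.34% */
        return 0;
}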
