
Commit 3705671

Alexander Duyck authored and davem330 committed
net: Track start of busy loop instead of when it should end
This patch flips the logic we were using to determine whether busy polling has timed out. The main motivation is that we will need to support two different timeout values in the future; by recording the start time rather than the intended end time, we can make the end time specific to the task, be it epoll- or socket-based polling.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 2b5cd0d commit 3705671
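For context, here is a minimal user-space sketch (not kernel code; the clock helper, names, and the 50 µs budget are illustrative stand-ins) contrasting the two bookkeeping styles: the old code froze an absolute deadline when polling began, while the new code stores only the start timestamp and lets each caller apply its own budget at check time.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the kernel's local_clock() >> 10 clock (~microseconds). */
static unsigned long now_usecs(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long)ts.tv_sec * 1000000UL + (unsigned long)(ts.tv_nsec / 1000);
}

/* Old scheme: the absolute deadline is computed once, when polling starts. */
static bool timed_out_old(unsigned long end_time)
{
        return now_usecs() > end_time;
}

/* New scheme: only the start is recorded; each caller adds its own budget at
 * check time, so epoll and socket polling can later use different budgets.
 * (The kernel compares via time_after() to stay safe across clock wraparound.) */
static bool timed_out_new(unsigned long start_time, unsigned long budget_usecs)
{
        return now_usecs() > start_time + budget_usecs;
}

int main(void)
{
        unsigned long start = now_usecs();

        while (!timed_out_new(start, 50))       /* spin for roughly 50 microseconds */
                ;
        printf("budget exhausted after %lu usecs\n", now_usecs() - start);

        /* The old scheme would have needed the budget baked in up front: */
        unsigned long deadline = now_usecs() + 50;
        while (!timed_out_old(deadline))
                ;
        return 0;
}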

3 files changed, 49 insertions(+), 41 deletions(-)

fs/select.c

Lines changed: 8 additions & 8 deletions
@@ -409,7 +409,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
 	int retval, i, timed_out = 0;
 	u64 slack = 0;
 	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-	unsigned long busy_end = 0;
+	unsigned long busy_start = 0;
 
 	rcu_read_lock();
 	retval = max_select_fd(n, fds);
@@ -512,11 +512,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
 
 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
 		if (can_busy_loop && !need_resched()) {
-			if (!busy_end) {
-				busy_end = busy_loop_end_time();
+			if (!busy_start) {
+				busy_start = busy_loop_current_time();
 				continue;
 			}
-			if (!busy_loop_timeout(busy_end))
+			if (!busy_loop_timeout(busy_start))
 				continue;
 		}
 		busy_flag = 0;
@@ -800,7 +800,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 	int timed_out = 0, count = 0;
 	u64 slack = 0;
 	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
-	unsigned long busy_end = 0;
+	unsigned long busy_start = 0;
 
 	/* Optimise the no-wait case */
 	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
@@ -853,11 +853,11 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 
 		/* only if found POLL_BUSY_LOOP sockets && not out of time */
 		if (can_busy_loop && !need_resched()) {
-			if (!busy_end) {
-				busy_end = busy_loop_end_time();
+			if (!busy_start) {
+				busy_start = busy_loop_current_time();
 				continue;
 			}
-			if (!busy_loop_timeout(busy_end))
+			if (!busy_loop_timeout(busy_start))
 				continue;
 		}
 		busy_flag = 0;

include/net/busy_poll.h

Lines changed: 38 additions & 30 deletions
@@ -46,62 +46,70 @@ static inline bool net_busy_loop_on(void)
 	return sysctl_net_busy_poll;
 }
 
-static inline u64 busy_loop_us_clock(void)
+static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return local_clock() >> 10;
+	return sk->sk_ll_usec && !signal_pending(current);
 }
 
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
-	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
+void sk_busy_loop(struct sock *sk, int nonblock);
 
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
 {
-	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+	return 0;
 }
 
-static inline bool sk_can_busy_loop(const struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return false;
 }
 
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
 {
-	unsigned long now = busy_loop_us_clock();
-
-	return time_after(now, end_time);
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline unsigned long net_busy_loop_on(void)
+static inline unsigned long busy_loop_current_time(void)
 {
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	return (unsigned long)(local_clock() >> 10);
+#else
 	return 0;
+#endif
 }
 
-static inline unsigned long busy_loop_end_time(void)
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline bool busy_loop_timeout(unsigned long start_time)
 {
-	return 0;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
 
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
-	return false;
-}
+	if (bp_usec) {
+		unsigned long end_time = start_time + bp_usec;
+		unsigned long now = busy_loop_current_time();
 
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
+		return time_after(now, end_time);
+	}
+#endif
 	return true;
 }
 
-static inline void sk_busy_loop(struct sock *sk, int nonblock)
+static inline bool sk_busy_loop_timeout(struct sock *sk,
+					unsigned long start_time)
 {
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
 
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	if (bp_usec) {
+		unsigned long end_time = start_time + bp_usec;
+		unsigned long now = busy_loop_current_time();
+
+		return time_after(now, end_time);
+	}
+#endif
+	return true;
+}
 
 /* used in the NIC receive handler to mark the skb */
 static inline void skb_mark_napi_id(struct sk_buff *skb,
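As a usage note, the select/poll callers in fs/select.c above reduce to roughly the following pattern (condensed from those hunks; can_busy_loop and the descriptor scan are stand-ins for the surrounding loop, not a drop-in implementation):

        /* Record the start lazily on the first busy-capable pass, then let
         * busy_loop_timeout() add the global sysctl_net_busy_poll budget on
         * every subsequent check. */
        unsigned long busy_start = 0;

        for (;;) {
                /* ... scan the descriptors, setting can_busy_loop ... */

                if (can_busy_loop && !need_resched()) {
                        if (!busy_start) {
                                busy_start = busy_loop_current_time();
                                continue;       /* first pass: just note the start */
                        }
                        if (!busy_loop_timeout(busy_start))
                                continue;       /* budget not used up: keep spinning */
                }
                break;                          /* give up busy polling, sleep instead */
        }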

net/core/dev.c

Lines changed: 3 additions & 3 deletions
@@ -5062,7 +5062,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 
 void sk_busy_loop(struct sock *sk, int nonblock)
 {
-	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+	unsigned long start_time = nonblock ? 0 : busy_loop_current_time();
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
@@ -5111,7 +5111,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 		local_bh_enable();
 
 		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    busy_loop_timeout(end_time))
+		    sk_busy_loop_timeout(sk, start_time))
 			break;
 
 		if (unlikely(need_resched())) {
@@ -5121,7 +5121,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 			rcu_read_unlock();
 			cond_resched();
 			if (!skb_queue_empty(&sk->sk_receive_queue) ||
-			    busy_loop_timeout(end_time))
+			    sk_busy_loop_timeout(sk, start_time))
 				return;
 			goto restart;
 		}
