@@ -47,45 +47,32 @@ static inline bool net_busy_loop_on(void)
  * we only care that the average is bounded
  */
 #ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_sched_clock(void)
+static inline u64 busy_loop_us_clock(void)
 {
 	u64 rc;
 
 	preempt_disable_notrace();
 	rc = sched_clock();
 	preempt_enable_no_resched_notrace();
 
-	return rc;
+	return rc >> 10;
 }
 #else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_sched_clock(void)
+static inline u64 busy_loop_us_clock(void)
 {
-	return sched_clock();
+	return sched_clock() >> 10;
 }
 #endif /* CONFIG_DEBUG_PREEMPT */
 
-/* we don't mind a ~2.5% imprecision so <<10 instead of *1000
- * sk->sk_ll_usec is a u_int so this can't overflow
- */
-static inline u64 sk_busy_loop_end_time(struct sock *sk)
+static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
 {
-	return (u64)ACCESS_ONCE(sk->sk_ll_usec) << 10;
+	return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
 }
 
-/* in poll/select we use the global sysctl_net_ll_poll value
- * only call sched_clock() if enabled
- */
-static inline u64 busy_loop_end_time(void)
-{
-	return (u64)ACCESS_ONCE(sysctl_net_ll_poll) << 10;
-}
-
-/* if flag is not set we don't need to know the time
- * so we want to avoid a potentially expensive sched_clock()
- */
-static inline u64 busy_loop_start_time(unsigned int flag)
+/* in poll/select we use the global sysctl_net_ll_poll value */
+static inline unsigned long busy_loop_end_time(void)
 {
-	return flag ? busy_loop_sched_clock() : 0;
+	return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
 }
 
 static inline bool sk_can_busy_loop(struct sock *sk)
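A note on the hunk above: the new busy_loop_us_clock() converts sched_clock()'s nanoseconds to approximate microseconds with a shift by 10 instead of a divide by 1000, trading roughly 2.4% of precision (2^10 = 1024) for a cheaper operation, which is fine since, as the surviving comment says, we only care that the average is bounded. A minimal user-space sketch, not part of the patch, illustrating the size of that error:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ns = 3000000000ULL;   /* 3 s worth of nanoseconds */
	uint64_t exact  = ns / 1000;   /* true microseconds */
	uint64_t approx = ns >> 10;    /* the shift the patch uses */

	/* relative error is 1 - 1000/1024, about 2.34% */
	printf("exact=%llu us approx=%llu us error=%.2f%%\n",
	       (unsigned long long)exact, (unsigned long long)approx,
	       100.0 * (double)(exact - approx) / (double)exact);
	return 0;
}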
@@ -94,21 +81,20 @@ static inline bool sk_can_busy_loop(struct sock *sk)
 	       !need_resched() && !signal_pending(current);
 }
 
-/* careful! time_in_range64 will evaluate now twice */
-static inline bool busy_loop_range(u64 start_time, u64 run_time)
+
+static inline bool busy_loop_timeout(unsigned long end_time)
 {
-	u64 now = busy_loop_sched_clock();
+	unsigned long now = busy_loop_us_clock();
 
-	return time_in_range64(now, start_time, start_time + run_time);
+	return time_after(now, end_time);
 }
 
 /* when used in sock_poll() nonblock is known at compile time to be true
  * so the loop and end_time will be optimized out
  */
 static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 {
-	u64 start_time = busy_loop_start_time(!nonblock);
-	u64 end_time = sk_busy_loop_end_time(sk);
+	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
 	const struct net_device_ops *ops;
 	struct napi_struct *napi;
 	int rc = false;
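The new busy_loop_timeout() compares against an absolute end time using the kernel's time_after() macro, which stays correct across counter wraparound because it subtracts the two values and tests the sign rather than comparing magnitudes directly. A user-space sketch of that behavior, not part of the patch (the macro mirrors the kernel's definition; the values are made up for illustration):

#include <stdio.h>

/* same trick as the kernel's time_after(a, b): "a is after b" iff
 * b - a, viewed as signed, is negative -- immune to unsigned wraparound */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long end = (unsigned long)-5;	/* deadline set just before wrap */
	unsigned long now = 3;			/* clock has since wrapped past 0 */

	printf("naive now > end: %d\n", now > end);             /* 0: wrong  */
	printf("time_after:      %d\n", time_after(now, end));  /* 1: right */
	return 0;
}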
@@ -139,7 +125,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 				 LINUX_MIB_LOWLATENCYRXPACKETS, rc);
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
-		 busy_loop_range(start_time, end_time));
+		 !need_resched() && !busy_loop_timeout(end_time));
 
 	rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
@@ -165,12 +151,7 @@ static inline unsigned long net_busy_loop_on(void)
 	return 0;
 }
 
-static inline u64 busy_loop_start_time(unsigned int flag)
-{
-	return 0;
-}
-
-static inline u64 busy_loop_end_time(void)
+static inline unsigned long busy_loop_end_time(void)
 {
 	return 0;
 }
@@ -193,9 +174,9 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }
 
-static inline bool busy_loop_range(u64 start_time, u64 run_time)
+static inline bool busy_loop_timeout(unsigned long end_time)
 {
-	return false;
+	return true;
 }
 
 #endif /* CONFIG_NET_LL_RX_POLL */