
Commit a2a385d

edumazet authored and davem330 committed
tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent e005d19 commit a2a385d
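
The conversion pattern is mechanical throughout: predicates that returned an int 0/1 now return bool, and literal 0/1 returns become false/true, so the return type documents the contract. A minimal standalone illustration of the before/after shape (sketch only, not code from this commit):

#include <stdbool.h>

/* Before: the int return type hides that this is a yes/no answer. */
static inline int old_is_even(unsigned int x)
{
	return (x & 1) == 0;
}

/* After: bool makes the predicate contract explicit to callers
 * and to the compiler. */
static inline bool new_is_even(unsigned int x)
{
	return (x & 1) == 0;
}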

File tree

9 files changed (+219, -216 lines)


include/net/tcp.h

Lines changed: 28 additions & 28 deletions
@@ -263,14 +263,14 @@ extern int tcp_memory_pressure;
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
 {
 	return (__s32)(seq1-seq2) < 0;
 }
 #define after(seq2, seq1)	before(seq1, seq2)
 
 /* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 {
 	return seq3 - seq2 >= seq1 - seq2;
 }

@@ -305,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk)
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);

@@ -383,7 +383,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
 					   struct request_sock **prev);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
 			     struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);

@@ -470,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 				      int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);

@@ -484,9 +484,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
-				const struct sk_buff *skb,
-				const char *proto);
+extern bool tcp_syn_flood_action(struct sock *sk,
+				 const struct sk_buff *skb,
+				 const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);

@@ -794,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
 	return tp->rx_opt.sack_ok;
 }
 
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
 {
 	return !tcp_is_sack(tp);
 }
 
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
 {
 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }

@@ -901,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)

@@ -944,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 	return __skb_checksum_complete(skb);
 }
 
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
 		__tcp_checksum_complete(skb);

@@ -974,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return 0;
+		return false;
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;

@@ -1003,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
 	}
-	return 1;
+	return true;
 }
 

@@ -1108,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk)
 	return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
-				 int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+				  int paws_win)
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-		return 1;
+		return true;
 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-		return 1;
+		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 	 * then following tcp messages have valid values. Ignore 0 value,
 	 * or else 'negative' tsval might forbid us to accept their packets.
 	 */
 	if (!rx_opt->ts_recent)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
-				  int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+				   int rst)
 {
 	if (tcp_paws_check(rx_opt, 0))
-		return 0;
+		return false;
 
 	/* RST segments are not recommended to carry timestamp,
 	   and, if they do, it is recommended to ignore PAWS because

@@ -1144,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static inline void tcp_mib_init(struct net *net)

@@ -1383,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
 	__skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
 {
 	return skb_queue_empty(&sk->sk_write_queue);
 }

@@ -1440,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 {
 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
 }
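
A note on the before()/after() helpers above: because TCP sequence numbers live in a 32-bit modular space, the comparison subtracts in unsigned arithmetic and casts the difference to signed, which stays correct across wraparound for distances under 2^31. A standalone check with made-up sequence numbers (illustrative, not part of the commit):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	/* Same logic as the kernel's before(): the unsigned difference,
	 * reinterpreted as signed, is negative iff seq1 precedes seq2. */
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	assert(seq_before(100, 200));		/* plain ordering */
	assert(seq_before(0xFFFFFFF0u, 0x10u));	/* across wraparound */
	assert(!seq_before(0x10u, 0xFFFFFFF0u));
	return 0;
}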

net/ipv4/tcp.c

Lines changed: 10 additions & 10 deletions
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }

@@ -1082,7 +1082,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				if (err)
 					goto do_fault;
 			} else {
-				int merge = 0;
+				bool merge = false;
 				int i = skb_shinfo(skb)->nr_frags;
 				struct page *page = sk->sk_sndmsg_page;
 				int off;

@@ -1096,7 +1096,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				    off != PAGE_SIZE) {
 					/* We can extend the last page
 					 * fragment. */
-					merge = 1;
+					merge = true;
 				} else if (i == MAX_SKB_FRAGS || !sg) {
 					/* Need to add new fragment and cannot
 					 * do this because interface is non-SG,

@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 		      !icsk->icsk_ack.pingpong)) &&
 		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window

@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 			 * "Lots" means "at least twice" here.
 			 */
 			if (new_window && new_window >= 2 * rcv_window_now)
-				time_to_ack = 1;
+				time_to_ack = true;
 		}
 	}
 	if (time_to_ack)

@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |

@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return capable(CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));

@@ -3172,13 +3172,13 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
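
Both tcp_need_reset() and tcp_can_repair_sock() above use the same state-set idiom: shifting 1 by the state number and masking against OR'ed TCPF_* flags turns a chain of equality tests into a single AND. A standalone sketch of the idiom (illustrative state names and values, not the kernel's):

#include <stdbool.h>

enum demo_state { DEMO_CLOSED, DEMO_LISTEN, DEMO_ESTABLISHED, DEMO_FIN_WAIT1 };

#define DEMOF_ESTABLISHED	(1 << DEMO_ESTABLISHED)
#define DEMOF_FIN_WAIT1		(1 << DEMO_FIN_WAIT1)

/* Membership in a set of states is one shift and one AND, instead of
 * state == A || state == B || ... */
static bool demo_needs_reset(int state)
{
	return (1 << state) & (DEMOF_ESTABLISHED | DEMOF_FIN_WAIT1);
}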

net/ipv4/tcp_cong.c

Lines changed: 3 additions & 3 deletions
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 /* RFC2861 Check whether we are limited by application or congestion window
  * This is the inverse of cwnd check in tcp_tso_should_defer
  */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 left;
 
 	if (in_flight >= tp->snd_cwnd)
-		return 1;
+		return true;
 
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
 	    left * tp->mss_cache < sk->sk_gso_max_size)
-		return 1;
+		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
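
For context on how this predicate is consumed: congestion-avoidance hooks of this era typically bail out early when the connection is application-limited, so the window only grows while cwnd is the actual bottleneck. A paraphrased caller sketch (not part of this diff; the hook name is illustrative, kernel types assumed):

/* Sketch of a cong_avoid-style caller. */
static void demo_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;	/* application-limited: leave snd_cwnd alone */

	/* ... otherwise grow snd_cwnd via slow start or AIMD ... */
}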

net/ipv4/tcp_hybla.c

Lines changed: 5 additions & 5 deletions
@@ -15,7 +15,7 @@
 
 /* Tcp Hybla structure. */
 struct hybla {
-	u8    hybla_en;
+	bool  hybla_en;
 	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
 	u32   rho;	      /* Rho parameter, integer part */
 	u32   rho2;	      /* Rho * Rho, integer part */

@@ -24,8 +24,7 @@ struct hybla {
 	u32   minrtt;	      /* Minimum smoothed round trip time value seen */
 };
 
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
 static int rtt0 = 25;
 module_param(rtt0, int, 0644);
 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");

@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
 	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
 	ca->rho = ca->rho_3ls >> 3;
 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
-	ca->rho2 = ca->rho2_7ls >>7;
+	ca->rho2 = ca->rho2_7ls >> 7;
 }
 
 static void hybla_init(struct sock *sk)

@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
 	ca->rho_3ls = 0;
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
-	ca->hybla_en = 1;
+	ca->hybla_en = true;
 	tp->snd_cwnd = 2;
 	tp->snd_cwnd_clamp = 65535;
 
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
 static void hybla_state(struct sock *sk, u8 ca_state)
 {
 	struct hybla *ca = inet_csk_ca(sk);
+
 	ca->hybla_en = (ca_state == TCP_CA_Open);
 }
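
The _3ls and _7ls suffixes in the Hybla code mark fixed-point quantities, left-shifted by 3 and 7 bits; srtt itself is stored as RTT << 3, which is why dividing it by rtt0 in jiffies yields rho << 3 directly. A standalone arithmetic check under assumed inputs (1 ms jiffies, RTT = 100 ms, rtt0 = 25 ms), mirroring hybla_recalc_param():

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t srtt = 100 << 3;	/* kernel stores srtt as RTT << 3 */
	uint32_t rtt0_jiffies = 25;

	uint32_t rho_3ls = srtt / rtt0_jiffies;		/* 32, i.e. 4 << 3 */
	uint32_t rho = rho_3ls >> 3;			/* 4 */
	uint32_t rho2_7ls = (rho_3ls * rho_3ls) << 1;	/* 2048, i.e. 16 << 7 */
	uint32_t rho2 = rho2_7ls >> 7;			/* 16 == rho * rho */

	assert(rho == 4 && rho2 == 16);
	return 0;
}

(The max_t(..., 8) floor in hybla_recalc_param() clamps rho_3ls so that rho never drops below 1.)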
