@@ -263,14 +263,14 @@ extern int tcp_memory_pressure;
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
 
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
 {
 	return (__s32)(seq1 - seq2) < 0;
 }
 #define after(seq2, seq1) 	before(seq1, seq2)
 
 /* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 {
 	return seq3 - seq2 >= seq1 - seq2;
 }
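The unsigned-subtract/signed-compare idiom above is what makes before() and between() safe across 32-bit sequence-number wraparound. A minimal standalone sketch of the same arithmetic (userspace C, with uint32_t/int32_t standing in for the kernel's __u32/__s32; the seq_* names are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same idiom as the kernel's before()/between(). */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	/* Unsigned subtraction wraps mod 2^32; reinterpreting the
	 * difference as signed yields "earlier" for any two sequence
	 * numbers less than 2^31 apart. */
	return (int32_t)(seq1 - seq2) < 0;
}

static bool seq_between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	/* s2 <= s1 <= s3, again modulo 2^32. */
	return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
	/* Works across the 2^32 wrap: 0xFFFFFFF0 is "before" 0x10. */
	printf("%d\n", seq_before(0xFFFFFFF0u, 0x10u));		/* 1 */
	printf("%d\n", seq_before(0x10u, 0xFFFFFFF0u));		/* 0 */
	printf("%d\n", seq_between(0x05u, 0xFFFFFFF0u, 0x10u));	/* 1 */
	return 0;
}

Any two sequence numbers less than 2^31 apart compare correctly regardless of where the wrap falls between them, which is why the comment above says wraparound is "automatic with unsigned arithmetic".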
@@ -305,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk)
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
@@ -383,7 +383,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
 						  struct request_sock **prev);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
 			     struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -470,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 				      int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -484,9 +484,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
-				const struct sk_buff *skb,
-				const char *proto);
+extern bool tcp_syn_flood_action(struct sock *sk,
+				 const struct sk_buff *skb,
+				 const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
@@ -794,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
 	return tp->rx_opt.sack_ok;
 }
 
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
 {
 	return !tcp_is_sack(tp);
 }
 
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
 {
 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
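A practical payoff of these int → bool conversions shows up in predicates that return a masked flag, as tcp_is_fack() does above: with an int return type the caller sees the raw mask bit, while conversion to C99 _Bool normalizes any non-zero value to 1, so comparing two predicates compares truth values rather than bit positions. A standalone illustration (the TCP_FACK_ENABLED value mirrors the kernel's 1 << 1 bit, but treat the snippet as a sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define TCP_FACK_ENABLED (1 << 1)	/* illustrative flag bit */

static int  is_fack_int(int sack_ok)  { return sack_ok & TCP_FACK_ENABLED; }
static bool is_fack_bool(int sack_ok) { return sack_ok & TCP_FACK_ENABLED; }

int main(void)
{
	int sack_ok = 3;	/* both the SACK and FACK bits set */

	/* The int version leaks the raw mask value... */
	printf("int:  %d\n", is_fack_int(sack_ok));	/* prints 2 */
	/* ...while assignment to _Bool normalizes non-zero to 1. */
	printf("bool: %d\n", is_fack_bool(sack_ok));	/* prints 1 */
	return 0;
}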
@@ -901,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
@@ -944,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 	return __skb_checksum_complete(skb);
 }
 
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
 		__tcp_checksum_complete(skb);
@@ -974,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return 0;
+		return false;
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
@@ -1003,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 					  (3 * tcp_rto_min(sk)) / 4,
 					  TCP_RTO_MAX);
 	}
-	return 1;
+	return true;
 }
 
 
@@ -1108,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk)
 	return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
-				 int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+				  int paws_win)
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-		return 1;
+		return true;
 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-		return 1;
+		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 	 * then following tcp messages have valid values. Ignore 0 value,
 	 * or else 'negative' tsval might forbid us to accept their packets.
 	 */
 	if (!rx_opt->ts_recent)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
-				  int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+				   int rst)
 {
 	if (tcp_paws_check(rx_opt, 0))
-		return 0;
+		return false;
 
 	/* RST segments are not recommended to carry timestamp,
 	   and, if they do, it is recommended to ignore PAWS because
@@ -1144,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static inline void tcp_mib_init(struct net *net)
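tcp_paws_check() reuses the same serial-number arithmetic as before(): ts_recent - rcv_tsval is computed in unsigned arithmetic and reinterpreted as signed, so a newer incoming timestamp yields a non-positive difference and passes even across timestamp wraparound. A standalone sketch of just that first test (names and values are illustrative; the later fallback tests in tcp_paws_check() are omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Shape of tcp_paws_check()'s first test: accept the segment unless
 * its timestamp (rcv_tsval) is older than the last recorded one
 * (ts_recent) by more than paws_win ticks. */
static bool paws_ts_ok(uint32_t ts_recent, uint32_t rcv_tsval, int32_t paws_win)
{
	return (int32_t)(ts_recent - rcv_tsval) <= paws_win;
}

int main(void)
{
	/* Newer or equal timestamp: difference <= 0, accepted. */
	printf("%d\n", paws_ts_ok(1000, 1005, 0));	/* 1 */
	/* Older by one tick with a zero window: rejected by this test
	 * (as in tcp_paws_reject(), which passes paws_win = 0). */
	printf("%d\n", paws_ts_ok(1000, 999, 0));	/* 0 */
	/* A positive window tolerates slight reordering. */
	printf("%d\n", paws_ts_ok(1000, 999, 1));	/* 1 */
	return 0;
}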
@@ -1383,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
 	__skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
 {
 	return skb_queue_empty(&sk->sk_write_queue);
 }
@@ -1440,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 {
 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
 }