Skip to content

Commit 2fd66ff

Browse files
Eric Dumazet authored and davem330 committed
tcp: introduce tcp_skb_timestamp_us() helper
There are a few places where TCP reads skb->skb_mstamp expecting a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store a CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide the proper conversion when needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 72b0094 commit 2fd66ff

File tree

6 files changed

+26
-17
lines changed

6 files changed

+26
-17
lines changed

include/net/tcp.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -774,6 +774,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
774774
return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
775775
}
776776

777+
/* provide the departure time in us unit */
778+
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
779+
{
780+
return skb->skb_mstamp;
781+
}
782+
777783

778784
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
779785

@@ -1940,7 +1946,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
19401946
{
19411947
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
19421948
u32 rto = inet_csk(sk)->icsk_rto;
1943-
u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1949+
u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
19441950

19451951
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
19461952
}

net/ipv4/tcp_input.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
13051305
*/
13061306
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
13071307
start_seq, end_seq, dup_sack, pcount,
1308-
skb->skb_mstamp);
1308+
tcp_skb_timestamp_us(skb));
13091309
tcp_rate_skb_delivered(sk, skb, state->rate);
13101310

13111311
if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
15801580
TCP_SKB_CB(skb)->end_seq,
15811581
dup_sack,
15821582
tcp_skb_pcount(skb),
1583-
skb->skb_mstamp);
1583+
tcp_skb_timestamp_us(skb));
15841584
tcp_rate_skb_delivered(sk, skb, state->rate);
15851585
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
15861586
list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
31033103
tp->retrans_out -= acked_pcount;
31043104
flag |= FLAG_RETRANS_DATA_ACKED;
31053105
} else if (!(sacked & TCPCB_SACKED_ACKED)) {
3106-
last_ackt = skb->skb_mstamp;
3106+
last_ackt = tcp_skb_timestamp_us(skb);
31073107
WARN_ON_ONCE(last_ackt == 0);
31083108
if (!first_ackt)
31093109
first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
31213121
tp->delivered += acked_pcount;
31223122
if (!tcp_skb_spurious_retrans(tp, skb))
31233123
tcp_rack_advance(tp, sacked, scb->end_seq,
3124-
skb->skb_mstamp);
3124+
tcp_skb_timestamp_us(skb));
31253125
}
31263126
if (sacked & TCPCB_LOST)
31273127
tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
32153215
tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
32163216
}
32173217
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
3218-
sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
3218+
sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3219+
tcp_skb_timestamp_us(skb))) {
32193220
/* Do not re-arm RTO if the sack RTT is measured from data sent
32203221
* after when the head was last (re)transmitted. Otherwise the
32213222
* timeout may continue to extend in loss recovery.

net/ipv4/tcp_ipv4.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
544544
BUG_ON(!skb);
545545

546546
tcp_mstamp_refresh(tp);
547-
delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
547+
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
548548
remaining = icsk->icsk_rto -
549549
usecs_to_jiffies(delta_us);
550550

net/ipv4/tcp_output.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
19661966
head = tcp_rtx_queue_head(sk);
19671967
if (!head)
19681968
goto send_now;
1969-
age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
1969+
age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
19701970
/* If next ACK is likely to come too late (half srtt), do not defer */
19711971
if (age < (tp->srtt_us >> 4))
19721972
goto send_now;

net/ipv4/tcp_rate.c

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
5555
* bandwidth estimate.
5656
*/
5757
if (!tp->packets_out) {
58-
tp->first_tx_mstamp = skb->skb_mstamp;
59-
tp->delivered_mstamp = skb->skb_mstamp;
58+
u64 tstamp_us = tcp_skb_timestamp_us(skb);
59+
60+
tp->first_tx_mstamp = tstamp_us;
61+
tp->delivered_mstamp = tstamp_us;
6062
}
6163

6264
TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
8890
rs->is_app_limited = scb->tx.is_app_limited;
8991
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
9092

93+
/* Record send time of most recently ACKed packet: */
94+
tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
9195
/* Find the duration of the "send phase" of this window: */
92-
rs->interval_us = tcp_stamp_us_delta(
93-
skb->skb_mstamp,
94-
scb->tx.first_tx_mstamp);
96+
rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
97+
scb->tx.first_tx_mstamp);
9598

96-
/* Record send time of most recently ACKed packet: */
97-
tp->first_tx_mstamp = skb->skb_mstamp;
9899
}
99100
/* Mark off the skb delivered once it's sacked to avoid being
100101
* used again when it's cumulatively acked. For acked packets

net/ipv4/tcp_recovery.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
5050
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
5151
{
5252
return tp->rack.rtt_us + reo_wnd -
53-
tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
53+
tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
5454
}
5555

5656
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
9191
!(scb->sacked & TCPCB_SACKED_RETRANS))
9292
continue;
9393

94-
if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
94+
if (!tcp_rack_sent_after(tp->rack.mstamp,
95+
tcp_skb_timestamp_us(skb),
9596
tp->rack.end_seq, scb->end_seq))
9697
break;
9798

0 commit comments

Comments (0)