Skip to content

Commit 85f7e75

Browse files
Florian Westphal authored and davem330 (David S. Miller) committed
tcp: add cwnd_undo functions to various tcp cc algorithms
congestion control algorithms that do not halve cwnd in their .ssthresh should provide a .cwnd_undo rather than rely on current fallback which assumes reno halving (and thus doubles the cwnd). All of these do 'something else' in their .ssthresh implementation, thus store the cwnd on loss and provide .undo_cwnd to restore it again. A followup patch will remove the fallback and all algorithms will need to provide a .cwnd_undo function. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 2fcb58a commit 85f7e75

File tree

5 files changed

+55
-1
lines changed

5 files changed

+55
-1
lines changed

net/ipv4/tcp_highspeed.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@ static const struct hstcp_aimd_val {
9494

9595
struct hstcp {
9696
u32 ai;
97+
u32 loss_cwnd;
9798
};
9899

99100
static void hstcp_init(struct sock *sk)
@@ -150,16 +151,24 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
150151
static u32 hstcp_ssthresh(struct sock *sk)
151152
{
152153
const struct tcp_sock *tp = tcp_sk(sk);
153-
const struct hstcp *ca = inet_csk_ca(sk);
154+
struct hstcp *ca = inet_csk_ca(sk);
154155

156+
ca->loss_cwnd = tp->snd_cwnd;
155157
/* Do multiplicative decrease */
156158
return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
157159
}
158160

161+
static u32 hstcp_cwnd_undo(struct sock *sk)
162+
{
163+
const struct hstcp *ca = inet_csk_ca(sk);
164+
165+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
166+
}
159167

160168
static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
161169
.init = hstcp_init,
162170
.ssthresh = hstcp_ssthresh,
171+
.undo_cwnd = hstcp_cwnd_undo,
163172
.cong_avoid = hstcp_cong_avoid,
164173

165174
.owner = THIS_MODULE,

net/ipv4/tcp_illinois.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ struct illinois {
4848
u32 end_seq; /* right edge of current RTT */
4949
u32 alpha; /* Additive increase */
5050
u32 beta; /* Muliplicative decrease */
51+
u32 loss_cwnd; /* cwnd on loss */
5152
u16 acked; /* # packets acked by current ACK */
5253
u8 rtt_above; /* average rtt has gone above threshold */
5354
u8 rtt_low; /* # of rtts measurements below threshold */
@@ -296,10 +297,18 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
296297
struct tcp_sock *tp = tcp_sk(sk);
297298
struct illinois *ca = inet_csk_ca(sk);
298299

300+
ca->loss_cwnd = tp->snd_cwnd;
299301
/* Multiplicative decrease */
300302
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
301303
}
302304

305+
static u32 tcp_illinois_cwnd_undo(struct sock *sk)
306+
{
307+
const struct illinois *ca = inet_csk_ca(sk);
308+
309+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
310+
}
311+
303312
/* Extract info for Tcp socket info provided via netlink. */
304313
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
305314
union tcp_cc_info *info)
@@ -327,6 +336,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
327336
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
328337
.init = tcp_illinois_init,
329338
.ssthresh = tcp_illinois_ssthresh,
339+
.undo_cwnd = tcp_illinois_cwnd_undo,
330340
.cong_avoid = tcp_illinois_cong_avoid,
331341
.set_state = tcp_illinois_state,
332342
.get_info = tcp_illinois_info,

net/ipv4/tcp_scalable.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@
1515
#define TCP_SCALABLE_AI_CNT 50U
1616
#define TCP_SCALABLE_MD_SCALE 3
1717

18+
struct scalable {
19+
u32 loss_cwnd;
20+
};
21+
1822
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
1923
{
2024
struct tcp_sock *tp = tcp_sk(sk);
@@ -32,12 +36,23 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3236
static u32 tcp_scalable_ssthresh(struct sock *sk)
3337
{
3438
const struct tcp_sock *tp = tcp_sk(sk);
39+
struct scalable *ca = inet_csk_ca(sk);
40+
41+
ca->loss_cwnd = tp->snd_cwnd;
3542

3643
return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
3744
}
3845

46+
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
47+
{
48+
const struct scalable *ca = inet_csk_ca(sk);
49+
50+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
51+
}
52+
3953
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
4054
.ssthresh = tcp_scalable_ssthresh,
55+
.undo_cwnd = tcp_scalable_cwnd_undo,
4156
.cong_avoid = tcp_scalable_cong_avoid,
4257

4358
.owner = THIS_MODULE,

net/ipv4/tcp_veno.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ struct veno {
3030
u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */
3131
u32 inc; /* decide whether to increase cwnd */
3232
u32 diff; /* calculate the diff rate */
33+
u32 loss_cwnd; /* cwnd when loss occured */
3334
};
3435

3536
/* There are several situations when we must "re-start" Veno:
@@ -193,6 +194,7 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
193194
const struct tcp_sock *tp = tcp_sk(sk);
194195
struct veno *veno = inet_csk_ca(sk);
195196

197+
veno->loss_cwnd = tp->snd_cwnd;
196198
if (veno->diff < beta)
197199
/* in "non-congestive state", cut cwnd by 1/5 */
198200
return max(tp->snd_cwnd * 4 / 5, 2U);
@@ -201,9 +203,17 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
201203
return max(tp->snd_cwnd >> 1U, 2U);
202204
}
203205

206+
static u32 tcp_veno_cwnd_undo(struct sock *sk)
207+
{
208+
const struct veno *veno = inet_csk_ca(sk);
209+
210+
return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd);
211+
}
212+
204213
static struct tcp_congestion_ops tcp_veno __read_mostly = {
205214
.init = tcp_veno_init,
206215
.ssthresh = tcp_veno_ssthresh,
216+
.undo_cwnd = tcp_veno_cwnd_undo,
207217
.cong_avoid = tcp_veno_cong_avoid,
208218
.pkts_acked = tcp_veno_pkts_acked,
209219
.set_state = tcp_veno_state,

net/ipv4/tcp_yeah.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ struct yeah {
3737
u32 fast_count;
3838

3939
u32 pkts_acked;
40+
u32 loss_cwnd;
4041
};
4142

4243
static void tcp_yeah_init(struct sock *sk)
@@ -219,13 +220,22 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
219220

220221
yeah->fast_count = 0;
221222
yeah->reno_count = max(yeah->reno_count>>1, 2U);
223+
yeah->loss_cwnd = tp->snd_cwnd;
222224

223225
return max_t(int, tp->snd_cwnd - reduction, 2);
224226
}
225227

228+
static u32 tcp_yeah_cwnd_undo(struct sock *sk)
229+
{
230+
const struct yeah *yeah = inet_csk_ca(sk);
231+
232+
return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd);
233+
}
234+
226235
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
227236
.init = tcp_yeah_init,
228237
.ssthresh = tcp_yeah_ssthresh,
238+
.undo_cwnd = tcp_yeah_cwnd_undo,
229239
.cong_avoid = tcp_yeah_cong_avoid,
230240
.set_state = tcp_vegas_state,
231241
.cwnd_event = tcp_vegas_cwnd_event,

0 commit comments

Comments
 (0)