Skip to content

Commit 9e36ced

Browse files
committed
Merge branch 'tcp-cong-undo_cwnd-mandatory'
Florian Westphal says:

====================
tcp: make undo_cwnd mandatory for congestion modules

The highspeed, illinois, scalable, veno and yeah congestion control algorithms don't provide a 'cwnd_undo' function. This makes the stack default to a 'reno undo', which doubles cwnd. However, the ssthresh implementations of these algorithms do not halve the slow-start threshold. This causes an issue similar to the one fixed for dctcp in ce6dd23 ("dctcp: avoid bogus doubling of cwnd after loss").

In light of this, it seems better to remove the fallback and make undo_cwnd mandatory. The first patch fixes those spots where the reno undo seems incorrect by providing .cwnd_undo functions; the second patch removes the fallback.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents 2fcb58a + e979918 commit 9e36ced

File tree

13 files changed

+74
-7
lines changed

13 files changed

+74
-7
lines changed

include/net/tcp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -958,6 +958,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
958958
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
959959

960960
u32 tcp_reno_ssthresh(struct sock *sk);
961+
u32 tcp_reno_undo_cwnd(struct sock *sk);
961962
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
962963
extern struct tcp_congestion_ops tcp_reno;
963964

net/ipv4/tcp_cong.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,9 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
6868
{
6969
int ret = 0;
7070

71-
/* all algorithms must implement ssthresh and cong_avoid ops */
72-
if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
71+
/* all algorithms must implement these */
72+
if (!ca->ssthresh || !ca->undo_cwnd ||
73+
!(ca->cong_avoid || ca->cong_control)) {
7374
pr_err("%s does not implement required ops\n", ca->name);
7475
return -EINVAL;
7576
}
@@ -441,10 +442,19 @@ u32 tcp_reno_ssthresh(struct sock *sk)
441442
}
442443
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
443444

445+
u32 tcp_reno_undo_cwnd(struct sock *sk)
446+
{
447+
const struct tcp_sock *tp = tcp_sk(sk);
448+
449+
return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
450+
}
451+
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
452+
444453
struct tcp_congestion_ops tcp_reno = {
445454
.flags = TCP_CONG_NON_RESTRICTED,
446455
.name = "reno",
447456
.owner = THIS_MODULE,
448457
.ssthresh = tcp_reno_ssthresh,
449458
.cong_avoid = tcp_reno_cong_avoid,
459+
.undo_cwnd = tcp_reno_undo_cwnd,
450460
};

net/ipv4/tcp_dctcp.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,7 @@ static struct tcp_congestion_ops dctcp __read_mostly = {
342342
static struct tcp_congestion_ops dctcp_reno __read_mostly = {
343343
.ssthresh = tcp_reno_ssthresh,
344344
.cong_avoid = tcp_reno_cong_avoid,
345+
.undo_cwnd = tcp_reno_undo_cwnd,
345346
.get_info = dctcp_get_info,
346347
.owner = THIS_MODULE,
347348
.name = "dctcp-reno",

net/ipv4/tcp_highspeed.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@ static const struct hstcp_aimd_val {
9494

9595
struct hstcp {
9696
u32 ai;
97+
u32 loss_cwnd;
9798
};
9899

99100
static void hstcp_init(struct sock *sk)
@@ -150,16 +151,24 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
150151
static u32 hstcp_ssthresh(struct sock *sk)
151152
{
152153
const struct tcp_sock *tp = tcp_sk(sk);
153-
const struct hstcp *ca = inet_csk_ca(sk);
154+
struct hstcp *ca = inet_csk_ca(sk);
154155

156+
ca->loss_cwnd = tp->snd_cwnd;
155157
/* Do multiplicative decrease */
156158
return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
157159
}
158160

161+
static u32 hstcp_cwnd_undo(struct sock *sk)
162+
{
163+
const struct hstcp *ca = inet_csk_ca(sk);
164+
165+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
166+
}
159167

160168
static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
161169
.init = hstcp_init,
162170
.ssthresh = hstcp_ssthresh,
171+
.undo_cwnd = hstcp_cwnd_undo,
163172
.cong_avoid = hstcp_cong_avoid,
164173

165174
.owner = THIS_MODULE,

net/ipv4/tcp_hybla.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -166,6 +166,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
166166
static struct tcp_congestion_ops tcp_hybla __read_mostly = {
167167
.init = hybla_init,
168168
.ssthresh = tcp_reno_ssthresh,
169+
.undo_cwnd = tcp_reno_undo_cwnd,
169170
.cong_avoid = hybla_cong_avoid,
170171
.set_state = hybla_state,
171172

net/ipv4/tcp_illinois.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ struct illinois {
4848
u32 end_seq; /* right edge of current RTT */
4949
u32 alpha; /* Additive increase */
5050
u32 beta; /* Multiplicative decrease */
51+
u32 loss_cwnd; /* cwnd on loss */
5152
u16 acked; /* # packets acked by current ACK */
5253
u8 rtt_above; /* average rtt has gone above threshold */
5354
u8 rtt_low; /* # of rtts measurements below threshold */
@@ -296,10 +297,18 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
296297
struct tcp_sock *tp = tcp_sk(sk);
297298
struct illinois *ca = inet_csk_ca(sk);
298299

300+
ca->loss_cwnd = tp->snd_cwnd;
299301
/* Multiplicative decrease */
300302
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
301303
}
302304

305+
static u32 tcp_illinois_cwnd_undo(struct sock *sk)
306+
{
307+
const struct illinois *ca = inet_csk_ca(sk);
308+
309+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
310+
}
311+
303312
/* Extract info for Tcp socket info provided via netlink. */
304313
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
305314
union tcp_cc_info *info)
@@ -327,6 +336,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
327336
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
328337
.init = tcp_illinois_init,
329338
.ssthresh = tcp_illinois_ssthresh,
339+
.undo_cwnd = tcp_illinois_cwnd_undo,
330340
.cong_avoid = tcp_illinois_cong_avoid,
331341
.set_state = tcp_illinois_state,
332342
.get_info = tcp_illinois_info,

net/ipv4/tcp_input.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2394,10 +2394,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
23942394
if (tp->prior_ssthresh) {
23952395
const struct inet_connection_sock *icsk = inet_csk(sk);
23962396

2397-
if (icsk->icsk_ca_ops->undo_cwnd)
2398-
tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
2399-
else
2400-
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
2397+
tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
24012398

24022399
if (tp->prior_ssthresh > tp->snd_ssthresh) {
24032400
tp->snd_ssthresh = tp->prior_ssthresh;

net/ipv4/tcp_lp.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -316,6 +316,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
316316
static struct tcp_congestion_ops tcp_lp __read_mostly = {
317317
.init = tcp_lp_init,
318318
.ssthresh = tcp_reno_ssthresh,
319+
.undo_cwnd = tcp_reno_undo_cwnd,
319320
.cong_avoid = tcp_lp_cong_avoid,
320321
.pkts_acked = tcp_lp_pkts_acked,
321322

net/ipv4/tcp_scalable.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@
1515
#define TCP_SCALABLE_AI_CNT 50U
1616
#define TCP_SCALABLE_MD_SCALE 3
1717

18+
struct scalable {
19+
u32 loss_cwnd;
20+
};
21+
1822
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
1923
{
2024
struct tcp_sock *tp = tcp_sk(sk);
@@ -32,12 +36,23 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3236
static u32 tcp_scalable_ssthresh(struct sock *sk)
3337
{
3438
const struct tcp_sock *tp = tcp_sk(sk);
39+
struct scalable *ca = inet_csk_ca(sk);
40+
41+
ca->loss_cwnd = tp->snd_cwnd;
3542

3643
return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
3744
}
3845

46+
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
47+
{
48+
const struct scalable *ca = inet_csk_ca(sk);
49+
50+
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
51+
}
52+
3953
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
4054
.ssthresh = tcp_scalable_ssthresh,
55+
.undo_cwnd = tcp_scalable_cwnd_undo,
4156
.cong_avoid = tcp_scalable_cong_avoid,
4257

4358
.owner = THIS_MODULE,

net/ipv4/tcp_vegas.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
307307
static struct tcp_congestion_ops tcp_vegas __read_mostly = {
308308
.init = tcp_vegas_init,
309309
.ssthresh = tcp_reno_ssthresh,
310+
.undo_cwnd = tcp_reno_undo_cwnd,
310311
.cong_avoid = tcp_vegas_cong_avoid,
311312
.pkts_acked = tcp_vegas_pkts_acked,
312313
.set_state = tcp_vegas_state,

net/ipv4/tcp_veno.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ struct veno {
3030
u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */
3131
u32 inc; /* decide whether to increase cwnd */
3232
u32 diff; /* calculate the diff rate */
33+
u32 loss_cwnd; /* cwnd when loss occurred */
3334
};
3435

3536
/* There are several situations when we must "re-start" Veno:
@@ -193,6 +194,7 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
193194
const struct tcp_sock *tp = tcp_sk(sk);
194195
struct veno *veno = inet_csk_ca(sk);
195196

197+
veno->loss_cwnd = tp->snd_cwnd;
196198
if (veno->diff < beta)
197199
/* in "non-congestive state", cut cwnd by 1/5 */
198200
return max(tp->snd_cwnd * 4 / 5, 2U);
@@ -201,9 +203,17 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
201203
return max(tp->snd_cwnd >> 1U, 2U);
202204
}
203205

206+
static u32 tcp_veno_cwnd_undo(struct sock *sk)
207+
{
208+
const struct veno *veno = inet_csk_ca(sk);
209+
210+
return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd);
211+
}
212+
204213
static struct tcp_congestion_ops tcp_veno __read_mostly = {
205214
.init = tcp_veno_init,
206215
.ssthresh = tcp_veno_ssthresh,
216+
.undo_cwnd = tcp_veno_cwnd_undo,
207217
.cong_avoid = tcp_veno_cong_avoid,
208218
.pkts_acked = tcp_veno_pkts_acked,
209219
.set_state = tcp_veno_state,

net/ipv4/tcp_westwood.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -278,6 +278,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
278278
.init = tcp_westwood_init,
279279
.ssthresh = tcp_reno_ssthresh,
280280
.cong_avoid = tcp_reno_cong_avoid,
281+
.undo_cwnd = tcp_reno_undo_cwnd,
281282
.cwnd_event = tcp_westwood_event,
282283
.in_ack_event = tcp_westwood_ack,
283284
.get_info = tcp_westwood_info,

net/ipv4/tcp_yeah.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ struct yeah {
3737
u32 fast_count;
3838

3939
u32 pkts_acked;
40+
u32 loss_cwnd;
4041
};
4142

4243
static void tcp_yeah_init(struct sock *sk)
@@ -219,13 +220,22 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
219220

220221
yeah->fast_count = 0;
221222
yeah->reno_count = max(yeah->reno_count>>1, 2U);
223+
yeah->loss_cwnd = tp->snd_cwnd;
222224

223225
return max_t(int, tp->snd_cwnd - reduction, 2);
224226
}
225227

228+
static u32 tcp_yeah_cwnd_undo(struct sock *sk)
229+
{
230+
const struct yeah *yeah = inet_csk_ca(sk);
231+
232+
return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd);
233+
}
234+
226235
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
227236
.init = tcp_yeah_init,
228237
.ssthresh = tcp_yeah_ssthresh,
238+
.undo_cwnd = tcp_yeah_cwnd_undo,
229239
.cong_avoid = tcp_yeah_cong_avoid,
230240
.set_state = tcp_vegas_state,
231241
.cwnd_event = tcp_vegas_cwnd_event,

0 commit comments

Comments (0)