@@ -388,7 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	struct flow_filter *fold, *fnew;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FLOW_MAX + 1];
-	struct tcf_exts e;
 	unsigned int nkeys = 0;
 	unsigned int perturb_period = 0;
 	u32 baseclass = 0;
@@ -424,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 			return -EOPNOTSUPP;
 	}
 
-	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-	if (err < 0)
-		goto err1;
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-	if (err < 0)
-		goto err1;
-
-	err = -ENOBUFS;
 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
 	if (!fnew)
-		goto err1;
+		return -ENOBUFS;
 
 	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
 	if (err < 0)
-		goto err2;
+		goto err1;
 
 	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	if (err < 0)
-		goto err3;
+		goto err2;
+
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+	if (err < 0)
+		goto err2;
 
 	fold = (struct flow_filter *)*arg;
 	if (fold) {
 		err = -EINVAL;
 		if (fold->handle != handle && handle)
-			goto err3;
+			goto err2;
 
 		/* Copy fold into fnew */
 		fnew->tp = fold->tp;
@@ -468,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (mode == FLOW_MODE_HASH)
 			perturb_period = fold->perturb_period;
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 	} else {
 		err = -EINVAL;
 		if (!handle)
-			goto err3;
+			goto err2;
 		if (!tb[TCA_FLOW_KEYS])
-			goto err3;
+			goto err2;
 
 		mode = FLOW_MODE_MAP;
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 
@@ -510,8 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
 			       (unsigned long)fnew);
 
-	tcf_exts_change(tp, &fnew->exts, &e);
-
 	netif_keep_dst(qdisc_dev(tp->q));
 
 	if (tb[TCA_FLOW_KEYS]) {
@@ -550,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		call_rcu(&fold->rcu, flow_destroy_filter);
 	return 0;
 
-err3:
+err2:
 	tcf_exts_destroy(&fnew->exts);
 	tcf_em_tree_destroy(&fnew->ematches);
-err2:
-	kfree(fnew);
 err1:
-	tcf_exts_destroy(&e);
+	kfree(fnew);
 	return err;
 }
 
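For readers skimming the diff: after this change, flow_change() allocates fnew up front and unwinds through a single chain of labels. A failure in tcf_em_tree_validate() jumps to err1 and only frees the filter; any later failure jumps to err2, which destroys the extensions and ematch tree before falling through to the kfree() at err1. The standalone sketch below mirrors that unwind order; the toy_* helpers are hypothetical stand-ins for the tcf_* calls, not kernel APIs.

/* Toy illustration of the goto-based unwind order used above. */
#include <stdlib.h>
#include <stdio.h>

struct toy_filter {
	int ematches;			/* stands in for fnew->ematches */
	int exts;			/* stands in for fnew->exts */
};

/* Hypothetical helpers: 0 on success, negative value on error. */
static int toy_ematch_validate(struct toy_filter *f) { f->ematches = 1; return 0; }
static void toy_ematch_destroy(struct toy_filter *f) { f->ematches = 0; }
static int toy_exts_init(struct toy_filter *f) { f->exts = 1; return 0; }
static int toy_exts_validate(struct toy_filter *f) { (void)f; return 0; }
static void toy_exts_destroy(struct toy_filter *f) { f->exts = 0; }

static int toy_change(struct toy_filter **out)
{
	struct toy_filter *fnew;
	int err;

	fnew = calloc(1, sizeof(*fnew));	/* kzalloc() stand-in */
	if (!fnew)
		return -1;			/* -ENOBUFS in the kernel code */

	err = toy_ematch_validate(fnew);	/* like tcf_em_tree_validate() */
	if (err < 0)
		goto err1;			/* only fnew itself to free */

	err = toy_exts_init(fnew);		/* like tcf_exts_init() */
	if (err < 0)
		goto err2;

	err = toy_exts_validate(fnew);		/* like tcf_exts_validate() */
	if (err < 0)
		goto err2;

	*out = fnew;				/* success: hand the filter back */
	return 0;

err2:
	toy_exts_destroy(fnew);			/* like tcf_exts_destroy() */
	toy_ematch_destroy(fnew);		/* like tcf_em_tree_destroy() */
err1:
	free(fnew);
	return err;
}

int main(void)
{
	struct toy_filter *f = NULL;

	printf("toy_change() -> %d\n", toy_change(&f));
	free(f);
	return 0;
}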