@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8       st);
 static void push_tx_responses(struct xenvif_queue *queue);
 
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
 }
 
 static void xenvif_tx_err(struct xenvif_queue *queue,
-			  struct xen_netif_tx_request *txp, RING_IDX end)
+			  struct xen_netif_tx_request *txp,
+			  unsigned int extra_count, RING_IDX end)
 {
 	RING_IDX cons = queue->tx.req_cons;
 	unsigned long flags;
 
 	do {
 		spin_lock_irqsave(&queue->response_lock, flags);
-		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 
 static int xenvif_count_requests(struct xenvif_queue *queue,
 				 struct xen_netif_tx_request *first,
+				 unsigned int extra_count,
 				 struct xen_netif_tx_request *txp,
 				 int work_to_do)
 {
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 	} while (more_data);
 
 	if (drop_err) {
-		xenvif_tx_err(queue, first, cons + slots);
+		xenvif_tx_err(queue, first, extra_count, cons + slots);
 		return drop_err;
 	}
 
@@ -827,9 +830,10 @@ struct xenvif_tx_cb {
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
-					   u16 pending_idx,
-					   struct xen_netif_tx_request *txp,
-					   struct gnttab_map_grant_ref *mop)
+					   u16 pending_idx,
+					   struct xen_netif_tx_request *txp,
+					   unsigned int extra_count,
+					   struct gnttab_map_grant_ref *mop)
 {
 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 
 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 	       sizeof(*txp));
+	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 }
 
 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 	}
 
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+					gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags],
 				     pending_idx);
 	}
@@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,
-			     struct xen_netif_extra_info *extras,
-			     int work_to_do)
+			     struct xen_netif_extra_info *extras,
+			     unsigned int *extra_count,
+			     int work_to_do)
 {
 	struct xen_netif_extra_info extra;
 	RING_IDX cons = queue->tx.req_cons;
@@ -1109,17 +1116,19 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+		queue->tx.req_cons = ++cons;
+		(*extra_count)++;
+
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-			queue->tx.req_cons = ++cons;
 			netdev_err(queue->vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
 			xenvif_fatal_tx_err(queue->vif);
 			return -EINVAL;
 		}
 
 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-		queue->tx.req_cons = ++cons;
 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	return work_to_do;
@@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+		unsigned int extra_count;
 		u16 pending_idx;
 		RING_IDX idx;
 		int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx.req_cons = ++idx;
 
 		memset(extras, 0, sizeof(extras));
+		extra_count = 0;
 		if (txreq.flags & XEN_NETTXF_extra_info) {
 			work_to_do = xenvif_get_extras(queue, extras,
+						       &extra_count,
 						       work_to_do);
 			idx = queue->tx.req_cons;
 			if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq,
+			make_tx_response(queue, &txreq, extra_count,
 					 (ret == 0) ?
 					 XEN_NETIF_RSP_OKAY :
 					 XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			make_tx_response(queue, &txreq, extra_count,
+					 XEN_NETIF_RSP_OKAY);
 			push_tx_responses(queue);
 			continue;
 		}
 
-		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+		ret = xenvif_count_requests(queue, &txreq, extra_count,
+					    txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
 
@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(txreq.size < ETH_HLEN)) {
 			netdev_dbg(queue->vif->dev,
 				   "Bad packet size: %d\n", txreq.size);
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(queue->vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
 				kfree_skb(skb);
-				xenvif_tx_err(queue, &txreq, idx);
+				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
 						   "Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (data_len < txreq.size) {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+						extra_count, gop);
 			gop++;
 		} else {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
-			       sizeof(txreq));
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       &txreq, sizeof(txreq));
+			queue->pending_tx_info[pending_idx].extra_count =
+				extra_count;
 		}
 
 		queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
-	make_tx_response(queue, &pending_tx_info->req, status);
+	make_tx_response(queue, &pending_tx_info->req,
+			 pending_tx_info->extra_count, status);
 
 	/* Release the pending index before pusing the Tx response so
 	 * its available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8       st)
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
 	resp->id = txp->id;
 	resp->status = st;
 
-	if (txp->flags & XEN_NETTXF_extra_info)
+	while (extra_count-- != 0)
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;