@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8       st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
 	do {
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		if (cons >= end)
+		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,28 +909,32 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		first->size -= txp->size;
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		pending_idx = netbk->pending_ring[index];
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page)
-			return NULL;
+			goto err;
 
 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	}
 
 	return gop;
+err:
+	/* Unwind, freeing all pages and sending error responses. */
+	while (i-- > start) {
+		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+				      XEN_NETIF_RSP_ERROR);
+	}
+	/* The head too, if necessary. */
+	if (start)
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+	return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;
 
 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;
 
 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,27 +1041,23 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}
 
 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;
 
 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
 		/* Take an extra reference to offset xen_netbk_idx_release */
 		get_page(netbk->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }
 
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}
 
 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 	xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status)
 {
 	struct xenvif *vif;
 	struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
 	vif = pending_tx_info->vif;
 
-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+	make_tx_response(vif, &pending_tx_info->req, status);
 
 	index = pending_index(netbk->pending_prod++);
 	netbk->pending_ring[index] = pending_idx;