Commit 3c4ef85
Merge branch 'xen-netback-fix-multiple-extra-info-handling'

Paul Durrant says:

====================
xen-netback: fix multiple extra info handling

If a frontend passes multiple extra info fragments to netback on the
guest transmit side then, because xen-netback does not account for this
properly, only a single ack response will be sent. This will eventually
cause processing of the shared ring to wedge.

This series re-imports the canonical netif.h from Xen, where the ring
protocol documentation has been updated, fixes this issue in
xen-netback, and also adds a patch to reduce log spam.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents: 136ba62 + 8e4ee59
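For context: on the guest transmit side, a request carrying extra info occupies more than one slot on the shared ring: the xen_netif_tx_request itself, followed by one or more xen_netif_extra_info segments chained by XEN_NETIF_EXTRA_FLAG_MORE. Every request slot the backend consumes must eventually be matched by a response slot. Below is a minimal stand-alone model of the accounting deficit this series fixes; the counters are hypothetical and only illustrate the index arithmetic, they are not driver code.

#include <stdio.h>

/* Hypothetical stand-alone model of the shared-ring index accounting;
 * every consumed request slot must be matched by a produced response
 * slot, or req_cons and rsp_prod drift apart until the ring wedges. */
int main(void)
{
	unsigned int req_cons = 0, rsp_prod = 0;
	unsigned int extras = 2;	/* frontend chained two extra info segments */

	req_cons += 1 + extras;		/* backend consumes request + extras */
	rsp_prod += 1 + 1;		/* old code: one response plus a single
					 * XEN_NETIF_RSP_NULL, no matter how
					 * many extras were consumed */
	printf("deficit: %u slot(s)\n", req_cons - rsp_prod);	/* prints 1 */

	rsp_prod += extras - 1;		/* fixed code: one RSP_NULL per extra,
					 * so the indices stay in step */
	printf("balanced: %s\n", req_cons == rsp_prod ? "yes" : "no");
	return 0;
}

Each such packet leaves the ring one response slot short per extra beyond the first, so a frontend that keeps sending them eventually sees no response space and processing wedges.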

4 files changed: 809 additions, 120 deletions. (Only three of the four files' diffs are reproduced below; the fourth, presumably the re-imported netif.h, accounts for the bulk of the added lines.)

drivers/net/xen-netback/common.h

Lines changed: 1 addition & 0 deletions
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
 
 struct pending_tx_info {
 	struct xen_netif_tx_request req; /* tx request */
+	unsigned int extra_count;
 	/* Callback data for released SKBs. The callback is always
 	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
 	 * also an index in pending_tx_info array. It is initialized in
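The new field is needed because a response is not always produced synchronously: for mapped packets the tx request is parked in pending_tx_info until the grant operation completes, so the number of extra info slots consumed along with it must be parked too (see the xenvif_idx_release hunk further down). A hedged sketch of the resulting bookkeeping, with the request type stubbed and the zerocopy callback fields elided so it stands alone:

/* Sketch only; the real definitions live in common.h and netif.h. */
struct tx_request_stub {		/* stand-in for xen_netif_tx_request */
	unsigned short id;
	unsigned short size;
	unsigned short flags;
};

struct pending_tx_info_sketch {
	struct tx_request_stub req;	/* tx request */
	unsigned int extra_count;	/* extra info slots consumed with req;
					 * each is acked with XEN_NETIF_RSP_NULL
					 * when the response is finally made */
};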

drivers/net/xen-netback/netback.c

Lines changed: 42 additions & 23 deletions
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8 st);
 static void push_tx_responses(struct xenvif_queue *queue);
 
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
 }
 
 static void xenvif_tx_err(struct xenvif_queue *queue,
-			  struct xen_netif_tx_request *txp, RING_IDX end)
+			  struct xen_netif_tx_request *txp,
+			  unsigned int extra_count, RING_IDX end)
 {
 	RING_IDX cons = queue->tx.req_cons;
 	unsigned long flags;
 
 	do {
 		spin_lock_irqsave(&queue->response_lock, flags);
-		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 
 static int xenvif_count_requests(struct xenvif_queue *queue,
 				 struct xen_netif_tx_request *first,
+				 unsigned int extra_count,
 				 struct xen_netif_tx_request *txp,
 				 int work_to_do)
 {
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 	} while (more_data);
 
 	if (drop_err) {
-		xenvif_tx_err(queue, first, cons + slots);
+		xenvif_tx_err(queue, first, extra_count, cons + slots);
 		return drop_err;
 	}
 
@@ -827,9 +830,10 @@ struct xenvif_tx_cb {
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
-					   u16 pending_idx,
-					   struct xen_netif_tx_request *txp,
-					   struct gnttab_map_grant_ref *mop)
+					   u16 pending_idx,
+					   struct xen_netif_tx_request *txp,
+					   unsigned int extra_count,
+					   struct gnttab_map_grant_ref *mop)
 {
 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 
 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 	       sizeof(*txp));
+	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 }
 
 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 	}
 
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+					gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags],
 				     pending_idx);
 	}
@@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,
-			     struct xen_netif_extra_info *extras,
-			     int work_to_do)
+			     struct xen_netif_extra_info *extras,
+			     unsigned int *extra_count,
+			     int work_to_do)
 {
 	struct xen_netif_extra_info extra;
 	RING_IDX cons = queue->tx.req_cons;
@@ -1109,17 +1116,19 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+		queue->tx.req_cons = ++cons;
+		(*extra_count)++;
+
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-			queue->tx.req_cons = ++cons;
 			netdev_err(queue->vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
 			xenvif_fatal_tx_err(queue->vif);
 			return -EINVAL;
 		}
 
 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-		queue->tx.req_cons = ++cons;
 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	return work_to_do;
12941303
struct xen_netif_tx_request txreq;
12951304
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
12961305
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1306+
unsigned int extra_count;
12971307
u16 pending_idx;
12981308
RING_IDX idx;
12991309
int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
13301340
queue->tx.req_cons = ++idx;
13311341

13321342
memset(extras, 0, sizeof(extras));
1343+
extra_count = 0;
13331344
if (txreq.flags & XEN_NETTXF_extra_info) {
13341345
work_to_do = xenvif_get_extras(queue, extras,
1346+
&extra_count,
13351347
work_to_do);
13361348
idx = queue->tx.req_cons;
13371349
if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
13441356
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
13451357
ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
13461358

1347-
make_tx_response(queue, &txreq,
1359+
make_tx_response(queue, &txreq, extra_count,
13481360
(ret == 0) ?
13491361
XEN_NETIF_RSP_OKAY :
13501362
XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
13581370
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
13591371
xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
13601372

1361-
make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
1373+
make_tx_response(queue, &txreq, extra_count,
1374+
XEN_NETIF_RSP_OKAY);
13621375
push_tx_responses(queue);
13631376
continue;
13641377
}
13651378

1366-
ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1379+
ret = xenvif_count_requests(queue, &txreq, extra_count,
1380+
txfrags, work_to_do);
13671381
if (unlikely(ret < 0))
13681382
break;
13691383

@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
13721386
if (unlikely(txreq.size < ETH_HLEN)) {
13731387
netdev_dbg(queue->vif->dev,
13741388
"Bad packet size: %d\n", txreq.size);
1375-
xenvif_tx_err(queue, &txreq, idx);
1389+
xenvif_tx_err(queue, &txreq, extra_count, idx);
13761390
break;
13771391
}
13781392

@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
13971411
if (unlikely(skb == NULL)) {
13981412
netdev_dbg(queue->vif->dev,
13991413
"Can't allocate a skb in start_xmit.\n");
1400-
xenvif_tx_err(queue, &txreq, idx);
1414+
xenvif_tx_err(queue, &txreq, extra_count, idx);
14011415
break;
14021416
}
14031417

@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
14161430
nskb = xenvif_alloc_skb(0);
14171431
if (unlikely(nskb == NULL)) {
14181432
kfree_skb(skb);
1419-
xenvif_tx_err(queue, &txreq, idx);
1433+
xenvif_tx_err(queue, &txreq, extra_count, idx);
14201434
if (net_ratelimit())
14211435
netdev_err(queue->vif->dev,
14221436
"Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
14571471
if (data_len < txreq.size) {
14581472
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
14591473
pending_idx);
1460-
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1474+
xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1475+
extra_count, gop);
14611476
gop++;
14621477
} else {
14631478
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
14641479
INVALID_PENDING_IDX);
1465-
memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1466-
sizeof(txreq));
1480+
memcpy(&queue->pending_tx_info[pending_idx].req,
1481+
&txreq, sizeof(txreq));
1482+
queue->pending_tx_info[pending_idx].extra_count =
1483+
extra_count;
14671484
}
14681485

14691486
queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
-	make_tx_response(queue, &pending_tx_info->req, status);
+	make_tx_response(queue, &pending_tx_info->req,
+			 pending_tx_info->extra_count, status);
 
 	/* Release the pending index before pusing the Tx response so
 	 * its available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8 st)
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
 	resp->id = txp->id;
 	resp->status = st;
 
-	if (txp->flags & XEN_NETTXF_extra_info)
+	while (extra_count-- != 0)
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
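This last hunk is the heart of the fix: the old test on XEN_NETTXF_extra_info could pad at most one XEN_NETIF_RSP_NULL slot, however many extras XEN_NETIF_EXTRA_FLAG_MORE had chained on. A small stand-alone model of the corrected response-side loop (the ring array and names are hypothetical):

#include <stdio.h>

enum { RSP_OKAY = 0, RSP_NULL = 1 };

int main(void)
{
	int ring[8];
	unsigned int i = 0;		/* stands in for rsp_prod_pvt */
	unsigned int extra_count = 2;	/* two chained extras were consumed */

	ring[i] = RSP_OKAY;		/* response for the tx request itself */
	while (extra_count-- != 0)
		ring[++i] = RSP_NULL;	/* one NULL status per extra slot */
	i++;				/* becomes the new rsp_prod_pvt */

	printf("produced %u response slots\n", i);	/* 3: 1 + 2 extras */
	return 0;
}

With one NULL status per consumed extra, rsp_prod_pvt advances exactly as far as req_cons did on the request side, which is what keeps the ring from wedging.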

drivers/net/xen-netback/xenbus.c

Lines changed: 0 additions & 2 deletions
@@ -511,8 +511,6 @@ static void set_backend_state(struct backend_info *be,
 		switch (state) {
 		case XenbusStateInitWait:
 		case XenbusStateConnected:
-			pr_info("%s: prepare for reconnect\n",
-				be->dev->nodename);
 			backend_switch_state(be, XenbusStateInitWait);
 			break;
 		case XenbusStateClosing:
