Commit 4885628

Ian Campbell authored and davem330 committed
xen/netback: shutdown the ring if it contains garbage.
A buggy or malicious frontend should not be able to confuse netback. If we spot anything which is not as it should be then shutdown the device and don't try to continue with the ring in a potentially hostile state. Well behaved and non-hostile frontends will not be penalised. As well as making the existing checks for such errors fatal also add a new check that ensures that there isn't an insane number of requests on the ring (i.e. more than would fit in the ring). If the ring contains garbage then previously is was possible to loop over this insane number, getting an error each time and therefore not generating any more pending requests and therefore not exiting the loop in xen_netbk_tx_build_gops for an externded period. Also turn various netdev_dbg calls which no precipitate a fatal error into netdev_err, they are rate limited because the device is shutdown afterwards. This fixes at least one known DoS/softlockup of the backend domain. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Acked-by: Jan Beulich <JBeulich@suse.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent bf414b3 commit 4885628
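
For illustration, here is a minimal standalone sketch of the arithmetic behind the new sanity check (this is an example, not code from the patch; RING_SIZE and the function name are made-up stand-ins). Xen shared-ring indices are free-running unsigned counters, so for any well-behaved frontend the number of outstanding requests, req_prod - req_cons computed in modular arithmetic, can never exceed the ring size; a larger value can only mean the shared ring contains garbage.

/* Minimal sketch (not from the patch) of the ring sanity check's
 * arithmetic. Assumes free-running 32-bit ring indices as in the Xen
 * shared-ring protocol; RING_SIZE stands in for XEN_NETIF_TX_RING_SIZE.
 */
#include <stdio.h>

typedef unsigned int RING_IDX;

#define RING_SIZE 256u

/* A well-behaved frontend never has more than RING_SIZE outstanding
 * requests, so req_prod - req_cons (modulo 2^32) must be <= RING_SIZE.
 * Anything larger means the frontend scribbled garbage on the ring,
 * which the backend now treats as fatal instead of looping over the
 * bogus requests.
 */
static int ring_request_count_sane(RING_IDX req_prod, RING_IDX req_cons)
{
	return req_prod - req_cons <= RING_SIZE;
}

int main(void)
{
	/* Unsigned wrap-around is handled naturally: 3 - 0xfffffffe == 5. */
	printf("%d\n", ring_request_count_sane(3u, 0xfffffffeu)); /* 1: sane */
	printf("%d\n", ring_request_count_sane(70000u, 2u));      /* 0: garbage */
	return 0;
}

The patch applies this same comparison to vif->tx.sring->req_prod and vif->tx.req_cons before building any grant operations, as shown in the netback.c hunk below.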

3 files changed, +62 −26 lines changed

drivers/net/xen-netback/common.h

Lines changed: 3 additions & 0 deletions

@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

drivers/net/xen-netback/interface.c

Lines changed: 14 additions & 9 deletions

@@ -343,17 +343,22 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);
 
 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
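
A note on the refactoring above: the carrier-off logic moves out of xenvif_disconnect() into the new xenvif_carrier_off() helper so that netback can shut a misbehaving frontend's device down directly (via netbk_fatal_tx_err() below) without tearing down the whole interface; xenvif_disconnect() keeps its original behaviour by calling the helper only when the carrier is currently up.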

drivers/net/xen-netback/netback.c

Lines changed: 45 additions & 17 deletions

@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,28 +908,32 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		first->size -= txp->size;
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				 txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1279,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1325,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1344,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1348,8 +1376,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
