Commit 0c35565

Merge branch 'netback'
Ian Campbell says:

====================
The Xen netback implementation contains a couple of flaws which can allow
a guest to cause a DoS in the backend domain, potentially affecting other
domains in the system.

CVE-2013-0216 is a failure to sanity check the ring producer/consumer
pointers which can allow a guest to cause netback to loop for an extended
period, preventing other work from occurring.

CVE-2013-0217 is a memory leak on an error path which is guest triggerable.

The following series contains the fixes for these issues, as previously
included in Xen Security Advisory 39:
http://lists.xen.org/archives/html/xen-announce/2013-02/msg00001.html

Changes in v2:
- Typo and block comment format fixes
- Added stable Cc
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents e21b9d0 + b914972 commit 0c35565
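For context on the CVE-2013-0216 fix: the tx ring's producer index lives in memory
the guest frontend controls, so netback must bound-check req_prod - req_cons before
trusting it. Below is a minimal standalone sketch of the invariant the series
enforces (the struct layout, RING_SIZE, and helper names are simplified stand-ins
for the real ring macros, not the kernel code itself):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 256u  /* stand-in for XEN_NETIF_TX_RING_SIZE */

    /* Producer/consumer indices are free-running unsigned counters;
     * only their difference matters, and unsigned subtraction keeps
     * that difference correct even across 2^32 wraparound.
     */
    struct tx_ring {
            uint32_t req_prod;      /* written by the untrusted frontend */
            uint32_t req_cons;      /* advanced by the backend */
    };

    static bool ring_sane(const struct tx_ring *r)
    {
            /* More outstanding requests than the ring can hold proves
             * the frontend wrote a bogus req_prod; treat that as fatal
             * rather than looping over garbage requests.
             */
            return r->req_prod - r->req_cons <= RING_SIZE;
    }

    int main(void)
    {
            struct tx_ring wrapped = { .req_prod = 10, .req_cons = 0xfffffff0u };
            struct tx_ring bogus   = { .req_prod = 0,  .req_cons = 1000 };

            printf("wrapped but sane: %d\n", ring_sane(&wrapped)); /* 1 */
            printf("bogus producer:   %d\n", ring_sane(&bogus));   /* 0 */
            return 0;
    }

The corresponding check in the patch (see xen_netbk_tx_build_gops in the netback.c
diff below) disables the interface via netbk_fatal_tx_err() when this invariant is
violated, instead of looping.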

File tree

3 files changed (+88, -53 lines)


drivers/net/xen-netback/common.h

Lines changed: 3 additions & 0 deletions

@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);

+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

drivers/net/xen-netback/interface.c

Lines changed: 14 additions & 9 deletions

@@ -343,17 +343,22 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 	return err;
 }

-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 	struct net_device *dev = vif->dev;
-	if (netif_carrier_ok(dev)) {
-		rtnl_lock();
-		netif_carrier_off(dev); /* discard queued packets */
-		if (netif_running(dev))
-			xenvif_down(vif);
-		rtnl_unlock();
-		xenvif_put(vif);
-	}
+
+	rtnl_lock();
+	netif_carrier_off(dev); /* discard queued packets */
+	if (netif_running(dev))
+		xenvif_down(vif);
+	rtnl_unlock();
+	xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+	if (netif_carrier_ok(vif->dev))
+		xenvif_carrier_off(vif);

 	atomic_dec(&vif->refcnt);
 	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);

drivers/net/xen-netback/netback.c

Lines changed: 71 additions & 44 deletions

@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 	atomic_dec(&netbk->netfront_count);
 }

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status);
 static void make_tx_response(struct xenvif *vif,
 			     struct xen_netif_tx_request *txp,
 			     s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,

 	do {
 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		if (cons >= end)
+		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&vif->tx, cons++);
 	} while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }

+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,28 +909,32 @@ static int netbk_count_requests(struct xenvif *vif,

 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}

 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}

 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}

 		first->size -= txp->size;
 		frags++;

 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 		pending_idx = netbk->pending_ring[index];
 		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 		if (!page)
-			return NULL;
+			goto err;

 		gop->source.u.ref = txp->gref;
 		gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	}

 	return gop;
+err:
+	/* Unwind, freeing all pages and sending error responses. */
+	while (i-- > start) {
+		xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+				      XEN_NETIF_RSP_ERROR);
+	}
+	/* The head too, if necessary. */
+	if (start)
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+	return NULL;
 }

 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 	struct gnttab_copy *gop = *gopp;
 	u16 pending_idx = *((u16 *)skb->data);
-	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-	struct xenvif *vif = pending_tx_info[pending_idx].vif;
-	struct xen_netif_tx_request *txp;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int nr_frags = shinfo->nr_frags;
 	int i, err, start;

 	/* Check status of header. */
 	err = gop->status;
-	if (unlikely(err)) {
-		pending_ring_idx_t index;
-		index = pending_index(netbk->pending_prod++);
-		txp = &pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
-	}
+	if (unlikely(err))
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

 	/* Skip first skb fragment if it is on same page as header fragment. */
 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

 	for (i = start; i < nr_frags; i++) {
 		int j, newerr;
-		pending_ring_idx_t index;

 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

@@ -1028,27 +1041,23 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 		if (likely(!newerr)) {
 			/* Had a previous error? Invalidate this fragment. */
 			if (unlikely(err))
-				xen_netbk_idx_release(netbk, pending_idx);
+				xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 			continue;
 		}

 		/* Error on this fragment: respond to client with an error. */
-		txp = &netbk->pending_tx_info[pending_idx].req;
-		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-		index = pending_index(netbk->pending_prod++);
-		netbk->pending_ring[index] = pending_idx;
-		xenvif_put(vif);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

 		/* Not the first error? Preceding frags already invalidated. */
 		if (err)
 			continue;

 		/* First error: invalidate header and preceding fragments. */
 		pending_idx = *((u16 *)skb->data);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		for (j = start; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}

 		/* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)

 		/* Take an extra reference to offset xen_netbk_idx_release */
 		get_page(netbk->mmap_pages[pending_idx]);
-		xen_netbk_idx_release(netbk, pending_idx);
+		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 	}
 }

@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,

 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}

@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}

@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}

 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}

@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;

+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}

 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;

 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}

@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			txp->size -= data_len;
 		} else {
 			/* Schedule a response immediately. */
-			xen_netbk_idx_release(netbk, pending_idx);
+			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 		}

 		if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 	xen_netbk_tx_submit(netbk);
 }

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+				  u8 status)
 {
 	struct xenvif *vif;
 	struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)

 	vif = pending_tx_info->vif;

-	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+	make_tx_response(vif, &pending_tx_info->req, status);

 	index = pending_index(netbk->pending_prod++);
 	netbk->pending_ring[index] = pending_idx;
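The CVE-2013-0217 leak fixed above in xen_netbk_get_requests() is the classic bug of
bailing out mid-loop without releasing what was already acquired; the patch replaces
the bare "return NULL" with an err: label that unwinds every slot filled so far. A
minimal standalone sketch of that unwind idiom (the acquire/release helpers here are
hypothetical stand-ins for xen_netbk_alloc_page()/xen_netbk_idx_release(), not the
netback API):

    #include <stdlib.h>

    #define NSLOTS 4

    static void *slots[NSLOTS];

    /* Hypothetical helpers standing in for page allocation and release. */
    static int acquire(size_t i)
    {
            slots[i] = malloc(64);
            return slots[i] ? 0 : -1;
    }

    static void release(size_t i)
    {
            free(slots[i]);
            slots[i] = NULL;
    }

    /* Fill every slot, or unwind everything taken so far on failure. */
    static int acquire_all(void)
    {
            size_t i;

            for (i = 0; i < NSLOTS; i++) {
                    if (acquire(i) < 0)
                            goto err;
            }
            return 0;

    err:
            /* Unwind in reverse, covering exactly the slots we filled;
             * this mirrors the shape of the err: path added above.
             */
            while (i-- > 0)
                    release(i);
            return -1;
    }

    int main(void)
    {
            return acquire_all() ? EXIT_FAILURE : EXIT_SUCCESS;
    }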
