Skip to content

Commit fae5082

Browse files
mugunthanvnm authored and davem330 committed
net: ethernet: davinci_cpdma: Add boundary for rx and tx descriptors
When there is heavy transmission traffic in the CPDMA, then Rx descriptors memory is also utilized as tx desc memory looses all rx descriptors and the driver stops working then. This patch adds boundary for tx and rx descriptors in bd ram dividing the descriptor memory to ensure that during heavy transmission tx doesn't use rx descriptors. This patch is already applied to davinci_emac driver, since CPSW and davici_dmac shares the same CPDMA, moving the boundry seperation from Davinci EMAC driver to CPDMA driver which was done in the following commit commit 86d8c07 Author: Sascha Hauer <s.hauer@pengutronix.de> Date: Tue Jan 3 05:27:47 2012 +0000 net/davinci: do not use all descriptors for tx packets The driver uses a shared pool for both rx and tx descriptors. During open it queues fixed number of 128 descriptors for receive packets. For each received packet it tries to queue another descriptor. If this fails the descriptor is lost for rx. The driver has no limitation on tx descriptors to use, so it can happen during a nmap / ping -f attack that the driver allocates all descriptors for tx and looses all rx descriptors. The driver stops working then. To fix this limit the number of tx descriptors used to half of the descriptors available, the rx path uses the other half. Tested on a custom board using nmap / ping -f to the board from two different hosts. Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent ee21c7e commit fae5082

File tree

4 files changed

+57
-13
lines changed

4 files changed

+57
-13
lines changed

drivers/net/ethernet/ti/cpsw.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -374,6 +374,9 @@ void cpsw_tx_handler(void *token, int len, int status)
374374
struct net_device *ndev = skb->dev;
375375
struct cpsw_priv *priv = netdev_priv(ndev);
376376

377+
/* Check whether the queue is stopped due to stalled tx dma, if the
378+
* queue is stopped then start the queue as we have free desc for tx
379+
*/
377380
if (unlikely(netif_queue_stopped(ndev)))
378381
netif_start_queue(ndev);
379382
cpts_tx_timestamp(&priv->cpts, skb);
@@ -736,6 +739,12 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
736739
goto fail;
737740
}
738741

742+
/* If there is no more tx desc left free then we need to
743+
* tell the kernel to stop sending us tx frames.
744+
*/
745+
if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
746+
netif_stop_queue(ndev);
747+
739748
return NETDEV_TX_OK;
740749
fail:
741750
priv->stats.tx_dropped++;

drivers/net/ethernet/ti/davinci_cpdma.c

Lines changed: 40 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -105,13 +105,13 @@ struct cpdma_ctlr {
105105
};
106106

107107
struct cpdma_chan {
108+
struct cpdma_desc __iomem *head, *tail;
109+
void __iomem *hdp, *cp, *rxfree;
108110
enum cpdma_state state;
109111
struct cpdma_ctlr *ctlr;
110112
int chan_num;
111113
spinlock_t lock;
112-
struct cpdma_desc __iomem *head, *tail;
113114
int count;
114-
void __iomem *hdp, *cp, *rxfree;
115115
u32 mask;
116116
cpdma_handler_fn handler;
117117
enum dma_data_direction dir;
@@ -217,17 +217,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
217217
}
218218

219219
static struct cpdma_desc __iomem *
220-
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
220+
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
221221
{
222222
unsigned long flags;
223223
int index;
224+
int desc_start;
225+
int desc_end;
224226
struct cpdma_desc __iomem *desc = NULL;
225227

226228
spin_lock_irqsave(&pool->lock, flags);
227229

228-
index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
229-
num_desc, 0);
230-
if (index < pool->num_desc) {
230+
if (is_rx) {
231+
desc_start = 0;
232+
desc_end = pool->num_desc/2;
233+
} else {
234+
desc_start = pool->num_desc/2;
235+
desc_end = pool->num_desc;
236+
}
237+
238+
index = bitmap_find_next_zero_area(pool->bitmap,
239+
desc_end, desc_start, num_desc, 0);
240+
if (index < desc_end) {
231241
bitmap_set(pool->bitmap, index, num_desc);
232242
desc = pool->iomap + pool->desc_size * index;
233243
pool->used_desc++;
@@ -668,7 +678,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
668678
goto unlock_ret;
669679
}
670680

671-
desc = cpdma_desc_alloc(ctlr->pool, 1);
681+
desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
672682
if (!desc) {
673683
chan->stats.desc_alloc_fail++;
674684
ret = -ENOMEM;
@@ -704,6 +714,29 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
704714
}
705715
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
706716

717+
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
718+
{
719+
unsigned long flags;
720+
int index;
721+
bool ret;
722+
struct cpdma_ctlr *ctlr = chan->ctlr;
723+
struct cpdma_desc_pool *pool = ctlr->pool;
724+
725+
spin_lock_irqsave(&pool->lock, flags);
726+
727+
index = bitmap_find_next_zero_area(pool->bitmap,
728+
pool->num_desc, pool->num_desc/2, 1, 0);
729+
730+
if (index < pool->num_desc)
731+
ret = true;
732+
else
733+
ret = false;
734+
735+
spin_unlock_irqrestore(&pool->lock, flags);
736+
return ret;
737+
}
738+
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
739+
707740
static void __cpdma_chan_free(struct cpdma_chan *chan,
708741
struct cpdma_desc __iomem *desc,
709742
int outlen, int status)

drivers/net/ethernet/ti/davinci_cpdma.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
8888
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
8989
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
9090
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
91+
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
9192

9293
enum cpdma_control {
9394
CPDMA_CMD_IDLE, /* write-only */

drivers/net/ethernet/ti/davinci_emac.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
120120
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
121121
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
122122
#define EMAC_DEF_RX_NUM_DESC (128)
123-
#define EMAC_DEF_TX_NUM_DESC (128)
124123
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
125124
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
126125
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -342,7 +341,6 @@ struct emac_priv {
342341
u32 mac_hash2;
343342
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
344343
u32 rx_addr_type;
345-
atomic_t cur_tx;
346344
const char *phy_id;
347345
#ifdef CONFIG_OF
348346
struct device_node *phy_node;
@@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status)
10501048
{
10511049
struct sk_buff *skb = token;
10521050
struct net_device *ndev = skb->dev;
1053-
struct emac_priv *priv = netdev_priv(ndev);
1054-
1055-
atomic_dec(&priv->cur_tx);
10561051

1052+
/* Check whether the queue is stopped due to stalled tx dma, if the
1053+
* queue is stopped then start the queue as we have free desc for tx
1054+
*/
10571055
if (unlikely(netif_queue_stopped(ndev)))
10581056
netif_start_queue(ndev);
10591057
ndev->stats.tx_packets++;
@@ -1101,7 +1099,10 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
11011099
goto fail_tx;
11021100
}
11031101

1104-
if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
1102+
/* If there is no more tx desc left free then we need to
1103+
* tell the kernel to stop sending us tx frames.
1104+
*/
1105+
if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
11051106
netif_stop_queue(ndev);
11061107

11071108
return NETDEV_TX_OK;

0 commit comments

Comments
 (0)