
Commit 018423e

David VomLehn authored and davem330 committed
net: ethernet: aquantia: Add ring support code
Add code to support the transmit and receive ring buffers.

Signed-off-by: Alexander Loktionov <Alexander.Loktionov@aquantia.com>
Signed-off-by: Dmitrii Tarakanov <Dmitrii.Tarakanov@aquantia.com>
Signed-off-by: Pavel Belous <Pavel.Belous@aquantia.com>
Signed-off-by: Dmitry Bezrukov <Dmitry.Bezrukov@aquantia.com>
Signed-off-by: David M. VomLehn <vomlehn@texas.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 3a35780 commit 018423e

File tree

2 files changed: +533 −0 lines changed

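Note: the diff below uses the index helpers aq_ring_next_dx() and aq_ring_avail_dx(), which are declared in aq_ring.h and are not part of this hunk. A minimal sketch of the circular-index arithmetic they are assumed to implement (illustrative only, not the actual header contents):

        /* Sketch only: assumed semantics of the aq_ring.h index helpers. */
        static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
                                                   unsigned int dx)
        {
                /* Advance a descriptor index by one, wrapping at the ring size. */
                return (dx + 1U) % self->size;
        }

        static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
        {
                /* Free descriptors between sw_tail and sw_head, keeping one
                 * slot empty so sw_tail == sw_head means "ring empty".
                 */
                return self->size - 1U -
                       ((self->sw_tail + self->size - self->sw_head) % self->size);
        }

Keeping one slot unused is the usual way to distinguish a full ring from an empty one with a single producer/consumer index pair.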
aq_ring.c (new file): 376 additions & 0 deletions

@@ -0,0 +1,376 @@
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
                                       struct aq_nic_s *aq_nic)
{
        int err = 0;

        self->buff_ring =
                kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

        if (!self->buff_ring) {
                err = -ENOMEM;
                goto err_exit;
        }
        self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
                                           self->size * self->dx_size,
                                           &self->dx_ring_pa, GFP_KERNEL);
        if (!self->dx_ring) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->txds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg)
{
        int err = 0;

        self->aq_nic = aq_nic;
        self->idx = idx;
        self->size = aq_nic_cfg->rxds;
        self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

        self = aq_ring_alloc(self, aq_nic);
        if (!self) {
                err = -ENOMEM;
                goto err_exit;
        }

err_exit:
        if (err < 0) {
                aq_ring_free(self);
                self = NULL;
        }
        return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
        return 0;
}

void aq_ring_tx_append_buffs(struct aq_ring_s *self,
                             struct aq_ring_buff_s *buffer,
                             unsigned int buffers)
{
        if (likely(self->sw_tail + buffers < self->size)) {
                memcpy(&self->buff_ring[self->sw_tail], buffer,
                       sizeof(buffer[0]) * buffers);
        } else {
                unsigned int first_part = self->size - self->sw_tail;
                unsigned int second_part = buffers - first_part;

                memcpy(&self->buff_ring[self->sw_tail], buffer,
                       sizeof(buffer[0]) * first_part);

                memcpy(&self->buff_ring[0], &buffer[first_part],
                       sizeof(buffer[0]) * second_part);
        }
}

int aq_ring_tx_clean(struct aq_ring_s *self)
{
        struct device *dev = aq_nic_get_dev(self->aq_nic);

        for (; self->sw_head != self->hw_head;
             self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                if (likely(buff->is_mapped)) {
                        if (unlikely(buff->is_sop))
                                dma_unmap_single(dev, buff->pa, buff->len,
                                                 DMA_TO_DEVICE);
                        else
                                dma_unmap_page(dev, buff->pa, buff->len,
                                               DMA_TO_DEVICE);
                }

                if (unlikely(buff->is_eop))
                        dev_kfree_skb_any(buff->skb);
        }

        if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX)
                aq_nic_ndev_queue_start(self->aq_nic, self->idx);

        return 0;
}

static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
                                               unsigned int t)
{
        return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
{
        struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
        int err = 0;
        bool is_rsc_completed = true;

        for (; (self->sw_head != self->hw_head) && budget;
             self->sw_head = aq_ring_next_dx(self, self->sw_head),
             --budget, ++(*work_done)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
                struct sk_buff *skb = NULL;
                unsigned int next_ = 0U;
                unsigned int i = 0U;
                struct aq_ring_buff_s *buff_ = NULL;

                if (buff->is_error) {
                        __free_pages(buff->page, 0);
                        continue;
                }

                if (buff->is_cleaned)
                        continue;

                if (!buff->is_eop) {
                        for (next_ = buff->next,
                             buff_ = &self->buff_ring[next_]; true;
                             next_ = buff_->next,
                             buff_ = &self->buff_ring[next_]) {
                                is_rsc_completed =
                                        aq_ring_dx_in_range(self->sw_head,
                                                            next_,
                                                            self->hw_head);

                                if (unlikely(!is_rsc_completed)) {
                                        is_rsc_completed = false;
                                        break;
                                }

                                if (buff_->is_eop)
                                        break;
                        }

                        if (!is_rsc_completed) {
                                err = 0;
                                goto err_exit;
                        }
                }

                /* for single fragment packets use build_skb() */
                if (buff->is_eop) {
                        skb = build_skb(page_address(buff->page),
                                        buff->len + AQ_SKB_ALIGN);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }

                        skb->dev = ndev;
                        skb_put(skb, buff->len);
                } else {
                        skb = netdev_alloc_skb(ndev, ETH_HLEN);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
                        }
                        skb_put(skb, ETH_HLEN);
                        memcpy(skb->data, page_address(buff->page), ETH_HLEN);

                        skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
                                        buff->len - ETH_HLEN,
                                        SKB_TRUESIZE(buff->len - ETH_HLEN));

                        for (i = 1U, next_ = buff->next,
                             buff_ = &self->buff_ring[next_]; true;
                             next_ = buff_->next,
                             buff_ = &self->buff_ring[next_], ++i) {
                                skb_add_rx_frag(skb, i, buff_->page, 0,
                                                buff_->len,
                                                SKB_TRUESIZE(buff->len -
                                                             ETH_HLEN));
                                buff_->is_cleaned = 1;

                                if (buff_->is_eop)
                                        break;
                        }
                }

                skb->protocol = eth_type_trans(skb, ndev);
                if (unlikely(buff->is_cso_err)) {
                        ++self->stats.rx.errors;
                        __skb_mark_checksum_bad(skb);
                } else {
                        if (buff->is_ip_cso) {
                                __skb_incr_checksum_unnecessary(skb);
                                if (buff->is_udp_cso || buff->is_tcp_cso)
                                        __skb_incr_checksum_unnecessary(skb);
                        } else {
                                skb->ip_summed = CHECKSUM_NONE;
                        }
                }

                skb_set_hash(skb, buff->rss_hash,
                             buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
                             PKT_HASH_TYPE_NONE);

                skb_record_rx_queue(skb, self->idx);

                netif_receive_skb(skb);

                ++self->stats.rx.packets;
                self->stats.rx.bytes += skb->len;
        }

err_exit:
        return err;
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
        struct aq_ring_buff_s *buff = NULL;
        int err = 0;
        int i = 0;

        for (i = aq_ring_avail_dx(self); i--;
             self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
                buff = &self->buff_ring[self->sw_tail];

                buff->flags = 0U;
                buff->len = AQ_CFG_RX_FRAME_MAX;

                buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
                                         __GFP_COMP, 0);
                if (!buff->page) {
                        err = -ENOMEM;
                        goto err_exit;
                }

                buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
                                        buff->page, 0,
                                        AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                err = dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa);
                if (err < 0)
                        goto err_exit;

                buff = NULL;
        }
        if (err < 0)
                goto err_exit;

err_exit:
        if (err < 0) {
                if (buff && buff->page)
                        __free_pages(buff->page, 0);
        }

        return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        for (; self->sw_head != self->sw_tail;
             self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

                dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
                               AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

                __free_pages(buff->page, 0);
        }

err_exit:;
}

void aq_ring_tx_deinit(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        for (; self->sw_head != self->sw_tail;
             self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
                struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
                struct device *ndev = aq_nic_get_dev(self->aq_nic);

                if (likely(buff->is_mapped)) {
                        if (unlikely(buff->is_sop)) {
                                dma_unmap_single(ndev, buff->pa, buff->len,
                                                 DMA_TO_DEVICE);
                        } else {
                                dma_unmap_page(ndev, buff->pa, buff->len,
                                               DMA_TO_DEVICE);
                        }
                }

                if (unlikely(buff->is_eop))
                        dev_kfree_skb_any(buff->skb);
        }
err_exit:;
}

void aq_ring_free(struct aq_ring_s *self)
{
        if (!self)
                goto err_exit;

        kfree(self->buff_ring);

        if (self->dx_ring)
                dma_free_coherent(aq_nic_get_dev(self->aq_nic),
                                  self->size * self->dx_size, self->dx_ring,
                                  self->dx_ring_pa);

err_exit:;
}
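For context, a hypothetical caller of the functions above might look roughly as follows. This is a sketch only, not part of this commit: struct aq_vec_s and aq_vec_poll() are illustrative names assumed here, not the driver's actual per-vector code.

        /* Hypothetical sketch: driving the ring helpers from a NAPI poll
         * handler. Assumes <linux/netdevice.h> and "aq_ring.h" are included.
         */
        struct aq_vec_s {
                struct napi_struct napi;
                struct aq_ring_s *tx_ring;
                struct aq_ring_s *rx_ring;
        };

        static int aq_vec_poll(struct napi_struct *napi, int budget)
        {
                struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
                int work_done = 0;

                /* Release completed Tx buffers, then process received
                 * descriptors up to the NAPI budget and refill the Rx ring
                 * with fresh pages for the hardware.
                 */
                aq_ring_tx_clean(self->tx_ring);
                aq_ring_rx_clean(self->rx_ring, &work_done, budget);
                aq_ring_rx_fill(self->rx_ring);

                if (work_done < budget)
                        napi_complete(napi);

                return work_done;
        }

The driver's real per-vector/NAPI wiring lives in other files of this patch series and may differ from this sketch.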
