
Commit 8990777

arndb authored and davem330 committed
netcp: try to reduce type confusion in descriptors
The netcp driver produces tons of warnings when CONFIG_LPAE is enabled on ARM:

drivers/net/ethernet/ti/netcp_core.c: In function 'netcp_tx_map_skb':
drivers/net/ethernet/ti/netcp_core.c:1084:13: warning: passing argument 1 of 'set_words' from incompatible pointer type [-Wincompatible-pointer-types]

This is the result of trying to pass a pointer to a dma_addr_t to a function that expects a u32 pointer in order to copy it into a DMA descriptor.

Looking at that code in more detail to fix the warnings, I see multiple related problems:

* The conversion functions are not endian-safe, as the DMA descriptors are almost certainly fixed-endian, but the CPU is not.
* On 64-bit machines, passing a pointer through a u32 variable is a bug, and accessing an indirect pointer as a u32 pointer even more so.
* The handling of epib and psdata mixes native-endian and device-endian data.

In this patch, I try to sort out the types for most accesses here, adding le32_to_cpu/cpu_to_le32 where appropriate, and passing pointers through two 32-bit words in the descriptor padding, to make it plausible that the driver does the right thing if compiled for big-endian or 64-bit systems.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
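[Editor's note] To make the pointer-through-pad-words scheme concrete, here is a minimal stand-alone sketch of the idea. The names example_desc, example_store_ptr and example_load_ptr are made up for this illustration only; the patch itself spreads the same logic across lower_32_bits()/upper_32_bits() at the call sites, set_pad_info() and get_pad_ptr().

/* Sketch only: stash a CPU virtual pointer in two fixed-endian 32-bit
 * descriptor pad words and recover it later.
 */
#include <linux/kernel.h>
#include <linux/types.h>

struct example_desc {
        __le32 pad[4];                  /* stand-in for knav_dma_desc::pad */
};

static void example_store_ptr(struct example_desc *d, void *ptr)
{
        uintptr_t addr = (uintptr_t)ptr;

        d->pad[0] = cpu_to_le32(lower_32_bits(addr));
        d->pad[1] = cpu_to_le32(upper_32_bits(addr));
}

static void *example_load_ptr(const struct example_desc *d)
{
        u64 addr = le32_to_cpu(d->pad[0]) |
                   ((u64)le32_to_cpu(d->pad[1]) << 32);

        return (void *)(uintptr_t)addr;
}

On a 32-bit kernel the upper word is simply zero, so the same code covers 32-bit and 64-bit builds, and because the words are stored as __le32 the descriptor layout no longer depends on the CPU byte order.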
1 parent ad2c8c7 commit 8990777

File tree

drivers/net/ethernet/ti/netcp_core.c
include/linux/soc/ti/knav_dma.h

2 files changed: +82 -63 lines


drivers/net/ethernet/ti/netcp_core.c

Lines changed: 71 additions & 52 deletions
@@ -109,69 +109,80 @@ module_param(netcp_debug_level, int, 0);
 MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
 
 /* Helper functions - Get/Set */
-static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
+static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
                          struct knav_dma_desc *desc)
 {
-        *buff_len = desc->buff_len;
-        *buff = desc->buff;
-        *ndesc = desc->next_desc;
+        *buff_len = le32_to_cpu(desc->buff_len);
+        *buff = le32_to_cpu(desc->buff);
+        *ndesc = le32_to_cpu(desc->next_desc);
 }
 
-static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
+static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
 {
-        *pad0 = desc->pad[0];
-        *pad1 = desc->pad[1];
+        *pad0 = le32_to_cpu(desc->pad[0]);
+        *pad1 = le32_to_cpu(desc->pad[1]);
+        *pad2 = le32_to_cpu(desc->pad[2]);
 }
 
-static void get_org_pkt_info(u32 *buff, u32 *buff_len,
+static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
+{
+        u64 pad64;
+
+        pad64 = le32_to_cpu(desc->pad[0]) +
+                ((u64)le32_to_cpu(desc->pad[1]) << 32);
+        *padptr = (void *)(uintptr_t)pad64;
+}
+
+static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
                              struct knav_dma_desc *desc)
 {
-        *buff = desc->orig_buff;
-        *buff_len = desc->orig_len;
+        *buff = le32_to_cpu(desc->orig_buff);
+        *buff_len = le32_to_cpu(desc->orig_len);
 }
 
-static void get_words(u32 *words, int num_words, u32 *desc)
+static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
 {
         int i;
 
         for (i = 0; i < num_words; i++)
-                words[i] = desc[i];
+                words[i] = le32_to_cpu(desc[i]);
 }
 
-static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
+static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
                          struct knav_dma_desc *desc)
 {
-        desc->buff_len = buff_len;
-        desc->buff = buff;
-        desc->next_desc = ndesc;
+        desc->buff_len = cpu_to_le32(buff_len);
+        desc->buff = cpu_to_le32(buff);
+        desc->next_desc = cpu_to_le32(ndesc);
 }
 
 static void set_desc_info(u32 desc_info, u32 pkt_info,
                           struct knav_dma_desc *desc)
 {
-        desc->desc_info = desc_info;
-        desc->packet_info = pkt_info;
+        desc->desc_info = cpu_to_le32(desc_info);
+        desc->packet_info = cpu_to_le32(pkt_info);
 }
 
-static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
+static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
 {
-        desc->pad[0] = pad0;
-        desc->pad[1] = pad1;
+        desc->pad[0] = cpu_to_le32(pad0);
+        desc->pad[1] = cpu_to_le32(pad1);
+        desc->pad[2] = cpu_to_le32(pad1);
 }
 
-static void set_org_pkt_info(u32 buff, u32 buff_len,
+static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
                              struct knav_dma_desc *desc)
 {
-        desc->orig_buff = buff;
-        desc->orig_len = buff_len;
+        desc->orig_buff = cpu_to_le32(buff);
+        desc->orig_len = cpu_to_le32(buff_len);
 }
 
-static void set_words(u32 *words, int num_words, u32 *desc)
+static void set_words(u32 *words, int num_words, __le32 *desc)
 {
         int i;
 
         for (i = 0; i < num_words; i++)
-                desc[i] = words[i];
+                desc[i] = cpu_to_le32(words[i]);
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
@@ -570,7 +581,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
         dma_addr_t dma_desc, dma_buf;
         unsigned int buf_len, dma_sz = sizeof(*ndesc);
         void *buf_ptr;
-        u32 tmp;
+        u32 pad[2];
 
         get_words(&dma_desc, 1, &desc->next_desc);
 
@@ -580,14 +591,15 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
                         dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                         break;
                 }
-                get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
-                get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
+                get_pad_ptr(&buf_ptr, ndesc);
                 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
                 __free_page(buf_ptr);
                 knav_pool_desc_put(netcp->rx_pool, desc);
         }
 
-        get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
+        get_pad_info(&pad[0], &pad[1], &buf_len, desc);
+        buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+
         if (buf_ptr)
                 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
         knav_pool_desc_put(netcp->rx_pool, desc);
@@ -626,7 +638,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
         struct netcp_packet p_info;
         struct sk_buff *skb;
         void *org_buf_ptr;
-        u32 tmp;
 
         dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
         if (!dma_desc)
@@ -639,7 +650,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
         }
 
         get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
-        get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
+        get_pad_ptr(&org_buf_ptr, desc);
 
         if (unlikely(!org_buf_ptr)) {
                 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -664,6 +675,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
         /* Fill in the page fragment list */
         while (dma_desc) {
                 struct page *page;
+                void *ptr;
 
                 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
                 if (unlikely(!ndesc)) {
@@ -672,14 +684,15 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                 }
 
                 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
-                get_pad_info((u32 *)&page, &tmp, ndesc);
+                get_pad_ptr(&ptr, ndesc);
+                page = ptr;
 
                 if (likely(dma_buff && buf_len && page)) {
                         dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
                                        DMA_FROM_DEVICE);
                 } else {
-                        dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
-                                (void *)dma_buff, buf_len, page);
+                        dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
+                                &dma_buff, buf_len, page);
                         goto free_desc;
                 }
 
@@ -750,7 +763,6 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
         unsigned int buf_len, dma_sz;
         dma_addr_t dma;
         void *buf_ptr;
-        u32 tmp;
 
         /* Allocate descriptor */
         while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
@@ -761,7 +773,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
                 }
 
                 get_org_pkt_info(&dma, &buf_len, desc);
-                get_pad_info((u32 *)&buf_ptr, &tmp, desc);
+                get_pad_ptr(&buf_ptr, desc);
 
                 if (unlikely(!dma)) {
                         dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -813,7 +825,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
         struct page *page;
         dma_addr_t dma;
         void *bufptr;
-        u32 pad[2];
+        u32 pad[3];
 
         /* Allocate descriptor */
         hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -830,7 +842,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
                 bufptr = netdev_alloc_frag(primary_buf_len);
-                pad[1] = primary_buf_len;
+                pad[2] = primary_buf_len;
 
                 if (unlikely(!bufptr)) {
                         dev_warn_ratelimited(netcp->ndev_dev,
@@ -842,7 +854,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                 if (unlikely(dma_mapping_error(netcp->dev, dma)))
                         goto fail;
 
-                pad[0] = (u32)bufptr;
+                pad[0] = lower_32_bits((uintptr_t)bufptr);
+                pad[1] = upper_32_bits((uintptr_t)bufptr);
 
         } else {
                 /* Allocate a secondary receive queue entry */
@@ -853,8 +866,9 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                 }
                 buf_len = PAGE_SIZE;
                 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
-                pad[0] = (u32)page;
-                pad[1] = 0;
+                pad[0] = lower_32_bits(dma);
+                pad[1] = upper_32_bits(dma);
+                pad[2] = 0;
         }
 
         desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -864,7 +878,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
         pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
                     KNAV_DMA_DESC_RETQ_SHIFT;
         set_org_pkt_info(dma, buf_len, hwdesc);
-        set_pad_info(pad[0], pad[1], hwdesc);
+        set_pad_info(pad[0], pad[1], pad[2], hwdesc);
         set_desc_info(desc_info, pkt_info, hwdesc);
 
         /* Push to FDQs */
@@ -935,8 +949,8 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
                         dma_unmap_single(netcp->dev, dma_buf, buf_len,
                                          DMA_TO_DEVICE);
                 else
-                        dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
-                                 (void *)dma_buf, buf_len);
+                        dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
+                                 &dma_buf, buf_len);
 
                 knav_pool_desc_put(netcp->tx_pool, ndesc);
                 ndesc = NULL;
@@ -953,11 +967,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                                           unsigned int budget)
 {
         struct knav_dma_desc *desc;
+        void *ptr;
         struct sk_buff *skb;
         unsigned int dma_sz;
         dma_addr_t dma;
         int pkts = 0;
-        u32 tmp;
 
         while (budget--) {
                 dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
@@ -970,7 +984,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                         continue;
                 }
 
-                get_pad_info((u32 *)&skb, &tmp, desc);
+                get_pad_ptr(&ptr, desc);
+                skb = ptr;
                 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
                 if (!skb) {
                         dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1059,6 +1074,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
                 u32 page_offset = frag->page_offset;
                 u32 buf_len = skb_frag_size(frag);
                 dma_addr_t desc_dma;
+                u32 desc_dma_32;
                 u32 pkt_info;
 
                 dma_addr = dma_map_page(dev, page, page_offset, buf_len,
@@ -1075,13 +1091,13 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
                         goto free_descs;
                 }
 
-                desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
-                                                      (void *)ndesc);
+                desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
                 pkt_info =
                         (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
                         KNAV_DMA_DESC_RETQ_SHIFT;
                 set_pkt_info(dma_addr, buf_len, 0, ndesc);
-                set_words(&desc_dma, 1, &pdesc->next_desc);
+                desc_dma_32 = (u32)desc_dma;
+                set_words(&desc_dma_32, 1, &pdesc->next_desc);
                 pkt_len += buf_len;
                 if (pdesc != desc)
                         knav_pool_desc_map(netcp->tx_pool, pdesc,
@@ -1173,11 +1189,14 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
         }
 
         set_words(&tmp, 1, &desc->packet_info);
-        set_words((u32 *)&skb, 1, &desc->pad[0]);
+        tmp = lower_32_bits((uintptr_t)&skb);
+        set_words(&tmp, 1, &desc->pad[0]);
+        tmp = upper_32_bits((uintptr_t)&skb);
+        set_words(&tmp, 1, &desc->pad[1]);
 
         if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
                 tmp = tx_pipe->switch_to_port;
-                set_words((u32 *)&tmp, 1, &desc->tag_info);
+                set_words(&tmp, 1, &desc->tag_info);
         }
 
         /* submit packet descriptor */

include/linux/soc/ti/knav_dma.h

Lines changed: 11 additions & 11 deletions
@@ -144,17 +144,17 @@ struct knav_dma_cfg {
  * @psdata: Protocol specific
  */
 struct knav_dma_desc {
-        u32     desc_info;
-        u32     tag_info;
-        u32     packet_info;
-        u32     buff_len;
-        u32     buff;
-        u32     next_desc;
-        u32     orig_len;
-        u32     orig_buff;
-        u32     epib[KNAV_DMA_NUM_EPIB_WORDS];
-        u32     psdata[KNAV_DMA_NUM_PS_WORDS];
-        u32     pad[4];
+        __le32  desc_info;
+        __le32  tag_info;
+        __le32  packet_info;
+        __le32  buff_len;
+        __le32  buff;
+        __le32  next_desc;
+        __le32  orig_len;
+        __le32  orig_buff;
+        __le32  epib[KNAV_DMA_NUM_EPIB_WORDS];
+        __le32  psdata[KNAV_DMA_NUM_PS_WORDS];
+        __le32  pad[4];
 } ____cacheline_aligned;
 
 #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
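
[Editor's note] One practical benefit of the __le32 annotations in the header change above is that sparse (make C=1) can flag descriptor accesses that bypass the endianness helpers. A minimal, hypothetical illustration follows; example_set_len is not part of the driver, only knav_dma_desc and its buff_len field come from the header.

#include <linux/kernel.h>
#include <linux/soc/ti/knav_dma.h>

/* Correct: the conversion is explicit, so the annotated types match. */
static void example_set_len(struct knav_dma_desc *desc, u32 len)
{
        desc->buff_len = cpu_to_le32(len);
        /* Writing "desc->buff_len = len;" instead would make sparse warn
         * about an incorrect type in assignment (different base types).
         */
}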
