@@ -109,69 +109,80 @@ module_param(netcp_debug_level, int, 0);
 MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
 
 /* Helper functions - Get/Set */
-static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
+static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
 			 struct knav_dma_desc *desc)
 {
-	*buff_len = desc->buff_len;
-	*buff = desc->buff;
-	*ndesc = desc->next_desc;
+	*buff_len = le32_to_cpu(desc->buff_len);
+	*buff = le32_to_cpu(desc->buff);
+	*ndesc = le32_to_cpu(desc->next_desc);
 }
 
-static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
+static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
 {
-	*pad0 = desc->pad[0];
-	*pad1 = desc->pad[1];
+	*pad0 = le32_to_cpu(desc->pad[0]);
+	*pad1 = le32_to_cpu(desc->pad[1]);
+	*pad2 = le32_to_cpu(desc->pad[2]);
 }
 
-static void get_org_pkt_info(u32 *buff, u32 *buff_len,
+static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
+{
+	u64 pad64;
+
+	pad64 = le32_to_cpu(desc->pad[0]) +
+		((u64)le32_to_cpu(desc->pad[1]) << 32);
+	*padptr = (void *)(uintptr_t)pad64;
+}
+
+static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
 			     struct knav_dma_desc *desc)
 {
-	*buff = desc->orig_buff;
-	*buff_len = desc->orig_len;
+	*buff = le32_to_cpu(desc->orig_buff);
+	*buff_len = le32_to_cpu(desc->orig_len);
 }
 
-static void get_words(u32 *words, int num_words, u32 *desc)
+static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
 {
 	int i;
 
 	for (i = 0; i < num_words; i++)
-		words[i] = desc[i];
+		words[i] = le32_to_cpu(desc[i]);
 }
 
-static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
+static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
 			 struct knav_dma_desc *desc)
 {
-	desc->buff_len = buff_len;
-	desc->buff = buff;
-	desc->next_desc = ndesc;
+	desc->buff_len = cpu_to_le32(buff_len);
+	desc->buff = cpu_to_le32(buff);
+	desc->next_desc = cpu_to_le32(ndesc);
 }
 
 static void set_desc_info(u32 desc_info, u32 pkt_info,
 			  struct knav_dma_desc *desc)
 {
-	desc->desc_info = desc_info;
-	desc->packet_info = pkt_info;
+	desc->desc_info = cpu_to_le32(desc_info);
+	desc->packet_info = cpu_to_le32(pkt_info);
 }
 
-static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
+static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
 {
-	desc->pad[0] = pad0;
-	desc->pad[1] = pad1;
+	desc->pad[0] = cpu_to_le32(pad0);
+	desc->pad[1] = cpu_to_le32(pad1);
+	desc->pad[2] = cpu_to_le32(pad2);
 }
 
-static void set_org_pkt_info(u32 buff, u32 buff_len,
+static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
 			     struct knav_dma_desc *desc)
 {
-	desc->orig_buff = buff;
-	desc->orig_len = buff_len;
+	desc->orig_buff = cpu_to_le32(buff);
+	desc->orig_len = cpu_to_le32(buff_len);
 }
 
-static void set_words(u32 *words, int num_words, u32 *desc)
+static void set_words(u32 *words, int num_words, __le32 *desc)
 {
 	int i;
 
 	for (i = 0; i < num_words; i++)
-		desc[i] = words[i];
+		desc[i] = cpu_to_le32(words[i]);
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
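
Note on the pad-word helpers above: the knav descriptor's pad area is a set of 32-bit words in memory shared with the DMA engine, while the driver wants to stash a host pointer (skb, page or fragment buffer) there. When the kernel is built with 64-bit pointers or a 64-bit dma_addr_t, a pointer no longer fits in one word, so set_pad_info() now takes three words and get_pad_ptr() rebuilds the pointer from the first two. Below is a minimal userspace sketch of the same split/recombine pattern; store_ptr()/load_ptr() are illustrative names only, and the little-endian conversion done by the real helpers is omitted here.

/* Standalone illustration (not kernel code) of the split/recombine pattern
 * behind set_pad_info()/get_pad_ptr(): a host pointer is stored as two
 * 32-bit words so it fits in a descriptor whose fields are only 32 bits
 * wide, and round-trips whether pointers are 32 or 64 bits.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void store_ptr(uint32_t pad[2], void *ptr)
{
	uint64_t v = (uintptr_t)ptr;

	pad[0] = (uint32_t)v;		/* low 32 bits */
	pad[1] = (uint32_t)(v >> 32);	/* high 32 bits (0 on 32-bit hosts) */
}

static void *load_ptr(const uint32_t pad[2])
{
	uint64_t v = pad[0] + ((uint64_t)pad[1] << 32);

	return (void *)(uintptr_t)v;
}

int main(void)
{
	int payload = 42;
	uint32_t pad[2];

	store_ptr(pad, &payload);
	assert(load_ptr(pad) == (void *)&payload);	/* works on 32- and 64-bit */
	printf("recovered %d\n", *(int *)load_ptr(pad));
	return 0;
}
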
@@ -570,7 +581,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 	dma_addr_t dma_desc, dma_buf;
 	unsigned int buf_len, dma_sz = sizeof(*ndesc);
 	void *buf_ptr;
-	u32 tmp;
+	u32 pad[2];
 
 	get_words(&dma_desc, 1, &desc->next_desc);
 
@@ -580,14 +591,15 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
 			break;
 		}
-		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
-		get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
+		get_pad_ptr(&buf_ptr, ndesc);
 		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
 		__free_page(buf_ptr);
 		knav_pool_desc_put(netcp->rx_pool, desc);
 	}
 
-	get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
+	get_pad_info(&pad[0], &pad[1], &buf_len, desc);
+	buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+
 	if (buf_ptr)
 		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
 	knav_pool_desc_put(netcp->rx_pool, desc);
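
The conversions added throughout these hunks all follow one rule: descriptor fields are defined as little-endian 32-bit values in memory shared with the DMA engine, so the CPU converts on every access (a no-op on little-endian hosts, a byte swap on big-endian ones). As a standalone sketch of what such a fixed-endianness load/store means when written out by hand, consider the following; le32_load()/le32_store() are illustrative names, not kernel API.

/* Standalone sketch (not kernel code) of what le32_to_cpu()/cpu_to_le32()
 * accomplish: a descriptor word is (de)serialized byte by byte in
 * little-endian order instead of being copied as a raw u32, so the result
 * is independent of host endianness.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t le32_load(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t desc_word[4];	/* stand-in for one __le32 descriptor field */

	le32_store(desc_word, 0x12345678);
	assert(desc_word[0] == 0x78 && desc_word[3] == 0x12);
	assert(le32_load(desc_word) == 0x12345678);
	return 0;
}
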
@@ -626,7 +638,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	struct netcp_packet p_info;
 	struct sk_buff *skb;
 	void *org_buf_ptr;
-	u32 tmp;
 
 	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
 	if (!dma_desc)
@@ -639,7 +650,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	}
 
 	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
-	get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
+	get_pad_ptr(&org_buf_ptr, desc);
 
 	if (unlikely(!org_buf_ptr)) {
 		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -664,6 +675,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 	/* Fill in the page fragment list */
 	while (dma_desc) {
 		struct page *page;
+		void *ptr;
 
 		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
 		if (unlikely(!ndesc)) {
@@ -672,14 +684,15 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 		}
 
 		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
-		get_pad_info((u32 *)&page, &tmp, ndesc);
+		get_pad_ptr(&ptr, ndesc);
+		page = ptr;
 
 		if (likely(dma_buff && buf_len && page)) {
 			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
 				       DMA_FROM_DEVICE);
 		} else {
-			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
-				(void *)dma_buff, buf_len, page);
+			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
+				&dma_buff, buf_len, page);
 			goto free_desc;
 		}
 
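
The dev_err() change above drops the old `(void *)dma_buff` cast in favour of the kernel's %pad printk extension, which takes a pointer to a dma_addr_t and prints it at full width even when dma_addr_t is wider than a CPU pointer (for example 64-bit DMA addresses on a 32-bit LPAE kernel, where the %p cast would silently truncate). A kernel-style fragment showing the idiom; it is not standalone and assumes a struct device *dev and a struct page *page are in scope.

/* Kernel-style fragment: %pad consumes a *pointer* to a dma_addr_t and
 * prints the whole handle regardless of the width of void *.
 */
dma_addr_t dma_buff = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

if (dma_mapping_error(dev, dma_buff))
	dev_err(dev, "mapping failed\n");
else
	dev_dbg(dev, "mapped page at dma_buff(%pad)\n", &dma_buff);
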
@@ -750,7 +763,6 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
 	unsigned int buf_len, dma_sz;
 	dma_addr_t dma;
 	void *buf_ptr;
-	u32 tmp;
 
 	/* Allocate descriptor */
 	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
@@ -761,7 +773,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 
 		get_org_pkt_info(&dma, &buf_len, desc);
-		get_pad_info((u32 *)&buf_ptr, &tmp, desc);
+		get_pad_ptr(&buf_ptr, desc);
 
 		if (unlikely(!dma)) {
 			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -813,7 +825,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	struct page *page;
 	dma_addr_t dma;
 	void *bufptr;
-	u32 pad[2];
+	u32 pad[3];
 
 	/* Allocate descriptor */
 	hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -830,7 +842,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 		bufptr = netdev_alloc_frag(primary_buf_len);
-		pad[1] = primary_buf_len;
+		pad[2] = primary_buf_len;
 
 		if (unlikely(!bufptr)) {
 			dev_warn_ratelimited(netcp->ndev_dev,
@@ -842,7 +854,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		if (unlikely(dma_mapping_error(netcp->dev, dma)))
 			goto fail;
 
-		pad[0] = (u32)bufptr;
+		pad[0] = lower_32_bits((uintptr_t)bufptr);
+		pad[1] = upper_32_bits((uintptr_t)bufptr);
 
 	} else {
 		/* Allocate a secondary receive queue entry */
@@ -853,8 +866,9 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 		}
 		buf_len = PAGE_SIZE;
 		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
-		pad[0] = (u32)page;
-		pad[1] = 0;
+		pad[0] = lower_32_bits(dma);
+		pad[1] = upper_32_bits(dma);
+		pad[2] = 0;
 	}
 
 	desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -864,7 +878,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
 	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
 		    KNAV_DMA_DESC_RETQ_SHIFT;
 	set_org_pkt_info(dma, buf_len, hwdesc);
-	set_pad_info(pad[0], pad[1], hwdesc);
+	set_pad_info(pad[0], pad[1], pad[2], hwdesc);
 	set_desc_info(desc_info, pkt_info, hwdesc);
 
 	/* Push to FDQs */
@@ -935,8 +949,8 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
 			dma_unmap_single(netcp->dev, dma_buf, buf_len,
 					 DMA_TO_DEVICE);
 		else
-			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
-				 (void *)dma_buf, buf_len);
+			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
+				 &dma_buf, buf_len);
 
 		knav_pool_desc_put(netcp->tx_pool, ndesc);
 		ndesc = NULL;
@@ -953,11 +967,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 					  unsigned int budget)
 {
 	struct knav_dma_desc *desc;
+	void *ptr;
 	struct sk_buff *skb;
 	unsigned int dma_sz;
 	dma_addr_t dma;
 	int pkts = 0;
-	u32 tmp;
 
 	while (budget--) {
 		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
@@ -970,7 +984,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
 			continue;
 		}
 
-		get_pad_info((u32 *)&skb, &tmp, desc);
+		get_pad_ptr(&ptr, desc);
+		skb = ptr;
 		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
 		if (!skb) {
 			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1059,6 +1074,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 		u32 page_offset = frag->page_offset;
 		u32 buf_len = skb_frag_size(frag);
 		dma_addr_t desc_dma;
+		u32 desc_dma_32;
 		u32 pkt_info;
 
 		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
@@ -1075,13 +1091,13 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 			goto free_descs;
 		}
 
-		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
-						      (void *)ndesc);
+		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
 		pkt_info =
 			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
 			KNAV_DMA_DESC_RETQ_SHIFT;
 		set_pkt_info(dma_addr, buf_len, 0, ndesc);
-		set_words(&desc_dma, 1, &pdesc->next_desc);
+		desc_dma_32 = (u32)desc_dma;
+		set_words(&desc_dma_32, 1, &pdesc->next_desc);
 		pkt_len += buf_len;
 		if (pdesc != desc)
 			knav_pool_desc_map(netcp->tx_pool, pdesc,
@@ -1173,11 +1189,14 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
 	}
 
 	set_words(&tmp, 1, &desc->packet_info);
-	set_words((u32 *)&skb, 1, &desc->pad[0]);
+	tmp = lower_32_bits((uintptr_t)skb);
+	set_words(&tmp, 1, &desc->pad[0]);
+	tmp = upper_32_bits((uintptr_t)skb);
+	set_words(&tmp, 1, &desc->pad[1]);
 
 	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
 		tmp = tx_pipe->switch_to_port;
-		set_words((u32 *)&tmp, 1, &desc->tag_info);
+		set_words(&tmp, 1, &desc->tag_info);
 	}
 
 	/* submit packet descriptor */
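
One assumption carried by the Tx mapping hunks above: the hardware descriptor's next_desc field is still a single 32-bit word, so the possibly 64-bit desc_dma returned by knav_pool_desc_virt_to_dma() is narrowed to u32 (desc_dma_32) before set_words(). That is only safe while the descriptor pool is mapped below 4 GB of DMA address space. Below is a standalone sketch that makes the truncation explicit; pack_next_desc() is an illustrative helper, not part of the driver, and the patch itself uses a plain cast.

/* Standalone sketch (not kernel code) of the assumption behind
 * "desc_dma_32 = (u32)desc_dma": narrowing a 64-bit DMA handle to a
 * 32-bit descriptor field is only valid when the handle has no bits set
 * above bit 31, so a guard turns a silent truncation into an error.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

static int pack_next_desc(uint32_t *field, dma_addr_t desc_dma)
{
	if (desc_dma >> 32)		/* high bits set: would be truncated */
		return -1;
	*field = (uint32_t)desc_dma;
	return 0;
}

int main(void)
{
	uint32_t next_desc;

	if (pack_next_desc(&next_desc, 0x87654320ULL) == 0)
		printf("next_desc = 0x%08x\n", (unsigned int)next_desc);
	if (pack_next_desc(&next_desc, 0x1ffffffff0ULL) != 0)
		printf("descriptor outside 32-bit DMA range\n");
	return 0;
}
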