@@ -33,6 +33,31 @@ unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
 
 #endif /* EFX_USE_PIO */
 
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_BUG_ON_PARANOID(buffer->len);
+	EFX_BUG_ON_PARANOID(buffer->flags);
+	EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
+
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer,
 			       unsigned int *pkts_compl,
@@ -180,7 +205,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
-	unsigned int len, unmap_len = 0, insert_ptr;
+	unsigned int len, unmap_len = 0;
 	dma_addr_t dma_addr, unmap_addr = 0;
 	unsigned int dma_len;
 	unsigned short dma_flags;
@@ -221,11 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
 	/* Add to TX queue, splitting across DMA boundaries */
 	do {
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
-		EFX_BUG_ON_PARANOID(buffer->flags);
-		EFX_BUG_ON_PARANOID(buffer->len);
-		EFX_BUG_ON_PARANOID(buffer->unmap_len);
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 
 		dma_len = efx_max_tx_len(efx, dma_addr);
 		if (likely(dma_len >= len))
@@ -283,8 +304,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		unsigned int pkts_compl = 0, bytes_compl = 0;
 		--tx_queue->insert_count;
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
 	}
 
@@ -755,23 +775,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
 	struct efx_tx_buffer *buffer;
 	struct efx_nic *efx = tx_queue->efx;
-	unsigned dma_len, insert_ptr;
+	unsigned dma_len;
 
 	EFX_BUG_ON_PARANOID(len <= 0);
 
 	while (1) {
-		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-		buffer = &tx_queue->buffer[insert_ptr];
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 		++tx_queue->insert_count;
 
 		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
 				    tx_queue->read_count >=
 				    efx->txq_entries);
 
-		EFX_BUG_ON_PARANOID(buffer->len);
-		EFX_BUG_ON_PARANOID(buffer->unmap_len);
-		EFX_BUG_ON_PARANOID(buffer->flags);
-
 		buffer->dma_addr = dma_addr;
 
 		dma_len = efx_max_tx_len(efx, dma_addr);
@@ -832,8 +847,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 	/* Work backwards until we hit the original insert pointer value */
 	while (tx_queue->insert_count != tx_queue->write_count) {
 		--tx_queue->insert_count;
-		buffer = &tx_queue->buffer[tx_queue->insert_count &
-					   tx_queue->ptr_mask];
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
 		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
 	}
 }
@@ -978,7 +992,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 				struct tso_state *st)
 {
 	struct efx_tx_buffer *buffer =
-		&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
+		efx_tx_queue_get_insert_buffer(tx_queue);
 	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
 	u8 tcp_flags_clear;
 
@@ -1048,8 +1062,7 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 	/* We mapped the headers in tso_start(). Unmap them
 	 * when the last segment is completed.
 	 */
-	buffer = &tx_queue->buffer[tx_queue->insert_count &
-				   tx_queue->ptr_mask];
+	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 	buffer->dma_addr = st->header_dma_addr;
 	buffer->len = st->header_len;
 	if (is_last) {
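
Note: every call site in this patch replaces the same open-coded idiom,
"tx_queue->insert_count & tx_queue->ptr_mask", with one of the new helpers.
The checked variant, efx_tx_queue_get_insert_buffer(), asserts that the slot
being claimed is empty (len, flags and unmap_len all zero); the
double-underscore variant skips those checks and is used on the unwind paths,
where the slot being looked up deliberately still holds a buffer to dequeue.

Below is a minimal, self-contained sketch of the masking idiom the helpers
wrap, assuming (as the masking implies) a power-of-two ring size with
ptr_mask equal to size - 1. The types and names are simplified stand-ins for
illustration, not the sfc driver's structures:

	#include <assert.h>
	#include <stdio.h>

	#define RING_ENTRIES 8u			/* must be a power of two */

	struct demo_ring {
		unsigned int insert_count;	/* free-running, wraps naturally */
		unsigned int ptr_mask;		/* RING_ENTRIES - 1 */
		int buffer[RING_ENTRIES];
	};

	/* counter & mask == counter % RING_ENTRIES for power-of-two sizes */
	static inline unsigned int demo_get_insert_index(const struct demo_ring *ring)
	{
		return ring->insert_count & ring->ptr_mask;
	}

	int main(void)
	{
		struct demo_ring ring = { .insert_count = 0xfffffffeu,
					  .ptr_mask = RING_ENTRIES - 1 };
		unsigned int i;

		/* Even across the 32-bit wrap of insert_count, the masked
		 * index cycles 0..RING_ENTRIES-1 with no discontinuity.
		 */
		for (i = 0; i < 4; i++) {
			unsigned int idx = demo_get_insert_index(&ring);

			assert(idx == ring.insert_count % RING_ENTRIES);
			printf("insert_count=%#010x -> index=%u\n",
			       ring.insert_count, idx);
			ring.buffer[idx] = 1;	/* claim the slot */
			++ring.insert_count;
		}
		return 0;
	}

Because the counter is never masked at increment time, the difference
insert_count - read_count still measures queue fill level directly, which is
exactly what the EFX_BUG_ON_PARANOID overflow check in efx_tx_queue_insert()
relies on.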