@@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK);
+		return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
-		TRB_TYPE(TRB_LINK));
+	return TRB_TYPE_LINK_LE32(link->control);
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
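The TRB_TYPE_LINK_LE32() and TRB_TYPE_NOOP_LE32() helpers this patch switches to are not shown in the hunks; presumably they sit in drivers/usb/host/xhci.h next to TRB_TYPE_BITMASK. As a minimal sketch (the exact definitions may differ), they likely expand to:

	/* Sketch only: test a __le32 TRB control word for a given TRB type
	 * without byte-swapping the field; the constant side is converted
	 * once with cpu_to_le32(), which the compiler folds at build time. */
	#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
					 cpu_to_le32(TRB_TYPE(TRB_LINK)))
	#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
					 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))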
@@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
+		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
-	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
+	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -525,8 +523,7 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
-				== TRB_TYPE(TRB_LINK)) {
+		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
@@ -1000,7 +997,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	 * but we don't care.
 	 */
 	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
-		 (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+		 GET_COMP_CODE(le32_to_cpu(event->status)));
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used.  Queue that here
@@ -1458,7 +1455,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	 * endpoint anyway.  Check if a babble halted the
 	 * endpoint.
 	 */
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
+	if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+	    cpu_to_le32(EP_STATE_HALTED))
 		return 1;
 
 	return 0;
@@ -1752,10 +1750,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue,
 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((le32_to_cpu(cur_trb->generic.field[3]) &
-			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (le32_to_cpu(cur_trb->generic.field[3]) &
-			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
@@ -1888,10 +1884,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
 		     cur_trb != event_trb;
 		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if ((le32_to_cpu(cur_trb->generic.field[3]) &
-			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
-			    (le32_to_cpu(cur_trb->generic.field[3]) &
-			    TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
 				td->urb->actual_length +=
 					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
 		}
@@ -2046,8 +2040,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
 				  ep_index);
 			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-				 (unsigned int) (le32_to_cpu(event->flags)
-				 & TRB_TYPE_BITMASK)>>10);
+				 (le32_to_cpu(event->flags) &
+				  TRB_TYPE_BITMASK)>>10);
 			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
 			if (ep->skip) {
 				ep->skip = false;
@@ -2104,9 +2098,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 * corresponding TD has been cancelled. Just ignore
 			 * the TD.
 			 */
-			if ((le32_to_cpu(event_trb->generic.field[3])
-				 & TRB_TYPE_BITMASK)
-				 == TRB_TYPE(TRB_TR_NOOP)) {
+			if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
 				xhci_dbg(xhci,
 					 "event_trb is a no-op TRB. Skip it\n");
 				goto cleanup;
@@ -2432,7 +2424,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			next->link.control |= cpu_to_le32(TRB_CHAIN);
 
 			wmb();
-			next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
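Across these hunks the pattern is the same: instead of byte-swapping the __le32 field with le32_to_cpu() on every test, the constant mask and value are converted with cpu_to_le32(), so the swap happens at compile time and sparse's endianness annotations stay intact. A minimal standalone sketch of the idea, using the EP_STATE check above (ep_halted() is a hypothetical helper, not part of the driver, and assumes the usual kernel definitions of __le32, cpu_to_le32() and the EP_STATE_* constants from xhci.h):

	/* Hypothetical helper: mask and compare entirely in little-endian
	 * space, so the field itself is never byte-swapped at runtime. */
	static inline int ep_halted(__le32 ep_info)
	{
		return (ep_info & cpu_to_le32(EP_STATE_MASK)) ==
		       cpu_to_le32(EP_STATE_HALTED);
	}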