@@ -2002,12 +2002,12 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
 		payload = (void *)mb + mb_offset;
 		payload_flush = (void *)mb + mb_offset;
 
-		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
 			if (r5l_recovery_verify_data_checksum(
 				    log, ctx, page, log_offset,
 				    payload->checksum[0]) < 0)
 				goto mismatch;
-		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
 			if (r5l_recovery_verify_data_checksum(
 				    log, ctx, page, log_offset,
 				    payload->checksum[0]) < 0)
@@ -2019,12 +2019,12 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
 					 BLOCK_SECTORS),
 				    payload->checksum[1]) < 0)
 				goto mismatch;
-		} else if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
 		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
 			goto mismatch;
 
-		if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			mb_offset += sizeof(struct r5l_payload_flush) +
 				le32_to_cpu(payload_flush->size);
 		} else {
@@ -2091,7 +2091,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 		payload = (void *)mb + mb_offset;
 		payload_flush = (void *)mb + mb_offset;
 
-		if (payload->header.type == R5LOG_PAYLOAD_FLUSH) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
 			int i, count;
 
 			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
@@ -2113,7 +2113,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 		}
 
 		/* DATA or PARITY payload */
-		stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
 			raid5_compute_sector(
 				conf, le64_to_cpu(payload->location), 0, &dd,
 				NULL)
@@ -2151,15 +2151,15 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 			list_add_tail(&sh->lru, cached_stripe_list);
 		}
 
-		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
 			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
 			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
 				r5l_recovery_replay_one_stripe(conf, sh, ctx);
 				list_move_tail(&sh->lru, cached_stripe_list);
 			}
 			r5l_recovery_load_data(log, sh, ctx, payload,
 					       log_offset);
-		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
 			r5l_recovery_load_parity(log, sh, ctx, payload,
 						 log_offset);
 		else
@@ -2361,7 +2361,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 			payload = (void *)mb + offset;
 			payload->header.type = cpu_to_le16(
 				R5LOG_PAYLOAD_DATA);
-			payload->size = BLOCK_SECTORS;
+			payload->size = cpu_to_le32(BLOCK_SECTORS);
 			payload->location = cpu_to_le64(
 				raid5_compute_blocknr(sh, i, 0));
 			addr = kmap_atomic(dev->page);
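Taken together, the hunks enforce one rule: the r5l log's on-disk fields are declared little-endian (__le16/__le32/__le64), so they must pass through le16_to_cpu()/le32_to_cpu() before any comparison and through cpu_to_le16()/cpu_to_le32() before any store. Below is a minimal userspace sketch of that rule, using <endian.h>'s le16toh()/htole32() as stand-ins for the kernel helpers; the struct and constants are simplified stand-ins for the raid5-cache definitions, not the kernel's own.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the raid5-cache on-disk definitions. */
#define R5LOG_PAYLOAD_DATA	0
#define BLOCK_SECTORS		8

struct payload_header {
	uint16_t type;		/* __le16 on disk */
	uint16_t flags;		/* __le16 on disk */
};

struct payload {
	struct payload_header header;
	uint32_t size;		/* __le32 on disk */
};

int main(void)
{
	struct payload p;

	/* Writing: CPU order -> on-disk little-endian order
	 * (cpu_to_le16()/cpu_to_le32() in the kernel).  The last hunk
	 * fixes a bare "payload->size = BLOCK_SECTORS;" that skipped
	 * this conversion. */
	p.header.type = htole16(R5LOG_PAYLOAD_DATA);
	p.size = htole32(BLOCK_SECTORS);

	/* Reading: on-disk order -> CPU order before comparing against
	 * CPU-order constants (le16_to_cpu() in the kernel). */
	if (le16toh(p.header.type) == R5LOG_PAYLOAD_DATA)
		printf("data payload, %u sectors\n",
		       (unsigned)le32toh(p.size));
	return 0;
}

On a little-endian host the converted and unconverted forms happen to produce the same result, which is presumably why the original comparisons went unnoticed; a big-endian host, or a sparse build (the __le16/__le32 typedefs are __bitwise-annotated), exposes the difference.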