Skip to content

Commit 717bc0e

Browse files
Brian Foster authored and dchinner committed
xfs: refactor in-core log state update to helper
Once the record at the head of the log is identified and verified, the in-core log state is updated based on the record. This includes information such as the current head block and cycle, the start block of the last record written to the log, the tail lsn, etc. Once torn write detection is conditional, this logic will need to be reused. Factor the code to update the in-core log data structures into a new helper function. This patch does not change behavior. Signed-off-by: Brian Foster <bfoster@redhat.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
1 parent 65b99a0 commit 717bc0e

File tree

1 file changed

+33
-19
lines changed

1 file changed

+33
-19
lines changed

fs/xfs/xfs_log_recover.c

Lines changed: 33 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1276,6 +1276,37 @@ xlog_check_unmount_rec(
12761276
return 0;
12771277
}
12781278

1279+
/*
 * Update the in-core log state from the record found at the head of the
 * log: the current head block and cycle, the start block of the last
 * record written (l_prev_block), the tail and last-sync LSNs, and the
 * reserve/write grant heads.
 *
 * @log:        in-core log to update
 * @head_blk:   block number of the log head
 * @rhead:      record header of the last good record at the log head
 * @rhead_blk:  start block of that head record
 * @bump_cycle: bump l_curr_cycle by one (the caller passes its 'wrapped'
 *              flag; see the head_blk == 0 discussion in the comment below)
 */
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	/* cycle and LSNs come from the on-disk record header (big-endian) */
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	/* position both grant heads at the current head of the log */
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}
1309+
12791310
/*
12801311
* Find the sync block number or the tail of the log.
12811312
*
@@ -1356,26 +1387,9 @@ xlog_find_tail(
13561387
goto done;
13571388

13581389
/*
1359-
* Reset log values according to the state of the log when we
1360-
* crashed. In the case where head_blk == 0, we bump curr_cycle
1361-
* one because the next write starts a new cycle rather than
1362-
* continuing the cycle of the last good log record. At this
1363-
* point we have guaranteed that all partial log records have been
1364-
* accounted for. Therefore, we know that the last good log record
1365-
* written was complete and ended exactly on the end boundary
1366-
* of the physical log.
1390+
* Set the log state based on the current head record.
13671391
*/
1368-
log->l_prev_block = rhead_blk;
1369-
log->l_curr_block = (int)*head_blk;
1370-
log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1371-
if (wrapped)
1372-
log->l_curr_cycle++;
1373-
atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1374-
atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1375-
xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1376-
BBTOB(log->l_curr_block));
1377-
xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1378-
BBTOB(log->l_curr_block));
1392+
xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
13791393
tail_lsn = atomic64_read(&log->l_tail_lsn);
13801394

13811395
/*

0 commit comments

Comments
 (0)