
Commit 808f80b

fdmanana authored and masoncl committed
Btrfs: update fix for read corruption of compressed and shared extents
My previous fix in commit 005efed ("Btrfs: fix read corruption of compressed
and shared extents") was effective only if the compressed extents cover a file
range with a length that is not a multiple of 16 pages. That's because the
detection of when we reached a different range of the file that shares the
same compressed extent as the previously processed range was done at
extent_io.c:__do_contiguous_readpages(), which covers subranges with a length
up to 16 pages, because extent_readpages() groups the pages in clusters no
larger than 16 pages. So fix this by tracking the start of the previously
processed file range's extent map at extent_readpages().

The following test case for fstests reproduces the issue:

  seq=`basename $0`
  seqres=$RESULT_DIR/$seq
  echo "QA output created by $seq"
  tmp=/tmp/$$
  status=1	# failure is the default!
  trap "_cleanup; exit \$status" 0 1 2 3 15

  _cleanup()
  {
      rm -f $tmp.*
  }

  # get standard environment, filters and checks
  . ./common/rc
  . ./common/filter

  # real QA test starts here
  _need_to_be_root
  _supported_fs btrfs
  _supported_os Linux
  _require_scratch
  _require_cloner

  rm -f $seqres.full

  test_clone_and_read_compressed_extent()
  {
      local mount_opts=$1

      _scratch_mkfs >>$seqres.full 2>&1
      _scratch_mount $mount_opts

      # Create our test file with a single extent of 64Kb that is going to
      # be compressed no matter which compression algo is used (zlib/lzo).
      $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 64K" \
          $SCRATCH_MNT/foo | _filter_xfs_io

      # Now clone the compressed extent into an adjacent file offset.
      $CLONER_PROG -s 0 -d $((64 * 1024)) -l $((64 * 1024)) \
          $SCRATCH_MNT/foo $SCRATCH_MNT/foo

      echo "File digest before unmount:"
      md5sum $SCRATCH_MNT/foo | _filter_scratch

      # Remount the fs or clear the page cache to trigger the bug in
      # btrfs. Because the extent has an uncompressed length that is a
      # multiple of 16 pages, all the pages belonging to the second range
      # of the file (64K to 128K), which points to the same extent as the
      # first range (0K to 64K), had their contents full of zeroes instead
      # of the byte 0xaa. This was a bug exclusively in the read path of
      # compressed extents, the correct data was stored on disk, btrfs
      # just failed to fill in the pages correctly.
      _scratch_remount

      echo "File digest after remount:"
      # Must match the digest we got before.
      md5sum $SCRATCH_MNT/foo | _filter_scratch
  }

  echo -e "\nTesting with zlib compression..."
  test_clone_and_read_compressed_extent "-o compress=zlib"

  _scratch_unmount

  echo -e "\nTesting with lzo compression..."
  test_clone_and_read_compressed_extent "-o compress=lzo"

  status=0
  exit

Cc: stable@vger.kernel.org
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Tested-by: Timofey Titovets <nefelim4ag@gmail.com>
1 parent b786f16 commit 808f80b
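To see why moving prev_em_start up one level matters, below is a minimal standalone sketch (plain C, not the kernel code; the helpers, values and printed messages are illustrative only). It models a 128K file whose second 64K half is a clone of the first, read as 32 pages handed down in clusters of at most 16 pages, the same batching extent_readpages() does via ARRAY_SIZE(pagepool). The check that __do_readpage() performs is simplified here to "did the extent map change since the previous page?".

/*
 * Standalone model (not kernel code) of the state-lifetime change in this
 * commit.  Assumptions: a 128K file whose second 64K half shares one
 * compressed extent with the first half but is described by a different
 * extent map, 4K pages, and clusters of 16 pages per call, mirroring
 * ARRAY_SIZE(pagepool) in extent_readpages().
 */
#include <stdio.h>
#include <stdint.h>

#define CLUSTER		16		/* pages per __do_contiguous_readpages() call */
#define NR_PAGES	32		/* 128K file, 4K pages */
#define EM_NONE		((uint64_t)-1)

/* Illustrative extent map "identity" for a page: one map per 64K range. */
static uint64_t extent_map_id(int page_index)
{
	return (page_index < 16) ? 0 : 65536;
}

/* Stand-in for the shared-extent boundary check done per page. */
static void do_readpage(int page_index, uint64_t *prev_em_start)
{
	uint64_t em = extent_map_id(page_index);

	if (*prev_em_start != EM_NONE && *prev_em_start != em)
		printf("  page %2d: extent map changed -> submit current bio first\n",
		       page_index);
	*prev_em_start = em;
}

static void readpages(int track_in_caller)
{
	uint64_t caller_state = EM_NONE;	/* this commit: lives for the whole call */

	for (int first = 0; first < NR_PAGES; first += CLUSTER) {
		uint64_t cluster_state = EM_NONE;	/* old code: reset per 16-page cluster */
		uint64_t *prev_em_start = track_in_caller ? &caller_state
							  : &cluster_state;

		for (int i = first; i < first + CLUSTER; i++)
			do_readpage(i, prev_em_start);
	}
}

int main(void)
{
	printf("prev_em_start per cluster (old placement):\n");
	readpages(0);
	printf("prev_em_start per readpages call (this commit):\n");
	readpages(1);
	return 0;
}

With the per-cluster variable, every 16-page batch starts over at (u64)-1, so the boundary at page 16 is never detected when the shared extent's length is a multiple of 16 pages; with the per-call variable the boundary is seen and the in-flight bio can be submitted before the cloned range is read.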

File tree

1 file changed: 11 additions, 8 deletions


fs/btrfs/extent_io.c

Lines changed: 11 additions & 8 deletions
@@ -3144,12 +3144,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 					     get_extent_t *get_extent,
 					     struct extent_map **em_cached,
 					     struct bio **bio, int mirror_num,
-					     unsigned long *bio_flags, int rw)
+					     unsigned long *bio_flags, int rw,
+					     u64 *prev_em_start)
 {
 	struct inode *inode;
 	struct btrfs_ordered_extent *ordered;
 	int index;
-	u64 prev_em_start = (u64)-1;
 
 	inode = pages[0]->mapping->host;
 	while (1) {
@@ -3165,7 +3165,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
-			      mirror_num, bio_flags, rw, &prev_em_start);
+			      mirror_num, bio_flags, rw, prev_em_start);
 		page_cache_release(pages[index]);
 	}
 }
@@ -3175,7 +3175,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
 			       int nr_pages, get_extent_t *get_extent,
 			       struct extent_map **em_cached,
 			       struct bio **bio, int mirror_num,
-			       unsigned long *bio_flags, int rw)
+			       unsigned long *bio_flags, int rw,
+			       u64 *prev_em_start)
 {
 	u64 start = 0;
 	u64 end = 0;
@@ -3196,7 +3197,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 						  index - first_index, start,
 						  end, get_extent, em_cached,
 						  bio, mirror_num, bio_flags,
-						  rw);
+						  rw, prev_em_start);
 			start = page_start;
 			end = start + PAGE_CACHE_SIZE - 1;
 			first_index = index;
@@ -3207,7 +3208,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
 		__do_contiguous_readpages(tree, &pages[first_index],
 					  index - first_index, start,
 					  end, get_extent, em_cached, bio,
-					  mirror_num, bio_flags, rw);
+					  mirror_num, bio_flags, rw,
+					  prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4218,6 +4220,7 @@ int extent_readpages(struct extent_io_tree *tree,
 	struct page *page;
 	struct extent_map *em_cached = NULL;
 	int nr = 0;
+	u64 prev_em_start = (u64)-1;
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		page = list_entry(pages->prev, struct page, lru);
@@ -4234,12 +4237,12 @@ int extent_readpages(struct extent_io_tree *tree,
 		if (nr < ARRAY_SIZE(pagepool))
 			continue;
 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-				   &bio, 0, &bio_flags, READ);
+				   &bio, 0, &bio_flags, READ, &prev_em_start);
 		nr = 0;
 	}
 	if (nr)
 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-				   &bio, 0, &bio_flags, READ);
+				   &bio, 0, &bio_flags, READ, &prev_em_start);
 
 	if (em_cached)
 		free_extent_map(em_cached);
