Commit ad428cd

dax: Check the end of the block-device capacity with dax_direct_access()

The checks in __bdev_dax_supported() helped mitigate a potential data
corruption bug in the pmem driver's handling of section alignment padding.
Strengthen the checks, including checking the end of the range, to validate
the dev_pagemap, Xarray entries, and sector-to-pfn translation established
for pmem namespaces.

Acked-by: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent fa7d2e6 commit ad428cd
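
For readers of the diff below, a minimal user-space sketch of the new last_page arithmetic. It assumes 4 KiB pages (so PAGE_SIZE / 512 == 8 sectors per page); device_size is a hypothetical stand-in for i_size_read(bdev->bd_inode):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES   4096ULL   /* assumption: 4 KiB pages */
#define SECTOR_SIZE_BYTES 512ULL    /* block-layer sector size */

int main(void)
{
        /* hypothetical stand-in for i_size_read(bdev->bd_inode) */
        uint64_t device_size = 128ULL << 20;   /* e.g. a 128 MiB namespace */

        /* page frame holding the device's last byte: PFN_DOWN(size - 1) */
        uint64_t last_pfn = (device_size - 1) / PAGE_SIZE_BYTES;

        /*
         * Convert that page index to a 512-byte sector index; this is the
         * "* 8" in the patch (PAGE_SIZE / 512 == 8 for 4 KiB pages).
         */
        uint64_t last_page = last_pfn * (PAGE_SIZE_BYTES / SECTOR_SIZE_BYTES);

        /*
         * bdev_dax_pgoff() then adds the partition start and maps this
         * sector back to a page offset, so dax_direct_access() can probe
         * both the first and the last page of the capacity.
         */
        printf("last page of the capacity starts at sector %llu\n",
               (unsigned long long)last_page);
        return 0;
}

If the partition offset leaves that last-page sector unaligned, bdev_dax_pgoff() fails and __bdev_dax_supported() reports "unaligned partition for dax" instead of enabling DAX.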

1 file changed, 28 insertions(+), 10 deletions(-)

drivers/dax/super.c

Lines changed: 28 additions & 10 deletions
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
         struct dax_device *dax_dev;
         bool dax_enabled = false;
+        pgoff_t pgoff, pgoff_end;
         struct request_queue *q;
-        pgoff_t pgoff;
-        int err, id;
-        pfn_t pfn;
-        long len;
         char buf[BDEVNAME_SIZE];
+        void *kaddr, *end_kaddr;
+        pfn_t pfn, end_pfn;
+        sector_t last_page;
+        long len, len2;
+        int err, id;

         if (blocksize != PAGE_SIZE) {
                 pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                 return false;
         }

+        last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
+        if (err) {
+                pr_debug("%s: error: unaligned partition for dax\n",
+                                bdevname(bdev, buf));
+                return false;
+        }
+
         dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
         if (!dax_dev) {
                 pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
         }

         id = dax_read_lock();
-        len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
+        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
         dax_read_unlock(id);

         put_dax(dax_dev);

-        if (len < 1) {
+        if (len < 1 || len2 < 1) {
                 pr_debug("%s: error: dax access failed (%ld)\n",
-                                bdevname(bdev, buf), len);
+                                bdevname(bdev, buf), len < 1 ? len : len2);
                 return false;
         }

@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                  */
                 WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
                 dax_enabled = true;
-        } else if (pfn_t_devmap(pfn)) {
-                struct dev_pagemap *pgmap;
+        } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
+                struct dev_pagemap *pgmap, *end_pgmap;

                 pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
-                if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
+                end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
+                if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
+                                && pfn_t_to_page(pfn)->pgmap == pgmap
+                                && pfn_t_to_page(end_pfn)->pgmap == pgmap
+                                && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
+                                && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
                         dax_enabled = true;
                 put_dev_pagemap(pgmap);
+                put_dev_pagemap(end_pgmap);
+
         }

         if (!dax_enabled) {
