Skip to content

Commit 6fd96ff

Browse files
committed
Merge branch 'for-5.1/libnvdimm-start-pad' into libnvdimm-for-next
Merge the initial lead-in cleanups and fixes that resulted from the effort to resolve bugs in the section-alignment padding implementation in the nvdimm core. The back half of this topic is abandoned in favor of implementing sub-section hotplug support.
2 parents 451fed2 + 4960461 commit 6fd96ff

File tree

3 files changed

+46
-20
lines changed

3 files changed

+46
-20
lines changed

drivers/dax/super.c

Lines changed: 28 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
8686
{
8787
struct dax_device *dax_dev;
8888
bool dax_enabled = false;
89+
pgoff_t pgoff, pgoff_end;
8990
struct request_queue *q;
90-
pgoff_t pgoff;
91-
int err, id;
92-
pfn_t pfn;
93-
long len;
9491
char buf[BDEVNAME_SIZE];
92+
void *kaddr, *end_kaddr;
93+
pfn_t pfn, end_pfn;
94+
sector_t last_page;
95+
long len, len2;
96+
int err, id;
9597

9698
if (blocksize != PAGE_SIZE) {
9799
pr_debug("%s: error: unsupported blocksize for dax\n",
@@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
113115
return false;
114116
}
115117

118+
last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
119+
err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
120+
if (err) {
121+
pr_debug("%s: error: unaligned partition for dax\n",
122+
bdevname(bdev, buf));
123+
return false;
124+
}
125+
116126
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
117127
if (!dax_dev) {
118128
pr_debug("%s: error: device does not support dax\n",
@@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
121131
}
122132

123133
id = dax_read_lock();
124-
len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
134+
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
135+
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
125136
dax_read_unlock(id);
126137

127138
put_dax(dax_dev);
128139

129-
if (len < 1) {
140+
if (len < 1 || len2 < 1) {
130141
pr_debug("%s: error: dax access failed (%ld)\n",
131-
bdevname(bdev, buf), len);
142+
bdevname(bdev, buf), len < 1 ? len : len2);
132143
return false;
133144
}
134145

@@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
143154
*/
144155
WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
145156
dax_enabled = true;
146-
} else if (pfn_t_devmap(pfn)) {
147-
struct dev_pagemap *pgmap;
157+
} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
158+
struct dev_pagemap *pgmap, *end_pgmap;
148159

149160
pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
150-
if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
161+
end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
162+
if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
163+
&& pfn_t_to_page(pfn)->pgmap == pgmap
164+
&& pfn_t_to_page(end_pfn)->pgmap == pgmap
165+
&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
166+
&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
151167
dax_enabled = true;
152168
put_dev_pagemap(pgmap);
169+
put_dev_pagemap(end_pgmap);
170+
153171
}
154172

155173
if (!dax_enabled) {

drivers/nvdimm/namespace_devs.c

Lines changed: 4 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
138138
bool pmem_should_map_pages(struct device *dev)
139139
{
140140
struct nd_region *nd_region = to_nd_region(dev->parent);
141+
struct nd_namespace_common *ndns = to_ndns(dev);
141142
struct nd_namespace_io *nsio;
142143

143144
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
149150
if (is_nd_pfn(dev) || is_nd_btt(dev))
150151
return false;
151152

153+
if (ndns->force_raw)
154+
return false;
155+
152156
nsio = to_nd_namespace_io(dev);
153157
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
154158
IORESOURCE_SYSTEM_RAM,

drivers/nvdimm/pfn_devs.c

Lines changed: 14 additions & 10 deletions
Original file line number · Diff line number · Diff line change
@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
580580
}
581581
EXPORT_SYMBOL(nd_pfn_probe);
582582

583+
static u32 info_block_reserve(void)
584+
{
585+
return ALIGN(SZ_8K, PAGE_SIZE);
586+
}
587+
583588
/*
584589
* We hotplug memory at section granularity, pad the reserved area from
585590
* the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
593598

594599
static unsigned long init_altmap_reserve(resource_size_t base)
595600
{
596-
unsigned long reserve = PHYS_PFN(SZ_8K);
601+
unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
597602
unsigned long base_pfn = PHYS_PFN(base);
598603

599604
reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
608613
u64 offset = le64_to_cpu(pfn_sb->dataoff);
609614
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
610615
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
616+
u32 reserve = info_block_reserve();
611617
struct nd_namespace_common *ndns = nd_pfn->ndns;
612618
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
613619
resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
621627
res->end -= end_trunc;
622628

623629
if (nd_pfn->mode == PFN_MODE_RAM) {
624-
if (offset < SZ_8K)
630+
if (offset < reserve)
625631
return -EINVAL;
626632
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
627633
pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
634640
le64_to_cpu(nd_pfn->pfn_sb->npfns),
635641
nd_pfn->npfns);
636642
memcpy(altmap, &__altmap, sizeof(*altmap));
637-
altmap->free = PHYS_PFN(offset - SZ_8K);
643+
altmap->free = PHYS_PFN(offset - reserve);
638644
altmap->alloc = 0;
639645
pgmap->altmap_valid = true;
640646
} else
@@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
678684
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
679685
IORES_DESC_NONE) == REGION_MIXED
680686
|| !IS_ALIGNED(end, nd_pfn->align)
681-
|| nd_region_conflict(nd_region, start, size + adjust))
687+
|| nd_region_conflict(nd_region, start, size))
682688
*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
683689
}
684690

685691
static int nd_pfn_init(struct nd_pfn *nd_pfn)
686692
{
687-
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
688693
struct nd_namespace_common *ndns = nd_pfn->ndns;
689694
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
695+
u32 start_pad, end_trunc, reserve = info_block_reserve();
690696
resource_size_t start, size;
691697
struct nd_region *nd_region;
692-
u32 start_pad, end_trunc;
693698
struct nd_pfn_sb *pfn_sb;
694699
unsigned long npfns;
695700
phys_addr_t offset;
@@ -734,19 +739,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
734739
*/
735740
start = nsio->res.start + start_pad;
736741
size = resource_size(&nsio->res);
737-
npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
742+
npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
738743
/ PAGE_SIZE);
739744
if (nd_pfn->mode == PFN_MODE_PMEM) {
740745
/*
741746
* The altmap should be padded out to the block size used
742747
* when populating the vmemmap. This *should* be equal to
743748
* PMD_SIZE for most architectures.
744749
*/
745-
offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
750+
offset = ALIGN(start + reserve + 64 * npfns,
746751
max(nd_pfn->align, PMD_SIZE)) - start;
747752
} else if (nd_pfn->mode == PFN_MODE_RAM)
748-
offset = ALIGN(start + SZ_8K + dax_label_reserve,
749-
nd_pfn->align) - start;
753+
offset = ALIGN(start + reserve, nd_pfn->align) - start;
750754
else
751755
return -ENXIO;
752756

0 commit comments

Comments (0)