
Commit ac515c0

libnvdimm, pmem, pfn: move pfn setup to the core

Now that pmem internals have been disentangled from pfn setup, that code
can move to the core. This is in preparation for adding another user of
the pfn-device capabilities.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 200c79d · commit ac515c0

File tree: 3 files changed, +188 -184 lines

drivers/nvdimm/nd.h

Lines changed: 7 additions & 0 deletions
@@ -272,9 +272,16 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
 #if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap);
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	return ERR_PTR(-ENXIO);
+}
 static inline int devm_nsio_enable(struct device *dev,
 		struct nd_namespace_io *nsio)
 {
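
The #else stub keeps CONFIG_ND_CLAIM=n builds free of #ifdefs at the call sites: consumers call nvdimm_setup_pfn() unconditionally and handle the ERR_PTR like any other failure. A minimal caller sketch (hypothetical function, not part of this commit):

/* Hypothetical caller sketch -- illustrates the interface only */
static int example_enable_pfn(struct nd_pfn *nd_pfn)
{
	struct vmem_altmap __altmap, *altmap;
	struct resource res;

	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap); /* -ENXIO when CONFIG_ND_CLAIM=n */

	/*
	 * 'res' now holds the padded/truncated data range; 'altmap' is
	 * NULL for PFN_MODE_RAM, or describes the reserved memmap pages
	 * for PFN_MODE_PMEM (e.g. to feed devm_memremap_pages()).
	 */
	return 0;
}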

drivers/nvdimm/pfn_devs.c

Lines changed: 181 additions & 0 deletions
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+#include <linux/memremap.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/genhd.h>
@@ -441,3 +442,183 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 	return rc;
 }
 EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
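
To make the reserve arithmetic concrete, here is the same math as a standalone userspace sketch (assumptions: x86_64 defaults of 4K pages and 128M sections, and a made-up base address):

/* Userspace sketch of init_altmap_base()/init_altmap_reserve() */
#include <stdio.h>

#define PAGE_SHIFT		12	/* 4K pages */
#define PFN_SECTION_SHIFT	15	/* 128M section / 4K page */

int main(void)
{
	unsigned long long base = 0x48600000ULL;	/* hypothetical base */
	unsigned long base_pfn = base >> PAGE_SHIFT;		/* 296448 */
	unsigned long aligned = (base_pfn >> PFN_SECTION_SHIFT)
			<< PFN_SECTION_SHIFT;			/* 294912 */
	unsigned long reserve = (0x2000 >> PAGE_SHIFT)	/* 2 pfns, 8K info block */
			+ (base_pfn - aligned);		/* 1536 pfns of section pad */

	printf("base_pfn=%lu altmap.base_pfn=%lu reserve=%lu pfns\n",
			base_pfn, aligned, reserve);	/* reserve = 1538 */
	return 0;
}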
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	u64 offset = le64_to_cpu(pfn_sb->dataoff);
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
+	struct vmem_altmap __altmap = {
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
+	};
+
+	memcpy(res, &nsio->res, sizeof(*res));
+	res->start += start_pad;
+	res->end -= end_trunc;
+
+	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+	if (nd_pfn->mode == PFN_MODE_RAM) {
+		if (offset < SZ_8K)
+			return ERR_PTR(-EINVAL);
+		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		memcpy(altmap, &__altmap, sizeof(*altmap));
+		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->alloc = 0;
+	} else
+		return ERR_PTR(-ENXIO);
+
+	return altmap;
+}
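
In PFN_MODE_PMEM the memmap (the struct page array) is carved out of the namespace itself, so the altmap hands the space between the 8K info block and the data offset to the memory hotplug code as allocatable memmap pages. Continuing the hypothetical numbers above with a 4M data offset, the fields would come out roughly as:

/* Illustrative values only -- same hypothetical base, dataoff = 4M */
struct vmem_altmap altmap = {
	.base_pfn = 294912,			/* section-aligned base pfn */
	.reserve  = 1538,			/* section pad + 8K info block */
	.free     = (0x400000 - 0x2000) >> 12,	/* 1022 pfns for struct pages */
	.alloc    = 0,				/* nothing allocated yet */
};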
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
+	struct nd_region *nd_region;
+	struct nd_pfn_sb *pfn_sb;
+	unsigned long npfns;
+	phys_addr_t offset;
+	u64 checksum;
+	int rc;
+
+	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+	if (!pfn_sb)
+		return -ENOMEM;
+
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn);
+	if (rc != -ENODEV)
+		return rc;
+
+	/* no info block, do init */;
+	nd_region = to_nd_region(nd_pfn->dev.parent);
+	if (nd_region->ro) {
+		dev_info(&nd_pfn->dev,
+				"%s is read-only, unable to init metadata\n",
+				dev_name(&nd_region->dev));
+		return -ENXIO;
+	}
+
+	memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
+	/*
+	 * Note, we use 64 here for the standard size of struct page,
+	 * debugging options may cause it to be larger in which case the
+	 * implementation will limit the pfns advertised through
+	 * ->direct_access() to those that are included in the memmap.
+	 */
+	start += start_pad;
+	size = resource_size(&nsio->res);
+	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	if (nd_pfn->mode == PFN_MODE_PMEM)
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
+	else if (nd_pfn->mode == PFN_MODE_RAM)
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
+	else
+		return -ENXIO;
+
+	if (offset + start_pad + end_trunc >= size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		return -ENXIO;
+	}
+
+	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+	pfn_sb->dataoff = cpu_to_le64(offset);
+	pfn_sb->npfns = cpu_to_le64(npfns);
+	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
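
The sizing above runs in two passes: npfns is first estimated from the whole data area, the memmap those pages would need is added to the 8K info block and rounded up to the requested alignment, and npfns is then recomputed from what remains. A worked example mirroring that math (hypothetical 128M namespace in PFN_MODE_PMEM, 2M alignment, start already a multiple of the alignment, no start_pad/end_trunc):

/* Two-pass sizing from nd_pfn_init(), with concrete numbers */
unsigned long size = 128UL << 20, align = 2UL << 20;
unsigned long npfns = (size - SZ_8K) / SZ_4K;		/* 32766, first pass */
/* 8K info block + 64 bytes of memmap per pfn, rounded up to 'align': */
phys_addr_t offset = ALIGN(SZ_8K + 64 * npfns, align);	/* 2105216 -> 4M */
npfns = (size - offset) / SZ_4K;			/* 31744 advertised pfns */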
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return ERR_PTR(-ENODEV);
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
