@@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
+static u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
 /*
  * We hotplug memory at section granularity, pad the reserved area from
  * the previous section base to the namespace base address.
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base)
 
 static unsigned long init_altmap_reserve(resource_size_t base)
 {
-	unsigned long reserve = PFN_UP(SZ_8K);
+	unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
 	unsigned long base_pfn = PHYS_PFN(base);
 
 	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
@@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
 	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	u32 reserve = info_block_reserve();
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	resource_size_t base = nsio->res.start + start_pad;
@@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 	res->end -= end_trunc;
 
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (offset < reserve)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		pgmap->altmap_valid = false;
@@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 				le64_to_cpu(nd_pfn->pfn_sb->npfns),
 				nd_pfn->npfns);
 		memcpy(altmap, &__altmap, sizeof(*altmap));
-		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->free = PHYS_PFN(offset - reserve);
 		altmap->alloc = 0;
 		pgmap->altmap_valid = true;
 	} else
@@ -687,9 +693,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	u32 start_pad, end_trunc, reserve = info_block_reserve();
 	resource_size_t start, size;
 	struct nd_region *nd_region;
-	u32 start_pad, end_trunc;
 	struct nd_pfn_sb *pfn_sb;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -734,18 +740,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start = nsio->res.start + start_pad;
 	size = resource_size(&nsio->res);
-	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
 			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
 		 * The altmap should be padded out to the block size used
 		 * when populating the vmemmap. This *should* be equal to
 		 * PMD_SIZE for most architectures.
 		 */
-		offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+		offset = ALIGN(start + reserve + 64 * npfns + dax_label_reserve,
 				max(nd_pfn->align, PMD_SIZE)) - start;
 	} else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(start + SZ_8K + dax_label_reserve,
+		offset = ALIGN(start + reserve + dax_label_reserve,
 				nd_pfn->align) - start;
 	else
 		return -ENXIO;
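
A note on the arithmetic this patch centralizes: info_block_reserve() rounds the 8K info block up to PAGE_SIZE, so on kernels built with pages larger than 8K (64K pages, for example) the reserved region grows to one full page instead of staying at a hard-coded SZ_8K. The small user-space sketch below, using stand-in SZ_8K/ALIGN macros modelled on the kernel ones, only illustrates that rounding; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins modelled on the kernel macros the patch relies on. */
#define SZ_8K		(8UL * 1024)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Two common page sizes; the 64K case shows why the ALIGN() matters. */
	unsigned long page_sizes[] = { 4096, 65536 };

	for (int i = 0; i < 2; i++) {
		unsigned long page_size = page_sizes[i];
		unsigned int page_shift = __builtin_ctzl(page_size);
		uint64_t reserve = ALIGN(SZ_8K, page_size);

		printf("PAGE_SIZE=%lu: reserve=%llu bytes (%llu page(s))\n",
		       page_size, (unsigned long long)reserve,
		       (unsigned long long)(reserve >> page_shift));
	}
	return 0;
}

With 4K pages the reserve stays at 8K (two pages), matching the old behavior; with 64K pages it becomes a single 64K page, which is what the switch in init_altmap_reserve() from PFN_UP(SZ_8K) to info_block_reserve() >> PAGE_SHIFT accounts for.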