@@ -43,7 +43,10 @@ struct pmem_device {
 	phys_addr_t		data_offset;
 	u64			pfn_flags;
 	void __pmem		*virt_addr;
+	/* immutable base size of the namespace */
 	size_t			size;
+	/* trim size when namespace capacity has been section aligned */
+	u32			pfn_pad;
 	struct badblocks	bb;
 };
 
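The new pfn_pad field records the bytes given up when the namespace is trimmed to section boundaries, and every capacity calculation now subtracts it alongside data_offset. A minimal userspace sketch of that accounting (the struct and values below are illustrative stand-ins, not the driver's types):

#include <stdint.h>
#include <stdio.h>

/* reduced model of struct pmem_device, for illustration only */
struct pmem_model {
	uint64_t size;        /* immutable base size of the namespace */
	uint64_t data_offset; /* reserved for the pfn superblock + memmap */
	uint32_t pfn_pad;     /* start_pad + end_trunc, see nd_pfn_init() */
};

int main(void)
{
	/* hypothetical 8 GiB namespace, 2 MiB reserved, 128 MiB trimmed */
	struct pmem_model pmem = {
		.size = 8ULL << 30,
		.data_offset = 2ULL << 20,
		.pfn_pad = 128U << 20,
	};

	/* mirrors the set_capacity() change: usable bytes in 512B sectors */
	uint64_t sectors = (pmem.size - pmem.pfn_pad - pmem.data_offset) / 512;
	printf("capacity: %llu sectors\n", (unsigned long long)sectors);
	return 0;
}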
@@ -145,7 +148,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
-	return pmem->size - offset;
+	return pmem->size - pmem->pfn_pad - offset;
 }
 
 static const struct block_device_operations pmem_fops = {
@@ -236,7 +239,8 @@ static int pmem_attach_disk(struct device *dev,
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(ndns, disk->disk_name);
 	disk->driverfs_dev = dev;
-	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
+	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
+			/ 512);
 	pmem->pmem_disk = disk;
 	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
@@ -279,6 +283,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
 	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -304,28 +311,66 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	}
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
-	npfns = (pmem->size - SZ_8K) / SZ_4K;
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
 	/*
 	 * Note, we use 64 here for the standard size of struct page,
 	 * debugging options may cause it to be larger in which case the
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
+	start += start_pad;
+	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
 	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
 	else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(SZ_8K, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
 	else
 		goto err;
 
-	npfns = (pmem->size - offset) / SZ_4K;
+	if (offset + start_pad + end_trunc >= pmem->size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		goto err;
+	}
+
+	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
 	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
 	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
 	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
 	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
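The collision handling above is plain alignment arithmetic: pad the start up to the next section boundary and truncate the end down to the previous one, but only when region_intersects() reports a REGION_MIXED overlap with 'System RAM'. A self-contained sketch of the same rounding, assuming 128 MiB sections as on x86_64 (the kernel derives the real size from SECTION_SIZE_BITS; the helper names and addresses here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* x86_64 uses 27-bit (128 MiB) sections; assumed here for illustration */
#define SECTION_SIZE	(1ULL << 27)

static uint64_t align_down(uint64_t a) { return a & ~(SECTION_SIZE - 1); }
static uint64_t align_up(uint64_t a)   { return align_down(a + SECTION_SIZE - 1); }

int main(void)
{
	/* hypothetical namespace straddling section boundaries */
	uint64_t start = 0x104000000ULL;	/* 4 GiB + 64 MiB */
	uint64_t size  = 0x100000000ULL;	/* 4 GiB */
	uint64_t end   = start + size;

	/* models nd_pfn_init(): pad the start up, truncate the end down */
	uint32_t start_pad = (uint32_t)(align_up(start) - start);
	uint32_t end_trunc = (uint32_t)(end - align_down(end));

	printf("start_pad=%u end_trunc=%u usable=%llu\n",
	       (unsigned)start_pad, (unsigned)end_trunc,
	       (unsigned long long)(size - start_pad - end_trunc));
	return 0;
}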
@@ -376,60 +421,57 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 	return reserve;
 }
 
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	struct device *dev = &nd_pfn->dev;
-	struct nd_region *nd_region;
-	struct vmem_altmap *altmap;
-	struct nd_pfn_sb *pfn_sb;
-	struct pmem_device *pmem;
-	struct request_queue *q;
-	phys_addr_t offset;
 	int rc;
+	struct resource res;
+	struct request_queue *q;
+	struct pmem_device *pmem;
+	struct vmem_altmap *altmap;
+	struct device *dev = &nd_pfn->dev;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
 	struct vmem_altmap __altmap = {
-		.base_pfn = init_altmap_base(nsio->res.start),
-		.reserve = init_altmap_reserve(nsio->res.start),
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
 	};
 
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
-
-	nd_region = to_nd_region(dev->parent);
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return rc;
-
-	pfn_sb = nd_pfn->pfn_sb;
-	offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem = dev_get_drvdata(dev);
+	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem->pfn_pad = start_pad + end_trunc;
 	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (pmem->data_offset < SZ_8K)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
+		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
 			/ PAGE_SIZE;
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
 					"number of pfns truncated from %lld to %ld\n",
 					le64_to_cpu(nd_pfn->pfn_sb->npfns),
 					nd_pfn->npfns);
 		altmap = &__altmap;
-		altmap->free = __phys_to_pfn(offset - SZ_8K);
+		altmap->free = __phys_to_pfn(pmem->data_offset - SZ_8K);
 		altmap->alloc = 0;
 	} else {
 		rc = -ENXIO;
 		goto err;
 	}
 
 	/* establish pfn range for lookup, and switch to direct map */
-	pmem = dev_get_drvdata(dev);
 	q = pmem->pmem_queue;
+	memcpy(&res, &nsio->res, sizeof(res));
+	res.start += start_pad;
+	res.end -= end_trunc;
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
+	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
 			&q->q_usage_counter, altmap);
 	pmem->pfn_flags |= PFN_MAP;
 	if (IS_ERR(pmem->virt_addr)) {
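With start_pad and end_trunc read back from the superblock, the attach path clones nsio->res and shrinks both ends before handing it to devm_memremap_pages(), so only the section-aligned interior gets a memmap. A minimal model of that trim (struct resource reduced to an inclusive start/end pair; the numbers are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* reduced stand-in for struct resource: inclusive [start, end] */
struct res { uint64_t start, end; };

int main(void)
{
	struct res nsres = { 0x104000000ULL, 0x203FFFFFFULL };	/* example */
	uint32_t start_pad = 64U << 20, end_trunc = 64U << 20;	/* hypothetical */

	/* models __nvdimm_namespace_attach_pfn(): copy, then trim both ends */
	struct res trimmed = nsres;
	trimmed.start += start_pad;
	trimmed.end -= end_trunc;

	printf("map [%#llx-%#llx]\n", (unsigned long long)trimmed.start,
	       (unsigned long long)trimmed.end);
	return 0;
}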
@@ -438,7 +480,6 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	}
 
 	/* attach pmem disk in "pfn-mode" */
-	pmem->data_offset = offset;
 	rc = pmem_attach_disk(dev, ndns, pmem);
 	if (rc)
 		goto err;
@@ -447,6 +488,22 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 err:
 	nvdimm_namespace_detach_pfn(ndns);
 	return rc;
+
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return -ENODEV;
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return rc;
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_namespace_attach_pfn(nd_pfn);
 }
 
 static int nd_pmem_probe(struct device *dev)
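Splitting the old nvdimm_namespace_attach_pfn() into a thin wrapper plus __nvdimm_namespace_attach_pfn() is an ordering fix as much as a cleanup: the __altmap initializer consumes start_pad from a valid pfn superblock, so nd_pfn_init() must complete before the vmem_altmap is built, which the wrapper now guarantees.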