@@ -386,40 +386,148 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
386
386
}
387
387
EXPORT_SYMBOL_GPL (nvdimm_create );
388
388
389
/*
 * Accounting state threaded through device_for_each_child() callbacks
 * (alias_dpa_busy() / blk_dpa_busy()) while computing the BLK-available
 * DPA of a region whose DIMM may also back aliased PMEM regions.
 */
struct blk_alloc_info {
	struct nd_mapping *nd_mapping;	/* mapping[0] of the BLK region under evaluation */
	resource_size_t available, busy; /* running free total / per-resource busy count */
	struct resource *res;		/* current "blk" dpa resource being checked */
};
395
/*
 * alias_dpa_busy() - device_for_each_child() callback accounting DPA that a
 * PMEM region claims on the same dimm as info->nd_mapping.
 * @dev: candidate child device (only PMEM regions are considered)
 * @data: struct blk_alloc_info accumulator
 *
 * Deducts from info->available the span from the start of the aliased
 * mapping up to the end of the last PMEM allocation, plus any BLK
 * allocations overlapping the remainder.  Always returns 0 so the
 * bus-child walk visits every region.
 */
static int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new, busy;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	/* find this PMEM region's mapping that shares our dimm, if any */
	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;
 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping that is not already
	 * covered by a blk allocation.
	 */
	busy = 0;
	for_each_dpa_resource(ndd, res) {
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			if (strncmp(res->name, "pmem", 4) == 0) {
				/*
				 * Advance blk_start past this pmem
				 * allocation (clamped to the mapping end)
				 * and rescan, since earlier resources may
				 * now overlap the new window.
				 */
				new = max(blk_start, min(map_end + 1,
							res->end + 1));
				if (new != blk_start) {
					blk_start = new;
					goto retry;
				}
			} else
				/* blk allocation: count only the overlap */
				busy += min(map_end, res->end)
					- max(nd_mapping->start, res->start) + 1;
		} else if (nd_mapping->start > res->start
				&& map_end < res->end) {
			/* total eclipse of the PMEM region mapping */
			busy += nd_mapping->size;
			break;
		}
	}

	info->available -= blk_start - nd_mapping->start + busy;
	return 0;
}
454
+ static int blk_dpa_busy (struct device * dev , void * data )
455
+ {
456
+ struct blk_alloc_info * info = data ;
457
+ struct nd_mapping * nd_mapping ;
458
+ struct nd_region * nd_region ;
459
+ resource_size_t map_end ;
460
+ int i ;
461
+
462
+ if (!is_nd_pmem (dev ))
463
+ return 0 ;
464
+
465
+ nd_region = to_nd_region (dev );
466
+ for (i = 0 ; i < nd_region -> ndr_mappings ; i ++ ) {
467
+ nd_mapping = & nd_region -> mapping [i ];
468
+ if (nd_mapping -> nvdimm == info -> nd_mapping -> nvdimm )
469
+ break ;
470
+ }
471
+
472
+ if (i >= nd_region -> ndr_mappings )
473
+ return 0 ;
474
+
475
+ map_end = nd_mapping -> start + nd_mapping -> size - 1 ;
476
+ if (info -> res -> start >= nd_mapping -> start
477
+ && info -> res -> start < map_end ) {
478
+ if (info -> res -> end <= map_end ) {
479
+ info -> busy = 0 ;
480
+ return 1 ;
481
+ } else {
482
+ info -> busy -= info -> res -> end - map_end ;
483
+ return 0 ;
484
+ }
485
+ } else if (info -> res -> end >= nd_mapping -> start
486
+ && info -> res -> end <= map_end ) {
487
+ info -> busy -= nd_mapping -> start - info -> res -> start ;
488
+ return 0 ;
489
+ } else {
490
+ info -> busy -= nd_mapping -> size ;
491
+ return 0 ;
492
+ }
493
+ }
494
+
389
495
/**
390
496
* nd_blk_available_dpa - account the unused dpa of BLK region
391
497
* @nd_mapping: container of dpa-resource-root + labels
392
498
*
393
- * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
499
+ * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
500
+ * we arrange for them to never start at an lower dpa than the last
501
+ * PMEM allocation in an aliased region.
394
502
*/
395
- resource_size_t nd_blk_available_dpa (struct nd_mapping * nd_mapping )
503
+ resource_size_t nd_blk_available_dpa (struct nd_region * nd_region )
396
504
{
505
+ struct nvdimm_bus * nvdimm_bus = walk_to_nvdimm_bus (& nd_region -> dev );
506
+ struct nd_mapping * nd_mapping = & nd_region -> mapping [0 ];
397
507
struct nvdimm_drvdata * ndd = to_ndd (nd_mapping );
398
- resource_size_t map_end , busy = 0 , available ;
508
+ struct blk_alloc_info info = {
509
+ .nd_mapping = nd_mapping ,
510
+ .available = nd_mapping -> size ,
511
+ };
399
512
struct resource * res ;
400
513
401
514
if (!ndd )
402
515
return 0 ;
403
516
404
- map_end = nd_mapping -> start + nd_mapping -> size - 1 ;
405
- for_each_dpa_resource (ndd , res )
406
- if (res -> start >= nd_mapping -> start && res -> start < map_end ) {
407
- resource_size_t end = min (map_end , res -> end );
517
+ device_for_each_child (& nvdimm_bus -> dev , & info , alias_dpa_busy );
408
518
409
- busy += end - res -> start + 1 ;
410
- } else if (res -> end >= nd_mapping -> start
411
- && res -> end <= map_end ) {
412
- busy += res -> end - nd_mapping -> start ;
413
- } else if (nd_mapping -> start > res -> start
414
- && nd_mapping -> start < res -> end ) {
415
- /* total eclipse of the BLK region mapping */
416
- busy += nd_mapping -> size ;
417
- }
519
+ /* now account for busy blk allocations in unaliased dpa */
520
+ for_each_dpa_resource (ndd , res ) {
521
+ if (strncmp (res -> name , "blk" , 3 ) != 0 )
522
+ continue ;
418
523
419
- available = map_end - nd_mapping -> start + 1 ;
420
- if (busy < available )
421
- return available - busy ;
422
- return 0 ;
524
+ info .res = res ;
525
+ info .busy = resource_size (res );
526
+ device_for_each_child (& nvdimm_bus -> dev , & info , blk_dpa_busy );
527
+ info .available -= info .busy ;
528
+ }
529
+
530
+ return info .available ;
423
531
}
424
532
425
533
/**
@@ -451,21 +559,16 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
451
559
map_start = nd_mapping -> start ;
452
560
map_end = map_start + nd_mapping -> size - 1 ;
453
561
blk_start = max (map_start , map_end + 1 - * overlap );
454
- for_each_dpa_resource (ndd , res )
562
+ for_each_dpa_resource (ndd , res ) {
455
563
if (res -> start >= map_start && res -> start < map_end ) {
456
564
if (strncmp (res -> name , "blk" , 3 ) == 0 )
457
- blk_start = min (blk_start , res -> start );
458
- else if (res -> start != map_start ) {
565
+ blk_start = min (blk_start ,
566
+ max (map_start , res -> start ));
567
+ else if (res -> end > map_end ) {
459
568
reason = "misaligned to iset" ;
460
569
goto err ;
461
- } else {
462
- if (busy ) {
463
- reason = "duplicate overlapping PMEM reservations?" ;
464
- goto err ;
465
- }
570
+ } else
466
571
busy += resource_size (res );
467
- continue ;
468
- }
469
572
} else if (res -> end >= map_start && res -> end <= map_end ) {
470
573
if (strncmp (res -> name , "blk" , 3 ) == 0 ) {
471
574
/*
@@ -474,15 +577,14 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
474
577
* be used for BLK.
475
578
*/
476
579
blk_start = map_start ;
477
- } else {
478
- reason = "misaligned to iset" ;
479
- goto err ;
480
- }
580
+ } else
581
+ busy += resource_size (res );
481
582
} else if (map_start > res -> start && map_start < res -> end ) {
482
583
/* total eclipse of the mapping */
483
584
busy += nd_mapping -> size ;
484
585
blk_start = map_start ;
485
586
}
587
+ }
486
588
487
589
* overlap = map_end + 1 - blk_start ;
488
590
available = blk_start - map_start ;
@@ -491,10 +593,6 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
491
593
return 0 ;
492
594
493
595
err :
494
- /*
495
- * Something is wrong, PMEM must align with the start of the
496
- * interleave set, and there can only be one allocation per set.
497
- */
498
596
nd_dbg_dpa (nd_region , ndd , res , "%s\n" , reason );
499
597
return 0 ;
500
598
}
0 commit comments