 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
- * Once memblock is setup the memory can be allocated using either
- * memblock or bootmem APIs.
+ * Once memblock is set up the memory can be allocated using one of the
+ * API variants:
+ *
+ * * :c:func:`memblock_phys_alloc*` - these functions return the
+ *   **physical** address of the allocated memory
+ * * :c:func:`memblock_alloc*` - these functions return the **virtual**
+ *   address of the allocated memory.
+ *
+ * Note that both API variants use implicit assumptions about allowed
+ * memory ranges and the fallback methods. Consult the documentation
+ * of the :c:func:`memblock_alloc_internal` and
+ * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
+ * description.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
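To make the distinction between the two variants concrete, here is a minimal usage sketch (illustrative only, not part of this commit; the sizes and alignments are placeholders):

	/* Physical variant: the caller wants a physical address,
	 * e.g. for early page-table or firmware-facing setup. */
	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	/* Virtual variant: the caller wants a directly usable pointer. */
	void *buf = memblock_alloc(1024, SMP_CACHE_BYTES);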
@@ -435,17 +446,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
	else
		in_slab = &memblock_reserved_in_slab;

-	/* Try to find some space for it.
-	 *
-	 * WARNING: We assume that either slab_is_available() and we use it or
-	 * we use MEMBLOCK for allocations. That means that this is unsafe to
-	 * use when bootmem is currently active (unless bootmem itself is
-	 * implemented on top of MEMBLOCK which isn't the case yet)
-	 *
-	 * This should however not be an issue for now, as we currently only
-	 * call into MEMBLOCK while it's still active, or much later when slab
-	 * is active for memory hotplug operations
-	 */
+	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
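For context, the code above picks its allocator based on boot progress; a hedged sketch of that pattern (illustrative only, heavily simplified from memblock_double_array(): memblock_alloc() stands in for the more careful range search the real function performs):

	/* Before slab is available the resize must be served by memblock
	 * itself; once slab is up, a regular kmalloc() is safe. */
	static void *grow_region_array(size_t new_size, bool use_slab)
	{
		if (use_slab)
			return kmalloc(new_size, GFP_KERNEL);
		return memblock_alloc(new_size, PAGE_SIZE);
	}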
@@ -989,7 +990,7 @@ static bool should_skip_region(struct memblock_region *m, int nid, int flags)
}

/**
- * __next__mem_range - next function for for_each_free_mem_range() etc.
+ * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
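For orientation, __next_mem_range() is normally reached through the iterator macro named above; a minimal walk might look like this (illustrative only, not part of this commit):

	u64 i;
	phys_addr_t start, end;

	/* Visit every free range on any node; the macro advances
	 * __next_mem_range() via @i. */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
		pr_info("free range: %pa..%pa\n", &start, &end);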
@@ -1335,6 +1336,18 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
	return found;
}

+ /**
+ * memblock_phys_alloc_range - allocate a memory block inside specified range
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @start: the lower bound of the memory region to allocate (physical address)
+ * @end: the upper bound of the memory region to allocate (physical address)
+ *
+ * Allocate @size bytes between @start and @end.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
@@ -1343,6 +1356,19 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

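A short usage sketch for memblock_phys_alloc_range() (illustrative only; the SZ_* bounds come from <linux/sizes.h> and are placeholders):

	/* Illustrative: 1 MiB, 4 KiB aligned, constrained below 4 GiB. */
	phys_addr_t low_buf = memblock_phys_alloc_range(SZ_1M, SZ_4K, 0, SZ_4G);
	if (!low_buf)
		pr_warn("low-memory allocation failed\n");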
+ /**
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Allocates memory block from the specified NUMA node. If the node
+ * has no available memory, attempts to allocate from any node in the
+ * system.
+ *
+ * Return: physical address of the allocated memory block on success,
+ * %0 on failure.
+ */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
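A short usage sketch for the NUMA-aware variant (illustrative only; the node id and size are placeholders):

	/* Illustrative: per-node scratch buffer, falling back to any
	 * node when @nid has no memory. */
	phys_addr_t pa = memblock_phys_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, nid);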
@@ -1469,13 +1495,13 @@ void * __init memblock_alloc_try_nid(
}

/**
1472
- * __memblock_free_late - free bootmem block pages directly to buddy allocator
1498
+ * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
- * This is only useful when the memblock allocator has already been torn
+ * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator, no bootmem metadata is updated because it is gone.
+ * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
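A short usage sketch for the late-free path (illustrative only; reserved_base and reserved_size are hypothetical names standing for an earlier memblock reservation):

	/* Illustrative: once the buddy allocator is live, hand back a
	 * no-longer-needed early reservation directly to the page allocator. */
	__memblock_free_late(reserved_base, reserved_size);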