Commit 6ed311b

memblock: Move functions around into a more sensible order

Some shuffling is needed for doing array resize so we may as well put
some sense into the ordering of the functions in the whole memblock.c
file. No code change. Added some comments.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
1 parent 7f219c7 · commit 6ed311b

mm/memblock.c

Lines changed: 159 additions & 142 deletions
@@ -24,40 +24,18 @@ static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIO
 
 #define MEMBLOCK_ERROR	(~(phys_addr_t)0)
 
-static int __init early_memblock(char *p)
-{
-	if (p && strstr(p, "debug"))
-		memblock_debug = 1;
-	return 0;
-}
-early_param("memblock", early_memblock);
+/*
+ * Address comparison utilities
+ */
 
-static void memblock_dump(struct memblock_type *region, char *name)
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
 {
-	unsigned long long base, size;
-	int i;
-
-	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
-	for (i = 0; i < region->cnt; i++) {
-		base = region->regions[i].base;
-		size = region->regions[i].size;
-
-		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-			name, i, base, base + size - 1, size);
-	}
+	return addr & ~(size - 1);
 }
 
-void memblock_dump_all(void)
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 {
-	if (!memblock_debug)
-		return;
-
-	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
-
-	memblock_dump(&memblock.memory, "memory");
-	memblock_dump(&memblock.reserved, "reserved");
+	return (addr + (size - 1)) & ~(size - 1);
 }
 
 static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
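
The two alignment helpers that now open the file rely on `size` being a
power of two, since they work by bit-masking. A minimal standalone
harness (illustration only, not part of the commit) shows the rounding:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);		/* clear the low bits */
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);	/* bump, then clear */
}

int main(void)
{
	/* Round 0x12345 to a 4 KiB (0x1000) boundary. */
	printf("down: 0x%llx\n",
	       (unsigned long long)memblock_align_down(0x12345, 0x1000)); /* 0x12000 */
	printf("up:   0x%llx\n",
	       (unsigned long long)memblock_align_up(0x12345, 0x1000));   /* 0x13000 */
	return 0;
}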
@@ -88,6 +66,77 @@ static long memblock_regions_adjacent(struct memblock_type *type,
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
 
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
+{
+	unsigned long i;
+
+	for (i = 0; i < type->cnt; i++) {
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
+		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
+			break;
+	}
+
+	return (i < type->cnt) ? i : -1;
+}
+
+/*
+ * Find, allocate, deallocate or reserve unreserved regions. All allocations
+ * are top-down.
+ */
+
+static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
+					       phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t base, res_base;
+	long j;
+
+	base = memblock_align_down((end - size), align);
+	while (start <= base) {
+		j = memblock_overlaps_region(&memblock.reserved, base, size);
+		if (j < 0)
+			return base;
+		res_base = memblock.reserved.regions[j].base;
+		if (res_base < size)
+			break;
+		base = memblock_align_down(res_base - size, align);
+	}
+
+	return MEMBLOCK_ERROR;
+}
+
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	long i;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
+
+	BUG_ON(0 == size);
+
+	size = memblock_align_up(size, align);
+
+	/* Pump up max_addr */
+	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+		max_addr = memblock.current_limit;
+
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
+	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
+
+		if (memblocksize < size)
+			continue;
+		base = min(memblockbase + memblocksize, max_addr);
+		res_base = memblock_find_region(memblockbase, base, size, align);
+		if (res_base != MEMBLOCK_ERROR)
+			return res_base;
+	}
+	return MEMBLOCK_ERROR;
+}
+
 static void memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	unsigned long i;
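
The two finders moved here implement the top-down policy:
memblock_find_region() starts at the highest aligned base that fits
below `end` and, each time the candidate overlaps a reserved region,
jumps to just below that region and retries. A standalone harness
(hypothetical, not part of the commit; the kernel types are stubbed
out) mirroring the loop against a single toy reserved region:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

struct region { phys_addr_t base, size; };

/* One toy reserved region covering [0x8000, 0xC000). */
static struct region reserved[] = { { 0x8000, 0x4000 } };

static phys_addr_t align_down(phys_addr_t a, phys_addr_t s) { return a & ~(s - 1); }

static long overlaps(phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	/* Same overlap test as memblock_addrs_overlap(). */
	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
		if (base < reserved[i].base + reserved[i].size &&
		    reserved[i].base < base + size)
			return i;
	return -1;
}

static phys_addr_t find_region(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base = align_down(end - size, align);

	while (start <= base) {
		long j = overlaps(base, size);
		if (j < 0)
			return base;		/* hole found */
		if (reserved[j].base < size)
			break;			/* no room below */
		base = align_down(reserved[j].base - size, align);
	}
	return MEMBLOCK_ERROR;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)find_region(0, 0x10000, 0x5000, 0x1000));
	return 0;
}

With one reservation at [0x8000, 0xC000), the first candidate 0xB000
collides with it, so the search drops below the reservation and
returns 0x3000.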
@@ -107,22 +156,6 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
-void __init memblock_analyze(void)
-{
-	int i;
-
-	/* Check marker in the unused last array entry */
-	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-
-	memblock.memory_size = 0;
-
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory_size += memblock.memory.regions[i].size;
-}
-
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
@@ -260,49 +293,47 @@ long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	unsigned long i;
+	phys_addr_t found;
 
-	for (i = 0; i < type->cnt; i++) {
-		phys_addr_t rgnbase = type->regions[i].base;
-		phys_addr_t rgnsize = type->regions[i].size;
-		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
-			break;
-	}
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
 
-	return (i < type->cnt) ? i : -1;
-}
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
 
-static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
-	return addr & ~(size - 1);
+	return 0;
 }
 
-static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	return (addr + (size - 1)) & ~(size - 1);
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
 }
 
-static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
-					       phys_addr_t size, phys_addr_t align)
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	phys_addr_t base, res_base;
-	long j;
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+}
 
-	base = memblock_align_down((end - size), align);
-	while (start <= base) {
-		j = memblock_overlaps_region(&memblock.reserved, base, size);
-		if (j < 0)
-			return base;
-		res_base = memblock.reserved.regions[j].base;
-		if (res_base < size)
-			break;
-		base = memblock_align_down(res_base - size, align);
-	}
 
-	return MEMBLOCK_ERROR;
-}
+/*
+ * Additional node-local allocators. Search for node memory is bottom up
+ * and walks memblock regions within that node bottom-up as well, but allocation
+ * within an memblock region is top-down.
+ */
 
 phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
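
With the move, the public allocation entry points now read top-down and
differ only in failure policy and upper limit: __memblock_alloc_base()
returns 0 when nothing fits, memblock_alloc_base() panics instead, and
memblock_alloc() is the panicking variant capped at
memblock.current_limit. A sketch of a caller (hypothetical early-boot
code, not from this commit):

static void __init example_early_setup(void)
{
	phys_addr_t buf;

	/* Must succeed: one 4 KiB page below memblock.current_limit,
	 * or the kernel panics inside memblock_alloc_base(). */
	buf = memblock_alloc(0x1000, 0x1000);

	/* May fail: 1 MiB below 4 GiB; a return of 0 means the caller
	 * has to provide its own fallback. */
	buf = __memblock_alloc_base(0x100000, 0x100000,
				    (phys_addr_t)0x100000000ULL);
	if (!buf)
		pr_info("example: no 1 MiB block below 4 GiB\n");
}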
@@ -364,72 +395,6 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 	return memblock_alloc(size, align);
 }
 
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
-}
-
-static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	long i;
-	phys_addr_t base = 0;
-	phys_addr_t res_base;
-
-	BUG_ON(0 == size);
-
-	/* Pump up max_addr */
-	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
-		max_addr = memblock.current_limit;
-
-	/* We do a top-down search, this tends to limit memory
-	 * fragmentation by keeping early boot allocs near the
-	 * top of memory
-	 */
-	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t memblockbase = memblock.memory.regions[i].base;
-		phys_addr_t memblocksize = memblock.memory.regions[i].size;
-
-		if (memblocksize < size)
-			continue;
-		base = min(memblockbase + memblocksize, max_addr);
-		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR)
-			return res_base;
-	}
-	return MEMBLOCK_ERROR;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t found;
-
-	/* We align the size to limit fragmentation. Without this, a lot of
-	 * small allocs quickly eat up the whole reserve array on sparc
-	 */
-	size = memblock_align_up(size, align);
-
-	found = memblock_find_base(size, align, max_addr);
-	if (found != MEMBLOCK_ERROR &&
-	    memblock_add_region(&memblock.reserved, found, size) >= 0)
-		return found;
-
-	return 0;
-}
-
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
@@ -534,6 +499,50 @@ void __init memblock_set_current_limit(phys_addr_t limit)
 	memblock.current_limit = limit;
 }
 
+static void memblock_dump(struct memblock_type *region, char *name)
+{
+	unsigned long long base, size;
+	int i;
+
+	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
+
+	for (i = 0; i < region->cnt; i++) {
+		base = region->regions[i].base;
+		size = region->regions[i].size;
+
+		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
+			name, i, base, base + size - 1, size);
+	}
+}
+
+void memblock_dump_all(void)
+{
+	if (!memblock_debug)
+		return;
+
+	pr_info("MEMBLOCK configuration:\n");
+	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
+
+	memblock_dump(&memblock.memory, "memory");
+	memblock_dump(&memblock.reserved, "reserved");
+}
+
+void __init memblock_analyze(void)
+{
+	int i;
+
+	/* Check marker in the unused last array entry */
+	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= (phys_addr_t)RED_INACTIVE);
+	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
+		!= (phys_addr_t)RED_INACTIVE);
+
+	memblock.memory_size = 0;
+
+	for (i = 0; i < memblock.memory.cnt; i++)
+		memblock.memory_size += memblock.memory.regions[i].size;
+}
+
 void __init memblock_init(void)
 {
 	/* Hookup the initial arrays */
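
The debug and accounting helpers now sit alongside the init code they
belong with. The expected call order from architecture setup
(illustrative sketch only; exact call sites vary per architecture) is
to register regions, run memblock_analyze() to recompute
memblock.memory_size, then memblock_dump_all(), which prints nothing
unless memblock_debug is set:

void __init example_arch_mem_init(void)
{
	memblock_init();			/* hook up the static init arrays */
	memblock_add(0, 0x10000000);		/* register 256 MiB of RAM */
	memblock_reserve(0, 0x8000);		/* keep the low 32 KiB out of play */
	memblock_analyze();			/* recompute memblock.memory_size */
	memblock_dump_all();			/* no-op unless memblock_debug */
}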
@@ -561,3 +570,11 @@ void __init memblock_init(void)
 	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
 }
 
+static int __init early_memblock(char *p)
+{
+	if (p && strstr(p, "debug"))
+		memblock_debug = 1;
+	return 0;
+}
+early_param("memblock", early_memblock);
+
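
Since early_memblock() is registered with early_param(), moving it to
the bottom of the file changes no behaviour: booting with
memblock=debug on the kernel command line still sets memblock_debug,
which enables the memblock_dump_all() output above. For example
(hypothetical boot line):

	root=/dev/sda1 memblock=debug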