|
82 | 82 | * initialization completes.
|
83 | 83 | */
|
84 | 84 |
|
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* Single-node (!NUMA) configurations get their one pglist_data defined here. */
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

/*
 * Boot-time page-frame-number bounds, set up by early arch/memblock code.
 * max_low_pfn is used as the upper clamp when releasing memory to the buddy
 * allocator (see __free_memory_core()); the exact semantics of the others
 * are arch-defined — NOTE(review): see per-arch setup code for details.
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;
/* Static backing storage for the initial memory/reserved region arrays. */
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
87 | 97 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
|
@@ -1877,6 +1887,100 @@ static int __init early_memblock(char *p)
|
1877 | 1887 | }
|
1878 | 1888 | early_param("memblock", early_memblock);
|
1879 | 1889 |
|
| 1890 | +static void __init __free_pages_memory(unsigned long start, unsigned long end) |
| 1891 | +{ |
| 1892 | + int order; |
| 1893 | + |
| 1894 | + while (start < end) { |
| 1895 | + order = min(MAX_ORDER - 1UL, __ffs(start)); |
| 1896 | + |
| 1897 | + while (start + (1UL << order) > end) |
| 1898 | + order--; |
| 1899 | + |
| 1900 | + memblock_free_pages(pfn_to_page(start), start, order); |
| 1901 | + |
| 1902 | + start += (1UL << order); |
| 1903 | + } |
| 1904 | +} |
| 1905 | + |
| 1906 | +static unsigned long __init __free_memory_core(phys_addr_t start, |
| 1907 | + phys_addr_t end) |
| 1908 | +{ |
| 1909 | + unsigned long start_pfn = PFN_UP(start); |
| 1910 | + unsigned long end_pfn = min_t(unsigned long, |
| 1911 | + PFN_DOWN(end), max_low_pfn); |
| 1912 | + |
| 1913 | + if (start_pfn >= end_pfn) |
| 1914 | + return 0; |
| 1915 | + |
| 1916 | + __free_pages_memory(start_pfn, end_pfn); |
| 1917 | + |
| 1918 | + return end_pfn - start_pfn; |
| 1919 | +} |
| 1920 | + |
| 1921 | +static unsigned long __init free_low_memory_core_early(void) |
| 1922 | +{ |
| 1923 | + unsigned long count = 0; |
| 1924 | + phys_addr_t start, end; |
| 1925 | + u64 i; |
| 1926 | + |
| 1927 | + memblock_clear_hotplug(0, -1); |
| 1928 | + |
| 1929 | + for_each_reserved_mem_region(i, &start, &end) |
| 1930 | + reserve_bootmem_region(start, end); |
| 1931 | + |
| 1932 | + /* |
| 1933 | + * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id |
| 1934 | + * because in some case like Node0 doesn't have RAM installed |
| 1935 | + * low ram will be on Node1 |
| 1936 | + */ |
| 1937 | + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, |
| 1938 | + NULL) |
| 1939 | + count += __free_memory_core(start, end); |
| 1940 | + |
| 1941 | + return count; |
| 1942 | +} |
| 1943 | + |
/* Guards reset_all_zones_managed_pages() so the reset runs only once. */
static int reset_managed_pages_done __initdata;
| 1946 | +void reset_node_managed_pages(pg_data_t *pgdat) |
| 1947 | +{ |
| 1948 | + struct zone *z; |
| 1949 | + |
| 1950 | + for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) |
| 1951 | + z->managed_pages = 0; |
| 1952 | +} |
| 1953 | + |
| 1954 | +void __init reset_all_zones_managed_pages(void) |
| 1955 | +{ |
| 1956 | + struct pglist_data *pgdat; |
| 1957 | + |
| 1958 | + if (reset_managed_pages_done) |
| 1959 | + return; |
| 1960 | + |
| 1961 | + for_each_online_pgdat(pgdat) |
| 1962 | + reset_node_managed_pages(pgdat); |
| 1963 | + |
| 1964 | + reset_managed_pages_done = 1; |
| 1965 | +} |
| 1966 | + |
| 1967 | +/** |
| 1968 | + * memblock_free_all - release free pages to the buddy allocator |
| 1969 | + * |
| 1970 | + * Return: the number of pages actually released. |
| 1971 | + */ |
| 1972 | +unsigned long __init memblock_free_all(void) |
| 1973 | +{ |
| 1974 | + unsigned long pages; |
| 1975 | + |
| 1976 | + reset_all_zones_managed_pages(); |
| 1977 | + |
| 1978 | + pages = free_low_memory_core_early(); |
| 1979 | + totalram_pages += pages; |
| 1980 | + |
| 1981 | + return pages; |
| 1982 | +} |
| 1983 | + |
1880 | 1984 | #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
|
1881 | 1985 |
|
1882 | 1986 | static int memblock_debug_show(struct seq_file *m, void *private)
|
|
0 commit comments