mm: introduce memmap_alloc() to unify memory map allocation
There are several places that allocate memory for the memory map:
alloc_node_mem_map() for FLATMEM, sparse_buffer_init() and
__populate_section_memmap() for SPARSEMEM.

The memory allocated in the FLATMEM case is zeroed and it is never
poisoned, regardless of the CONFIG_PAGE_POISONING setting.  The memory
allocated in the SPARSEMEM cases is not zeroed and it is implicitly
poisoned inside memblock if CONFIG_PAGE_POISONING is set.

Introduce a memmap_alloc() wrapper for the memblock allocators that will
be used for both the FLATMEM and SPARSEMEM cases and will make memory map
zeroing and poisoning consistent across memory models.

Link: https://lkml.kernel.org/r/20210714123739.16493-4-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Michal Simek <monstr@monstr.eu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c803b3c8b3
parent 22e7878102
committed by Linus Torvalds
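For orientation before the diff: a condensed, hedged sketch of the call-site
change. Every identifier below appears in the hunks that follow; the
surrounding declarations are elided and the comments are editorial, not part
of the patch.

	/* FLATMEM before: memblock_alloc_node() returns zeroed memory and
	 * the memory map is never poisoned, regardless of
	 * CONFIG_PAGE_POISONING. */
	map = memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);

	/* SPARSEMEM before: the _raw allocators skip zeroing, and memblock
	 * poisons the range internally when page poisoning is enabled. */
	map = memblock_alloc_try_nid_raw(size, size, addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	/* After: both memory models allocate through one wrapper, so
	 * zeroing and poisoning behave the same way everywhere. */
	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
			   pgdat->node_id, false);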
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -211,6 +211,10 @@ extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
 extern void zone_pcp_enable(struct zone *zone);
 
+extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
+			  phys_addr_t min_addr,
+			  int nid, bool exact_nid);
+
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
 /*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6748,6 +6748,26 @@ static void __init memmap_init(void)
 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
 }
 
+void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
+			  phys_addr_t min_addr, int nid, bool exact_nid)
+{
+	void *ptr;
+
+	if (exact_nid)
+		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
+						   MEMBLOCK_ALLOC_ACCESSIBLE,
+						   nid);
+	else
+		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
+						 MEMBLOCK_ALLOC_ACCESSIBLE,
+						 nid);
+
+	if (ptr && size > 0)
+		page_init_poison(ptr, size);
+
+	return ptr;
+}
+
 static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
@@ -7519,8 +7539,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
 		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
-		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
-					  pgdat->node_id);
+		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+				   pgdat->node_id, false);
 		if (!map)
 			panic("Failed to allocate %ld bytes for node %d memory map\n",
 			      size, pgdat->node_id);
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -436,8 +436,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 	if (map)
 		return map;
 
-	map = memblock_alloc_try_nid_raw(size, size, addr,
-					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	map = memmap_alloc(size, size, addr, nid, false);
 	if (!map)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
 		      __func__, size, PAGE_SIZE, nid, &addr);
@@ -464,8 +463,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 	 * and we want it to be properly aligned to the section size - this is
 	 * especially the case for VMEMMAP which maps memmap to PMDs
 	 */
-	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
-					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
 	sparsemap_buf_end = sparsemap_buf + size;
 }
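A note on the new exact_nid flag, reading the memmap_alloc() body above;
the fallback behavior described here is the documented memblock semantics,
and the comments are editorial, not part of the patch.

	/* exact_nid == true keeps memblock_alloc_exact_nid_raw() semantics:
	 * the allocation must come from @nid, with no fallback to other
	 * nodes.  sparse_buffer_init() relies on this for its per-node
	 * buffer. */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);

	/* exact_nid == false uses memblock_alloc_try_nid_raw(), which may
	 * fall back to a different node if @nid cannot satisfy the
	 * request. */
	map = memmap_alloc(size, size, addr, nid, false);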