We currently maintain two separate memory pools for the host stage-2: one for pages used in the page-table when mapping memory regions, and the other to map MMIO regions. The former is large enough to map all of memory with page granularity, while the latter can cover an arbitrary portion of IPA space, but allows pages to be 'recycled'.

However, this split makes accounting difficult to manage, as pages at intermediate levels of the page-table may be used to map both memory and MMIO regions. Simplify the scheme by merging both pools into one. This means we can now hit the -ENOMEM case in the memory abort path, but we're still guaranteed forward progress in the worst case by unmapping MMIO regions. On the plus side, this also means we can usually map a lot more MMIO space at once if memory ranges happen to be mapped with block mappings.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210608114518.748712-5-qperret@google.com
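To illustrate the idea behind the unified pool, here is a minimal, standalone sketch. It is not the kvm/nvhe implementation; the names (pool_alloc(), unmap_mmio_regions(), handle_mem_abort()) and the page counts are hypothetical. A single pool serves both memory and MMIO mappings, and an allocation failure on the memory abort path is recovered by tearing down MMIO mappings so their pages return to the pool:

/* Standalone sketch of the unified-pool idea; not the kvm/nvhe API. */
#include <errno.h>
#include <stdio.h>

#define POOL_PAGES	8

static int free_pages = POOL_PAGES;	/* one pool for mem and MMIO mappings */
static int mmio_pt_pages;		/* page-table pages backing MMIO mappings */

static int pool_alloc(void)
{
	if (!free_pages)
		return -ENOMEM;
	free_pages--;
	return 0;
}

/* Tear down all MMIO mappings, recycling their pages into the pool. */
static void unmap_mmio_regions(void)
{
	free_pages += mmio_pt_pages;
	mmio_pt_pages = 0;
}

/*
 * Memory abort path: -ENOMEM is now possible, but unmapping MMIO regions
 * guarantees forward progress in the worst case.
 */
static int handle_mem_abort(void)
{
	int ret = pool_alloc();

	if (ret == -ENOMEM) {
		unmap_mmio_regions();
		ret = pool_alloc();
	}
	return ret;
}

int main(void)
{
	/* Pretend MMIO mappings already consumed most of the pool. */
	mmio_pt_pages = 6;
	free_pages -= mmio_pt_pages;

	for (int i = 0; i < 4; i++)
		printf("memory fault %d -> %d\n", i, handle_mem_abort());
	return 0;
}

The file below (the host-side reservation code) only sizes and reserves the hypervisor's memory carveout; with the pools merged, a single host_s2_pgtable_pages() term now accounts for the host stage-2.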
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/kvm_host.h>

#include <nvhe/memory.h>
#include <nvhe/mm.h>

static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);

phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;

static int cmp_hyp_memblock(const void *p1, const void *p2)
{
	const struct memblock_region *r1 = p1;
	const struct memblock_region *r2 = p2;

	return r1->base < r2->base ? -1 : (r1->base > r2->base);
}

static void __init sort_memblock_regions(void)
{
	sort(hyp_memory,
	     *hyp_memblock_nr_ptr,
	     sizeof(struct memblock_region),
	     cmp_hyp_memblock,
	     NULL);
}

static int __init register_memblock_regions(void)
{
	struct memblock_region *reg;

	for_each_mem_region(reg) {
		if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
			return -ENOMEM;

		hyp_memory[*hyp_memblock_nr_ptr] = *reg;
		(*hyp_memblock_nr_ptr)++;
	}
	sort_memblock_regions();

	return 0;
}

void __init kvm_hyp_reserve(void)
{
	u64 nr_pages, prev, hyp_mem_pages = 0;
	int ret;

	if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
		return;

	if (kvm_get_mode() != KVM_MODE_PROTECTED)
		return;

	ret = register_memblock_regions();
	if (ret) {
		*hyp_memblock_nr_ptr = 0;
		kvm_err("Failed to register hyp memblocks: %d\n", ret);
		return;
	}

	hyp_mem_pages += hyp_s1_pgtable_pages();
	hyp_mem_pages += host_s2_pgtable_pages();

	/*
	 * The hyp_vmemmap needs to be backed by pages, but these pages
	 * themselves need to be present in the vmemmap, so compute the number
	 * of pages needed by looking for a fixed point.
	 */
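	/*
	 * Each pass below re-sizes the vmemmap for all pages accounted so far,
	 * including the vmemmap and page-table pages added by the previous
	 * pass. Since every page only costs sizeof(struct hyp_page) bytes of
	 * vmemmap, the per-iteration increment shrinks rapidly and the loop
	 * converges after a few iterations.
	 */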
	nr_pages = 0;
	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
		nr_pages += __hyp_pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);
	hyp_mem_pages += nr_pages;

	/*
	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
	 * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
	 */
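	/*
	 * When the PMD-aligned candidate is found, the size is rounded up to
	 * PMD_SIZE as well (see the else branch below), so the whole
	 * reservation can be covered by block mappings.
	 */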
	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
	hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
					      ALIGN(hyp_mem_size, PMD_SIZE),
					      PMD_SIZE);
	if (!hyp_mem_base)
		hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
						      hyp_mem_size, PAGE_SIZE);
	else
		hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

	if (!hyp_mem_base) {
		kvm_err("Failed to reserve hyp memory\n");
		return;
	}
	memblock_reserve(hyp_mem_base, hyp_mem_size);

	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
		 hyp_mem_base);
}