ANDROID: KVM: arm64: Allow loading modules to the pKVM hypervisor

All nVHE hypervisor code is currently required to be statically linked
into the kernel image. Sadly, scaling pKVM will inevitably require
running _some_ hardware-specific code in the hypervisor, as the
architecture imposes no requirements on, for example, IOMMU
implementations or power management.

In order to address this issue, introduce the ability to load modules
in the pKVM hypervisor at run-time. pKVM modules are expected to be
embedded inside kernel modules, and to be loaded into pKVM when their
kernel counterpart is loaded at EL1. pKVM module loading is defined as a
privileged operation -- all modules must be loaded while the host kernel
is still part of the trusted computing base.

Bug: 244543039
Bug: 244373730
Co-authored-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Change-Id: If8e5d3ac0a2893c892aff09e5b51d3b8e14693f8
Quentin Perret
2022-10-06 11:57:45 +01:00
commit 1c2e782ae5 (parent b1bd8930b8)
14 changed files with 396 additions and 0 deletions

View File

@@ -63,6 +63,10 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
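/*
 * The module-loading hypercalls below sit before __pkvm_prot_finalize on
 * purpose: like the other entries in this part of the enum, they stop
 * being serviced once pKVM has been finalised.
 */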
__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls available after pKVM finalisation */

View File

@@ -116,11 +116,15 @@ alternative_cb_end
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_host.h>
#include <asm/kvm_pkvm_module.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);
void kvm_apply_hyp_module_relocations(void *mod_start, void *hyp_va,
kvm_nvhe_reloc_t *begin,
kvm_nvhe_reloc_t *end);
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

View File

@@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_PKVM_MODULE_H__
#define __ARM64_KVM_PKVM_MODULE_H__
#include <linux/export.h>
struct pkvm_module_ops {
};
struct pkvm_module_section {
void *start;
void *end;
};
typedef s32 kvm_nvhe_reloc_t;
struct pkvm_el2_module {
struct pkvm_module_section text;
struct pkvm_module_section bss;
struct pkvm_module_section rodata;
struct pkvm_module_section data;
kvm_nvhe_reloc_t *relocs;
unsigned int nr_relocs;
int (*init)(const struct pkvm_module_ops *ops);
};
#ifdef MODULE
int __pkvm_load_el2_module(struct pkvm_el2_module *mod, struct module *this);
/*
* function_nocfi() does not work with function pointers, hence the macro in
* lieu of a function.
*/
#define pkvm_load_el2_module(init_fn) \
({ \
extern char __kvm_nvhe___hypmod_text_start[]; \
extern char __kvm_nvhe___hypmod_text_end[]; \
extern char __kvm_nvhe___hypmod_bss_start[]; \
extern char __kvm_nvhe___hypmod_bss_end[]; \
extern char __kvm_nvhe___hypmod_rodata_start[]; \
extern char __kvm_nvhe___hypmod_rodata_end[]; \
extern char __kvm_nvhe___hypmod_data_start[]; \
extern char __kvm_nvhe___hypmod_data_end[]; \
extern char __kvm_nvhe___hyprel_start[]; \
extern char __kvm_nvhe___hyprel_end[]; \
struct pkvm_el2_module mod; \
\
mod.text.start = __kvm_nvhe___hypmod_text_start; \
mod.text.end = __kvm_nvhe___hypmod_text_end; \
mod.bss.start = __kvm_nvhe___hypmod_bss_start; \
mod.bss.end = __kvm_nvhe___hypmod_bss_end; \
mod.rodata.start = __kvm_nvhe___hypmod_rodata_start; \
mod.rodata.end = __kvm_nvhe___hypmod_rodata_end; \
mod.data.start = __kvm_nvhe___hypmod_data_start; \
mod.data.end = __kvm_nvhe___hypmod_data_end; \
mod.relocs = (kvm_nvhe_reloc_t *)__kvm_nvhe___hyprel_start; \
mod.nr_relocs = (__kvm_nvhe___hyprel_end - __kvm_nvhe___hyprel_start) / \
sizeof(*mod.relocs); \
mod.init = function_nocfi(init_fn); \
\
__pkvm_load_el2_module(&mod, THIS_MODULE); \
})
#endif
#endif
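
For illustration, a minimal usage sketch (not part of this patch) of a
kernel module loading its embedded EL2 payload -- the symbol names are
hypothetical, and the __kvm_nvhe_ prefix assumes the usual nVHE symbol
namespacing:

/* EL2 entry point, built into the module's .hyp.text section. */
int __kvm_nvhe_my_hyp_init(const struct pkvm_module_ops *ops);

static int __init my_driver_init(void)
{
	/* Must run while the host is still part of the TCB. */
	return pkvm_load_el2_module(__kvm_nvhe_my_hyp_init);
}
module_init(my_driver_init);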

View File

@@ -1,3 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <asm/page-def.h>
SECTIONS {
#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 : { BYTE(0) }
@@ -46,4 +49,24 @@ SECTIONS {
*/
.text.hot : { *(.text.hot) }
#endif
#ifdef CONFIG_KVM
.hyp.text : ALIGN(PAGE_SIZE) {
*(.hyp.text)
. = ALIGN(PAGE_SIZE);
}
.hyp.bss : ALIGN(PAGE_SIZE) {
*(.hyp.bss)
. = ALIGN(PAGE_SIZE);
}
.hyp.rodata : ALIGN(PAGE_SIZE) {
*(.hyp.rodata)
. = ALIGN(PAGE_SIZE);
}
.hyp.data : ALIGN(PAGE_SIZE) {
*(.hyp.data)
. = ALIGN(PAGE_SIZE);
}
.hyp.reloc : ALIGN(4) { *(.hyp.reloc) }
#endif
}

View File

@@ -12,6 +12,7 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
extern const struct pkvm_module_ops module_ops;
int hyp_create_pcpu_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys);
@@ -30,4 +31,7 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
void pkvm_remove_mappings(void *from, void *to);
int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot);
void __pkvm_unmap_module_page(u64 pfn, void *va);
void *__pkvm_alloc_module_va(u64 nr_pages);
#endif /* __KVM_HYP_MM_H */

View File

@@ -0,0 +1,5 @@
#ifdef CONFIG_MODULES
int __pkvm_init_module(void *module_init);
#else
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
#endif

View File

@@ -8,6 +8,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
hyp-obj-$(CONFIG_MODULES) += modules.o
hyp-obj-y += $(lib-objs)
hyp-obj-$(CONFIG_KVM_S2MPU) += iommu/s2mpu.o

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
$(obj)/hyp.lds: arch/arm64/kvm/hyp/nvhe/module.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
include $(srctree)/arch/arm64/kvm/hyp/nvhe/Makefile.nvhe

View File

@@ -18,6 +18,7 @@
#include <nvhe/ffa.h>
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include <nvhe/modules.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
@@ -1154,6 +1155,37 @@ static void handle___pkvm_iommu_finalize(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_iommu_finalize();
}
static void handle___pkvm_alloc_module_va(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, nr_pages, host_ctxt, 1);
cpu_reg(host_ctxt, 1) = (u64)__pkvm_alloc_module_va(nr_pages);
}
static void handle___pkvm_map_module_page(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
DECLARE_REG(void *, va, host_ctxt, 2);
DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
cpu_reg(host_ctxt, 1) = (u64)__pkvm_map_module_page(pfn, va, prot);
}
static void handle___pkvm_unmap_module_page(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
DECLARE_REG(void *, va, host_ctxt, 2);
__pkvm_unmap_module_page(pfn, va);
}
static void handle___pkvm_init_module(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(void *, ptr, host_ctxt, 1);
cpu_reg(host_ctxt, 1) = __pkvm_init_module(ptr);
}
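
These handlers follow the usual pKVM hypercall convention: DECLARE_REG()
pulls each argument out of the host's saved general-purpose registers
starting at x1, and storing to cpu_reg(host_ctxt, 1) is what the EL1
caller reads back as the return value. Host-side, the same call is a
one-liner, as in the loader added later in this patch:

	ret = kvm_call_hyp_nvhe(__pkvm_map_module_page, pfn, hyp_va + offset, prot);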
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1192,6 +1224,10 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_iommu_register),
HANDLE_FUNC(__pkvm_iommu_pm_notify),
HANDLE_FUNC(__pkvm_iommu_finalize),
HANDLE_FUNC(__pkvm_alloc_module_va),
HANDLE_FUNC(__pkvm_map_module_page),
HANDLE_FUNC(__pkvm_unmap_module_page),
HANDLE_FUNC(__pkvm_init_module),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)

View File

@@ -100,6 +100,33 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
return err;
}
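/*
 * Only reserves private EL2 VA space; nothing is mapped here. The pages
 * are provided one by one via __pkvm_map_module_page() below.
 */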
void *__pkvm_alloc_module_va(u64 nr_pages)
{
unsigned long addr;
int ret;
ret = pkvm_alloc_private_va_range(nr_pages << PAGE_SHIFT, &addr);
return ret ? NULL : (void *)addr;
}
int __pkvm_map_module_page(u64 pfn, void *va, enum kvm_pgtable_prot prot)
{
int ret;
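/*
 * Donate the page first: ownership moves from host to hyp, so the host
 * loses access before the EL2 mapping is created.
 */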
ret = __pkvm_host_donate_hyp(pfn, 1);
if (ret)
return ret;
return __pkvm_create_mappings((unsigned long)va, PAGE_SIZE, hyp_pfn_to_phys(pfn), prot);
}
void __pkvm_unmap_module_page(u64 pfn, void *va)
{
WARN_ON(__pkvm_hyp_donate_host(pfn, 1));
pkvm_remove_mappings(va, va + PAGE_SIZE);
}
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
unsigned long start = (unsigned long)from;

View File

@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/hyp_image.h>
#include <asm/page-def.h>
SECTIONS {
.hyp.text : {
HYP_SECTION_SYMBOL_NAME(.text) = .;
__hypmod_text_start = .;
*(.text .text.*)
__hypmod_text_end = .;
}
.hyp.bss : {
HYP_SECTION_SYMBOL_NAME(.bss) = .;
__hypmod_bss_start = .;
*(.bss .bss.*)
FILL(0)
__hypmod_bss_end = .;
}
.hyp.rodata : {
HYP_SECTION_SYMBOL_NAME(.rodata) = .;
__hypmod_rodata_start = .;
*(.rodata .rodata.*)
BYTE(0)
__hypmod_rodata_end = .;
}
.hyp.data : {
HYP_SECTION_SYMBOL_NAME(.data) = .;
__hypmod_data_start = .;
*(.data .data.*)
BYTE(0)
__hypmod_data_end = .;
}
}

View File

@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022 Google LLC
*/
#include <asm/kvm_host.h>
#include <asm/kvm_pkvm_module.h>
#include <nvhe/modules.h>
const struct pkvm_module_ops module_ops = {
};
int __pkvm_init_module(void *module_init)
{
int (*do_module_init)(const struct pkvm_module_ops *ops) = module_init;
int ret;
ret = do_module_init(&module_ops);
return ret;
}

View File

@@ -13,7 +13,9 @@
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_pkvm_module.h>
#include "hyp_constants.h"
@@ -412,3 +414,140 @@ int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
return 0;
}
struct pkvm_mod_sec_mapping {
struct pkvm_module_section *sec;
enum kvm_pgtable_prot prot;
};
static void pkvm_unmap_module_pages(void *kern_va, void *hyp_va, size_t size)
{
size_t offset;
u64 pfn;
for (offset = 0; offset < size; offset += PAGE_SIZE) {
pfn = vmalloc_to_pfn(kern_va + offset);
kvm_call_hyp_nvhe(__pkvm_unmap_module_page, pfn,
hyp_va + offset);
}
}
static void pkvm_unmap_module_sections(struct pkvm_mod_sec_mapping *secs_map, void *hyp_va_base, int nr_secs)
{
size_t offset, size;
void *start;
int i;
for (i = 0; i < nr_secs; i++) {
start = secs_map[i].sec->start;
size = secs_map[i].sec->end - start;
offset = start - secs_map[0].sec->start;
pkvm_unmap_module_pages(start, hyp_va_base + offset, size);
}
}
static int pkvm_map_module_section(struct pkvm_mod_sec_mapping *sec_map, void *hyp_va)
{
size_t offset, size = sec_map->sec->end - sec_map->sec->start;
int ret;
u64 pfn;
for (offset = 0; offset < size; offset += PAGE_SIZE) {
pfn = vmalloc_to_pfn(sec_map->sec->start + offset);
ret = kvm_call_hyp_nvhe(__pkvm_map_module_page, pfn,
hyp_va + offset, sec_map->prot);
if (ret) {
pkvm_unmap_module_pages(sec_map->sec->start, hyp_va, offset);
return ret;
}
}
return 0;
}
static int pkvm_map_module_sections(struct pkvm_mod_sec_mapping *secs_map, void *hyp_va_base, int nr_secs)
{
size_t offset;
int i, ret;
for (i = 0; i < nr_secs; i++) {
offset = secs_map[i].sec->start - secs_map[0].sec->start;
ret = pkvm_map_module_section(&secs_map[i], hyp_va_base + offset);
if (ret) {
pkvm_unmap_module_sections(secs_map, hyp_va_base, i);
return ret;
}
}
return 0;
}
static int __pkvm_cmp_mod_sec(const void *p1, const void *p2)
{
struct pkvm_mod_sec_mapping const *s1 = p1;
struct pkvm_mod_sec_mapping const *s2 = p2;
return s1->sec->start < s2->sec->start ? -1 : s1->sec->start > s2->sec->start;
}
int __pkvm_load_el2_module(struct pkvm_el2_module *mod, struct module *this)
{
struct pkvm_mod_sec_mapping secs_map[] = {
{ &mod->text, KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X },
{ &mod->bss, KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W },
{ &mod->rodata, KVM_PGTABLE_PROT_R },
{ &mod->data, KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W },
};
void *start, *end, *hyp_va;
kvm_nvhe_reloc_t *endrel;
size_t offset, size;
int ret, i;
if (!is_protected_kvm_enabled())
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(secs_map); i++) {
if (!PAGE_ALIGNED(secs_map[i].sec->start)) {
kvm_err("EL2 sections are not page-aligned\n");
return -EINVAL;
}
}
if (!try_module_get(this)) {
kvm_err("Kernel module has been unloaded\n");
return -ENODEV;
}
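/*
 * Sort sections by kernel VA so per-section offsets within the
 * contiguous EL2 range mirror the kernel-side layout.
 */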
sort(secs_map, ARRAY_SIZE(secs_map), sizeof(secs_map[0]), __pkvm_cmp_mod_sec, NULL);
start = secs_map[0].sec->start;
end = secs_map[ARRAY_SIZE(secs_map) - 1].sec->end;
size = PAGE_ALIGN(end - start);
hyp_va = (void *)kvm_call_hyp_nvhe(__pkvm_alloc_module_va, size >> PAGE_SHIFT);
if (!hyp_va) {
kvm_err("Failed to allocate hypervisor VA space for EL2 module\n");
module_put(this);
return -ENOMEM;
}
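/*
 * Apply relocations while the host can still write these pages; mapping
 * them below donates them to EL2 and revokes host access.
 */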
endrel = (void *)mod->relocs + mod->nr_relocs * sizeof(*endrel);
kvm_apply_hyp_module_relocations(start, hyp_va, mod->relocs, endrel);
ret = pkvm_map_module_sections(secs_map, hyp_va, ARRAY_SIZE(secs_map));
if (ret) {
kvm_err("Failed to map EL2 module page: %d\n", ret);
module_put(this);
return ret;
}
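/*
 * mod->init is a kernel VA within the mapped range; convert it to the
 * EL2 VA of the same byte.
 */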
offset = (size_t)((void *)mod->init - start);
ret = kvm_call_hyp_nvhe(__pkvm_init_module, hyp_va + offset);
if (ret) {
kvm_err("Failed to init EL2 module: %d\n", ret);
pkvm_unmap_module_sections(secs_map, hyp_va, ARRAY_SIZE(secs_map));
module_put(this);
return ret;
}
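/*
 * Deliberately keep the try_module_get() reference on success: the EL2
 * mappings cannot be torn down, so the backing module must never unload.
 */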
return 0;
}
EXPORT_SYMBOL_GPL(__pkvm_load_el2_module);

View File

@@ -12,6 +12,7 @@
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>
#include <asm/patching.h>
/*
* The LSB of the HYP VA tag
@@ -109,6 +110,29 @@ __init void kvm_apply_hyp_relocations(void)
}
}
void kvm_apply_hyp_module_relocations(void *mod_start, void *hyp_va,
kvm_nvhe_reloc_t *begin,
kvm_nvhe_reloc_t *end)
{
kvm_nvhe_reloc_t *rel;
for (rel = begin; rel < end; ++rel) {
u32 **ptr, *va;
/*
* Each entry contains a 32-bit relative offset from itself
* to a VA position in the module area.
*/
ptr = (u32 **)((char *)rel + *rel);
/* Read the module VA value at the relocation address. */
va = *ptr;
/* Convert the module VA of the reloc to a hyp VA */
WARN_ON(aarch64_addr_write(ptr, (u64)(((void *)va - mod_start) + hyp_va)));
}
}
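
To make the arithmetic concrete, here is a standalone sketch of one
relocation step -- it mirrors the loop above, minus the
aarch64_addr_write() patching helper, and the function name is
illustrative only:

static void fixup_one_reloc(kvm_nvhe_reloc_t *rel, void *mod_start, void *hyp_va)
{
	/* Each entry holds a self-relative offset to the location to patch. */
	u64 *ptr = (u64 *)((char *)rel + *rel);
	/* That location holds a 64-bit kernel (module) VA... */
	void *kern_va = (void *)*ptr;
	/* ...which gets rewritten to the matching hyp VA. */
	*ptr = (u64)((kern_va - mod_start) + hyp_va);
}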
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
u32 insn = AARCH64_BREAK_FAULT;