ANDROID: KVM: arm64: Let modules specify arbitrary permissions for host pages

Currently, pKVM modules can only restrict the host stage-2 permissions
of pages, with no way of relaxing them later on. Modules also lack the
ability to unmap pages from the host without mapping them into the
hypervisor, as the host-to-hyp donation path is the only one available
to them.

In order to give modules more flexibility, make the
hyp_protect_host_page() function significantly more generic by allowing
it to relax permissions as well as to "map" with !R!W!X, that is, to
remove the page from the host stage-2 entirely, as illustrated by the
usage sketch below.

Bug: 264070847
Change-Id: Id6751fe147ea8b86a416a1c3326a2d75f04b623d
Signed-off-by: Quentin Perret <qperret@google.com>
commit 41ca9a59f9
parent 9d6994b7e3
Author: Quentin Perret <qperret@google.com>
Date:   2023-01-30 16:40:24 +00:00

4 changed files with 45 additions and 12 deletions
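
To make the new semantics concrete before the diffs, here is a minimal
usage sketch from a module's point of view. It is a sketch, not part of
the patch: the entry point name example_module_init(), the placeholder
pfn, and the <asm/kvm_pkvm_module.h> header path are assumptions; only
host_stage2_mod_prot() and the KVM_PGTABLE_PROT_* flags come from this
patch and the existing pgtable API.

#include <asm/kvm_pkvm_module.h>

/* Hypothetical module init hook receiving the ops vtable. */
int example_module_init(const struct pkvm_module_ops *ops)
{
	u64 pfn = 0x80000;	/* placeholder page frame number */
	int ret;

	/* Restrict the host to read-only access on a pristine page. */
	ret = ops->host_stage2_mod_prot(pfn, KVM_PGTABLE_PROT_R);
	if (ret)
		return ret;

	/* Remove the page from the host stage-2 entirely (!R!W!X). */
	ret = ops->host_stage2_mod_prot(pfn, 0);
	if (ret)
		return ret;

	/* Relax back to full RWX, returning the page to the host. */
	return ops->host_stage2_mod_prot(pfn, KVM_PGTABLE_PROT_RWX);
}

Note that the final RWX call only succeeds because the earlier
restriction flagged the page MODULE_OWNED_PAGE; relaxing a pristine
page to RWX fails with -EPERM, as the second hunk below shows.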

@@ -30,7 +30,7 @@ struct pkvm_module_ops {
 	void (*linear_unmap_early)(void *addr, size_t size);
 	void (*flush_dcache_to_poc)(void *addr, size_t size);
 	int (*register_host_perm_fault_handler)(int (*cb)(struct kvm_cpu_context *ctxt, u64 esr, u64 addr));
-	int (*protect_host_page)(u64 pfn, enum kvm_pgtable_prot prot);
+	int (*host_stage2_mod_prot)(u64 pfn, enum kvm_pgtable_prot prot);
 	int (*host_stage2_get_leaf)(phys_addr_t phys, kvm_pte_t *ptep, u32 *level);
 	int (*register_host_smc_handler)(bool (*cb)(struct kvm_cpu_context *));
 	int (*register_default_trap_handler)(bool (*cb)(struct kvm_cpu_context *));

@@ -99,11 +99,12 @@ int hyp_register_host_perm_fault_handler(int (*cb)(struct kvm_cpu_context *ctxt,
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
 void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
-int hyp_protect_host_page(u64 pfn, enum kvm_pgtable_prot prot);
 int host_stage2_get_leaf(phys_addr_t phys, kvm_pte_t *ptep, u32 *level);
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc);
+int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
 void psci_mem_protect_inc(u64 n);
 void psci_mem_protect_dec(u64 n);

@@ -1886,14 +1886,28 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 	return ret;
 }
 
-int hyp_protect_host_page(u64 pfn, enum kvm_pgtable_prot prot)
+static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm_pgtable_prot prot)
+{
+	int ret = 0;
+
+	/* XXX: optimize ... */
+	if (kvm_pte_valid(pte) && (level == KVM_PGTABLE_MAX_LEVELS - 1))
+		ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, addr, PAGE_SIZE);
+	if (!ret)
+		ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
+
+	return ret;
+}
+
+int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
 {
 	u64 addr = hyp_pfn_to_phys(pfn);
+	struct hyp_page *page;
 	kvm_pte_t pte;
 	u32 level;
 	int ret;
 
-	if ((prot & KVM_PGTABLE_PROT_RWX) != prot || prot == KVM_PGTABLE_PROT_RWX)
+	if ((prot & KVM_PGTABLE_PROT_RWX) != prot || !addr_is_memory(addr))
 		return -EINVAL;
 
 	host_lock_component();
@@ -1901,16 +1915,34 @@ int hyp_protect_host_page(u64 pfn, enum kvm_pgtable_prot prot)
 	if (ret)
 		goto unlock;
 
-	if (host_get_page_state(pte, addr) != PKVM_PAGE_OWNED) {
-		ret = -EPERM;
+	ret = -EPERM;
+	page = hyp_phys_to_page(addr);
+
+	/*
+	 * Modules can only relax permissions of pages they own, and restrict
+	 * permissions of pristine pages.
+	 */
+	if (prot == KVM_PGTABLE_PROT_RWX) {
+		if (!(page->flags & MODULE_OWNED_PAGE))
+			goto unlock;
+	} else if (host_get_page_state(pte, addr) != PKVM_PAGE_OWNED) {
 		goto unlock;
 	}
 
-	/* XXX: optimize ... */
-	if (kvm_pte_valid(pte) && (level == KVM_PGTABLE_MAX_LEVELS - 1))
-		ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, addr, PAGE_SIZE);
-	if (!ret)
-		ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
+	if (prot == KVM_PGTABLE_PROT_RWX)
+		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
+	else if (!prot)
+		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
+	else
+		ret = restrict_host_page_perms(addr, pte, level, prot);
+
+	if (ret)
+		goto unlock;
+
+	if (prot != KVM_PGTABLE_PROT_RWX)
+		hyp_phys_to_page(addr)->flags |= MODULE_OWNED_PAGE;
+	else
+		hyp_phys_to_page(addr)->flags &= ~MODULE_OWNED_PAGE;
 
 unlock:
 	host_unlock_component();

@@ -68,7 +68,7 @@ const struct pkvm_module_ops module_ops = {
 	.linear_unmap_early = __pkvm_linear_unmap_early,
 	.flush_dcache_to_poc = __kvm_flush_dcache_to_poc,
 	.register_host_perm_fault_handler = hyp_register_host_perm_fault_handler,
-	.protect_host_page = hyp_protect_host_page,
+	.host_stage2_mod_prot = module_change_host_page_prot,
 	.host_stage2_get_leaf = host_stage2_get_leaf,
 	.register_host_smc_handler = __pkvm_register_host_smc_handler,
 	.register_default_trap_handler = __pkvm_register_default_trap_handler,
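
Since module_ops also exposes register_host_perm_fault_handler()
(unchanged above), a natural companion to the new !R!W!X case is to
pair the unmap with a fault handler so the module observes host
accesses to the page it pulled out. The sketch below is illustrative
only: the helper names and the handler's deny-with--EPERM return
convention are assumptions, while the two ops and the callback
signature are taken from the vtable shown in the diffs.

/* Hypothetical handler: deny the access; a real module might
 * emulate or log it instead. */
static int example_perm_fault_handler(struct kvm_cpu_context *ctxt,
				      u64 esr, u64 addr)
{
	return -EPERM;
}

static int example_hide_page(const struct pkvm_module_ops *ops, u64 pfn)
{
	int ret;

	ret = ops->register_host_perm_fault_handler(example_perm_fault_handler);
	if (ret)
		return ret;

	/* !R!W!X: the host loses the mapping; its subsequent accesses
	 * fault and reach the handler registered above. */
	return ops->host_stage2_mod_prot(pfn, 0);
}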