From f13ce4744f00bd8a014dfbb5a89871073f0e8efc Mon Sep 17 00:00:00 2001
From: Quentin Perret
Date: Thu, 12 Jan 2023 11:32:05 +0000
Subject: [PATCH] ANDROID: KVM: arm64: Poison pages on the relinquish path

When a guest relinquishes a page to the host (because of e.g.
ballooning), it issues a hypercall where the hypervisor puts the page in
the 'pending reclaim' state. It is then the host's responsibility to
call the reclaim hypercall to trigger the page poisoning before dropping
the GUP pin.

In order to reduce dependencies on the 'pending reclaim' state, which
will be reworked very soon, rework the relinquish path to poison the
pages synchronously, hence removing the need for the host to issue a
reclaim call. The page ownership is now synchronously returned to the
host on the back of the relinquish hcall.

Bug: 229972313
Bug: 238945523
Change-Id: I85fd3485308209357bcdeb855de07da227fac0fe
Signed-off-by: Quentin Perret
---
 arch/arm64/kvm/hyp/nvhe/mem_protect.c | 23 +++++++++++++----------
 arch/arm64/kvm/pkvm.c                 |  4 ----
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 00c2ddaa9df4..58d05791ec25 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -348,9 +348,9 @@ static int relinquish_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 			     enum kvm_pgtable_walk_flags flag, void * const arg)
 {
 	kvm_pte_t pte = *ptep;
-	struct hyp_page *page;
 	struct relinquish_data *data = arg;
 	enum pkvm_page_state state;
+	phys_addr_t phys;
 
 	if (!kvm_pte_valid(pte))
 		return 0;
@@ -359,12 +359,13 @@ static int relinquish_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	if (state != data->expected_state)
 		return -EPERM;
 
-	page = hyp_phys_to_page(kvm_pte_to_phys(pte));
-	if (state == PKVM_PAGE_OWNED)
-		page->flags |= HOST_PAGE_NEED_POISONING;
-	page->flags |= HOST_PAGE_PENDING_RECLAIM;
+	phys = kvm_pte_to_phys(pte);
+	if (state == PKVM_PAGE_OWNED) {
+		hyp_poison_page(phys);
+		psci_mem_protect_dec(1);
+	}
 
-	data->pa = kvm_pte_to_phys(pte);
+	data->pa = phys;
 
 	return 0;
 }
@@ -392,12 +393,14 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
 	/* Set default pa value to "not found". */
 	data.pa = 0;
 
-	/* If ipa is mapped: sets page flags, and gets the pa. */
+	/* If ipa is mapped: poisons the page, and gets the pa. */
 	ret = kvm_pgtable_walk(&vm->pgt, ipa, PAGE_SIZE, &walker);
 
-	/* Zap the guest stage2 pte. */
-	if (!ret && data.pa)
-		kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+	/* Zap the guest stage2 pte and return ownership to the host */
+	if (!ret && data.pa) {
+		WARN_ON(host_stage2_set_owner_locked(data.pa, PAGE_SIZE, PKVM_ID_HOST));
+		WARN_ON(kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE));
+	}
 
 	guest_unlock_component(vm);
 	host_unlock_component();
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 2f4900ea7dee..e1381f564107 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -381,10 +381,6 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
 		return;
 
 	ppage = container_of(node, struct kvm_pinned_page, node);
-
-	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
-				  page_to_pfn(ppage->page)));
-
 	account_locked_vm(mm, 1, false);
 	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
 	kfree(ppage);
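
Note for reviewers on the EL2 helpers called above: hyp_poison_page() is
expected to scrub the relinquished page at EL2 before ownership goes back to
the host, and psci_mem_protect_dec() drops the firmware MEM_PROTECT page
count now that one fewer page needs protecting. Below is a minimal sketch of
what the poisoning plausibly looks like; the use of the hyp fixmap helpers
(hyp_fixmap_map()/hyp_fixmap_unmap()) and the clean-to-PoC are assumptions
for illustration, not necessarily the in-tree implementation:

	/*
	 * Sketch only: an assumed shape for hyp_poison_page(). The page is
	 * temporarily mapped at EL2, zeroed so the host can never observe
	 * stale guest data, and cleaned to the PoC so the scrub is visible
	 * regardless of the host's cacheability attributes.
	 */
	static void hyp_poison_page(phys_addr_t phys)
	{
		void *addr = hyp_fixmap_map(phys);	/* assumed EL2 fixmap helper */

		memset(addr, 0, PAGE_SIZE);
		kvm_flush_dcache_to_poc(addr, PAGE_SIZE);
		hyp_fixmap_unmap();
	}

Because the scrub and the ownership transfer now happen on the relinquish
hcall itself, the host no longer needs to issue __pkvm_host_reclaim_page
before dropping the GUP pin, which is why the pkvm.c hunk simply deletes
that call.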