ANDROID: KVM: arm64: Poison pages on the relinquish path

When a guest relinquishes a page to the host (because of e.g.
ballooning), it issues a hypercall and the hypervisor puts the page in
the 'pending reclaim' state. It is then the host's responsibility to
issue the reclaim hypercall, which poisons the page, before dropping
the GUP pin.
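
For reference, the host side of that old flow looked roughly like this
(condensed from the pkvm_host_reclaim_page() hunk removed below;
locking and error handling omitted):

	/* Old flow: reclaim (and thereby poison) the page via
	 * hypercall before dropping the GUP pin.
	 */
	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
				  page_to_pfn(ppage->page)));
	account_locked_vm(mm, 1, false);
	unpin_user_pages_dirty_lock(&ppage->page, 1, true);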

In order to reduce dependencies on the 'pending reclaim' state, which
will be reworked very soon, rework the relinquish path to poison the
pages synchronously, hence removing the need for the host to issue a
reclaim call. Page ownership is now returned to the host synchronously,
on the back of the relinquish hcall.
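
With the poisoning and the ownership transfer both done under the
relinquish hcall, the host-side reclaim path reduces to roughly the
following (condensed from the pkvm_host_reclaim_page() hunk below; the
__pkvm_host_reclaim_page hypercall is no longer needed):

	/* New flow: the page was already poisoned and handed back to
	 * the host by the relinquish hcall, so just drop the pin.
	 */
	account_locked_vm(mm, 1, false);
	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
	kfree(ppage);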

Bug: 229972313
Bug: 238945523
Change-Id: I85fd3485308209357bcdeb855de07da227fac0fe
Signed-off-by: Quentin Perret <qperret@google.com>
commit f13ce4744f (parent 3d06919cab)
Author: Quentin Perret <qperret@google.com>
Date:   2023-01-12 11:32:05 +00:00

2 changed files with 13 additions and 14 deletions


@@ -348,9 +348,9 @@ static int relinquish_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 			      enum kvm_pgtable_walk_flags flag, void * const arg)
 {
 	kvm_pte_t pte = *ptep;
-	struct hyp_page *page;
 	struct relinquish_data *data = arg;
 	enum pkvm_page_state state;
+	phys_addr_t phys;
 
 	if (!kvm_pte_valid(pte))
 		return 0;
@@ -359,12 +359,13 @@ static int relinquish_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	if (state != data->expected_state)
 		return -EPERM;
 
-	page = hyp_phys_to_page(kvm_pte_to_phys(pte));
-	if (state == PKVM_PAGE_OWNED)
-		page->flags |= HOST_PAGE_NEED_POISONING;
-	page->flags |= HOST_PAGE_PENDING_RECLAIM;
+	phys = kvm_pte_to_phys(pte);
+	if (state == PKVM_PAGE_OWNED) {
+		hyp_poison_page(phys);
+		psci_mem_protect_dec(1);
+	}
 
-	data->pa = kvm_pte_to_phys(pte);
+	data->pa = phys;
 
 	return 0;
 }
@@ -392,12 +393,14 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
 	/* Set default pa value to "not found". */
 	data.pa = 0;
 
-	/* If ipa is mapped: sets page flags, and gets the pa. */
+	/* If ipa is mapped: poisons the page, and gets the pa. */
 	ret = kvm_pgtable_walk(&vm->pgt, ipa, PAGE_SIZE, &walker);
 
-	/* Zap the guest stage2 pte. */
-	if (!ret && data.pa)
-		kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+	/* Zap the guest stage2 pte and return ownership to the host */
+	if (!ret && data.pa) {
+		WARN_ON(host_stage2_set_owner_locked(data.pa, PAGE_SIZE, PKVM_ID_HOST));
+		WARN_ON(kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE));
+	}
 
 	guest_unlock_component(vm);
 	host_unlock_component();


@@ -381,10 +381,6 @@ void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
 		return;
 
 	ppage = container_of(node, struct kvm_pinned_page, node);
-
-	WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_reclaim_page,
-				  page_to_pfn(ppage->page)));
-
 	account_locked_vm(mm, 1, false);
 	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
 	kfree(ppage);