ANDROID: KVM: arm64: Introduce the pkvm_vcpu_{load,put} hypercalls

Rather than look up the hyp vCPU on every run hypercall at EL2,
introduce a per-CPU 'loaded_hyp_vcpu' tracking variable which is updated
by a pair of load/put hypercalls called directly from
kvm_arch_vcpu_{load,put}() when pKVM is enabled.
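
For illustration, the host-side flow this enables looks roughly like the
sketch below (illustrative only: example_vcpu_section() is a made-up name,
and the run call is shown via kvm_call_hyp_ret() as used elsewhere in arm.c;
the hypercalls and fields are the ones added by this patch):

/* Sketch of the host-side sequence under pKVM (not kernel code). */
static int example_vcpu_section(struct kvm_vcpu *vcpu)
{
	int ret;

	/* kvm_arch_vcpu_load(): pin the hyp vCPU to this physical CPU. */
	kvm_call_hyp_nvhe(__pkvm_vcpu_load, vcpu->kvm->arch.pkvm.handle,
			  vcpu->vcpu_idx, vcpu->arch.hcr_el2);

	/*
	 * Any number of run hypercalls may follow; EL2 now fetches the
	 * vCPU from its per-CPU 'loaded_hyp_vcpu' pointer instead of
	 * re-resolving it from the VM handle on each call.
	 */
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);

	/* kvm_arch_vcpu_put(): clear the per-CPU tracking at EL2. */
	kvm_call_hyp_nvhe(__pkvm_vcpu_put);

	return ret;
}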

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <willdeacon@google.com>
Bug: 233587962
Change-Id: Ic640cb805d0f9610059713ff19918dcffc477d44
Author:    Marc Zyngier, 2022-04-20 16:05:17 +01:00
Committer: Will Deacon
Commit:    3bd11c3908 (parent: bd91ee22a4)

7 changed files with 98 additions and 24 deletions

arch/arm64/include/asm/kvm_asm.h

@@ -79,6 +79,8 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
 };
 
 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]

arch/arm64/kvm/arm.c

@@ -444,12 +444,26 @@ nommu:
 	vcpu_ptrauth_disable(vcpu);
 	kvm_arch_vcpu_load_debug_state_flags(vcpu);
 
+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+				  vcpu->kvm->arch.pkvm.handle,
+				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+	}
+
 	if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+	}
+
 	kvm_arch_vcpu_put_debug_state_flags(vcpu);
 	kvm_arch_vcpu_put_fp(vcpu);
 	if (has_vhe())

arch/arm64/kvm/hyp/include/nvhe/pkvm.h

@@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu {
 	/* Backpointer to the host's (untrusted) vCPU instance. */
 	struct kvm_vcpu *host_vcpu;
+
+	/*
+	 * If this hyp vCPU is loaded, then this is a backpointer to the
+	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
+	 */
+	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
 };
 
 /*
@@ -77,6 +83,7 @@ int __pkvm_teardown_vm(pkvm_handle_t handle);
 struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 					 unsigned int vcpu_idx);
 void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
 bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);

arch/arm64/kvm/hyp/nvhe/hyp-main.c

@@ -138,20 +138,48 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	sync_hyp_timer_state(hyp_vcpu);
 }
 
+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+	DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+	DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
+	if (!hyp_vcpu)
+		return;
+
+	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
+		/* Propagate WFx trapping flags, trap ptrauth */
+		hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI |
+						 HCR_API | HCR_APK);
+		hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+	}
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+	if (hyp_vcpu)
+		pkvm_put_hyp_vcpu(hyp_vcpu);
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
 	int ret;
 
-	host_vcpu = kern_hyp_va(host_vcpu);
-
 	if (unlikely(is_protected_kvm_enabled())) {
-		struct pkvm_hyp_vcpu *hyp_vcpu;
-		struct kvm *host_kvm;
+		struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
 
-		host_kvm = kern_hyp_va(host_vcpu->kvm);
-		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
-					      host_vcpu->vcpu_idx);
 		if (!hyp_vcpu) {
 			ret = -EINVAL;
 			goto out;
@@ -162,12 +190,10 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
 
 		sync_hyp_vcpu(hyp_vcpu);
-		pkvm_put_hyp_vcpu(hyp_vcpu);
 	} else {
 		/* The host is fully trusted, run its vCPU directly. */
-		ret = __kvm_vcpu_run(host_vcpu);
+		ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
 	}
 
 out:
 	cpu_reg(host_ctxt, 1) = ret;
 }
@@ -186,29 +212,22 @@ static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(u64, pfn, host_ctxt, 1);
 	DECLARE_REG(u64, gfn, host_ctxt, 2);
-	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 3);
 	struct pkvm_hyp_vcpu *hyp_vcpu;
-	struct kvm *host_kvm;
 	int ret = -EINVAL;
 
 	if (!is_protected_kvm_enabled())
 		goto out;
 
-	host_vcpu = kern_hyp_va(host_vcpu);
-	host_kvm = kern_hyp_va(host_vcpu->kvm);
-	hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
-				      host_vcpu->vcpu_idx);
+	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
 	if (!hyp_vcpu)
 		goto out;
 
 	/* Top-up our per-vcpu memcache from the host's */
 	ret = pkvm_refill_memcache(hyp_vcpu);
 	if (ret)
-		goto out_put_vcpu;
+		goto out;
 
 	ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu);
-out_put_vcpu:
-	pkvm_put_hyp_vcpu(hyp_vcpu);
 out:
 	cpu_reg(host_ctxt, 1) = ret;
 }
@@ -432,6 +451,8 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
 	HANDLE_FUNC(__pkvm_teardown_vm),
+	HANDLE_FUNC(__pkvm_vcpu_load),
+	HANDLE_FUNC(__pkvm_vcpu_put),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)

arch/arm64/kvm/hyp/nvhe/pkvm.c

@@ -17,6 +17,12 @@ unsigned long __icache_flags;
 /* Used by kvm_get_vttbr(). */
 unsigned int kvm_arm_vmid_bits;
 
+/*
+ * The currently loaded hyp vCPU for each physical CPU. Used only when
+ * protected KVM is enabled, but for both protected and non-protected VMs.
+ */
+static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -246,15 +252,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
 	struct pkvm_hyp_vm *hyp_vm;
 
+	/* Cannot load a new vcpu without putting the old one first. */
+	if (__this_cpu_read(loaded_hyp_vcpu))
+		return NULL;
+
 	hyp_spin_lock(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
 		goto unlock;
 
 	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+
+	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
+		hyp_vcpu = NULL;
+		goto unlock;
+	}
+
+	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
 	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
 unlock:
 	hyp_spin_unlock(&vm_table_lock);
+
+	if (hyp_vcpu)
+		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
+
 	return hyp_vcpu;
 }
@@ -263,10 +284,17 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 
 	hyp_spin_lock(&vm_table_lock);
+	hyp_vcpu->loaded_hyp_vcpu = NULL;
+	__this_cpu_write(loaded_hyp_vcpu, NULL);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
 	hyp_spin_unlock(&vm_table_lock);
 }
 
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
+{
+	return __this_cpu_read(loaded_hyp_vcpu);
+}
+
 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
 {
 	if (host_vcpu)

arch/arm64/kvm/mmu.c

@@ -1172,9 +1172,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 	return 0;
 }
 
-static int pkvm_host_map_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu)
+static int pkvm_host_map_guest(u64 pfn, u64 gfn)
 {
-	int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn, vcpu);
+	int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn);
 
 	/*
 	 * Getting -EPERM at this point implies that the pfn has already been
@@ -1240,7 +1240,7 @@ static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	write_lock(&kvm->mmu_lock);
 	pfn = page_to_pfn(page);
-	ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT, vcpu);
+	ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT);
 	if (ret) {
 		if (ret == -EAGAIN)
 			ret = 0;

arch/arm64/kvm/vgic/vgic-v3.c

@@ -724,7 +724,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -738,7 +739,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking)
 	WARN_ON(vgic_v4_put(vcpu, blocking));
 
-	kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_deactivate_traps(cpu_if);