ANDROID: KVM: arm64: Merge vmcr/apr save/restore

Merge the functions that save and restore vmcr and the aprs. In some
cases this reduces the number of hypercalls needed to load/put the vgic
state in nVHE, and it will also make that state easier to manage in
protected mode later on.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Bug: 209580772
Change-Id: Id85f0698a7a346282e55c15993c274828bd5309c
Signed-off-by: Will Deacon <willdeacon@google.com>
Author:    Marc Zyngier <maz@kernel.org>
Date:      2021-10-07 15:17:53 +01:00
Committer: Will Deacon
Parent:    717dc97b99
Commit:    4eda3ce580

10 changed files with 47 additions and 84 deletions
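The hypercall saving is easiest to see from the host side of the nVHE
load/put paths. Below is a minimal standalone sketch, not kernel code:
kvm_call_hyp() is modelled by a counting stub and struct vgic_v3_cpu_if
is cut down to the one field this path tests; only the function names
are taken from the hunks that follow.

/*
 * Minimal sketch, not kernel code: kvm_call_hyp() is a counting stub,
 * struct vgic_v3_cpu_if keeps only the field the load path checks.
 */
#include <stdbool.h>
#include <stdio.h>

struct vgic_v3_cpu_if {
	bool vgic_sre;
};

static int hvc_count;

static void kvm_call_hyp_stub(const char *fn)
{
	hvc_count++;			/* one HVC per call on nVHE */
	printf("HVC -> %s\n", fn);
}

/* Before: vmcr and the aprs each need their own trip to EL2. */
static void vgic_v3_load_before(struct vgic_v3_cpu_if *cpu_if)
{
	if (cpu_if->vgic_sre)
		kvm_call_hyp_stub("__vgic_v3_write_vmcr");
	kvm_call_hyp_stub("__vgic_v3_restore_aprs");
}

/* After: a single hypercall restores both. */
static void vgic_v3_load_after(struct vgic_v3_cpu_if *cpu_if)
{
	(void)cpu_if;			/* the vgic_sre test moves to EL2 */
	kvm_call_hyp_stub("__vgic_v3_restore_vmcr_aprs");
}

int main(void)
{
	struct vgic_v3_cpu_if cpu_if = { .vgic_sre = true };

	vgic_v3_load_before(&cpu_if);	/* 2 hypercalls */
	vgic_v3_load_after(&cpu_if);	/* 1 hypercall  */
	printf("total HVCs: %d\n", hvc_count);
	return 0;
}

On VHE, kvm_call_hyp() degenerates into an ordinary function call, so
the win is specific to nVHE (and, later, protected mode), where every
call is a real trap to EL2.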

arch/arm64/include/asm/kvm_asm.h

@@ -71,10 +71,8 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
 	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
-	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_shadow,
 };
@@ -215,8 +213,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_gic_config(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
 extern u64 __kvm_get_mdcr_el2(void);

arch/arm64/include/asm/kvm_hyp.h

@@ -61,8 +61,8 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_NVHE_HYPERVISOR__

arch/arm64/kvm/arm.c

@@ -398,15 +398,14 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 	 * doorbells to be signalled, should an interrupt become pending.
 	 */
 	preempt_disable();
-	kvm_vgic_vmcr_sync(vcpu);
-	vgic_v4_put(vcpu, true);
+	kvm_vgic_put(vcpu, true);
 	preempt_enable();
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-	vgic_v4_load(vcpu);
+	kvm_vgic_load(vcpu);
 	preempt_enable();
 }
 
@@ -460,7 +459,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	if (has_vhe())
 		kvm_vcpu_put_sysregs_vhe(vcpu);
 	kvm_timer_vcpu_put(vcpu);
-	kvm_vgic_put(vcpu);
+	kvm_vgic_put(vcpu, false);
 	kvm_vcpu_pmu_restore_host(vcpu);
 
 	vcpu->cpu = -1;
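The hunks above give the new bool its meaning: a vcpu that is blocking
passes true, the ordinary vcpu put path passes false. Below is a
standalone sketch of how the flag threads down to vgic_v4_put(), pieced
together from the vgic.c and vgic-v3.c hunks later in this commit; the
need_db parameter name comes from the upstream vgic_v4_put() prototype,
and anything else not visible in the hunks is an assumption.

/*
 * Standalone sketch of the flag propagation; stubs only. "need_db"
 * follows the upstream vgic_v4_put() prototype; treat anything not
 * visible in the hunks as an assumption.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { int cpu; };

static void vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	(void)vcpu;
	/*
	 * true: leave the vPE doorbell armed so a pending vLPI can
	 * wake the blocked vcpu.
	 */
	printf("vgic_v4_put(need_db=%d)\n", need_db);
}

static void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking)
{
	vgic_v4_put(vcpu, blocking);
	/* ...followed by one __vgic_v3_save_vmcr_aprs hypercall... */
}

static void kvm_vgic_put(struct kvm_vcpu *vcpu, bool blocking)
{
	vgic_v3_put(vcpu, blocking);	/* or vgic_v2_put() */
}

int main(void)
{
	struct kvm_vcpu vcpu = { .cpu = 0 };

	kvm_vgic_put(&vcpu, true);	/* kvm_arch_vcpu_blocking() */
	kvm_vgic_put(&vcpu, false);	/* kvm_arch_vcpu_put() */
	return 0;
}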

arch/arm64/kvm/hyp/nvhe/hyp-main.c

@@ -360,16 +360,6 @@ static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
 }
 
-static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
-{
-	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
-}
-
-static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
-{
-	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
-}
-
 static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
 {
 	__vgic_v3_init_lrs();
@@ -380,18 +370,18 @@ static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
 }
 
-static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
 }
 
-static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
 
-	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+	__vgic_v3_restore_vmcr_aprs(kern_hyp_va(cpu_if));
 }
 
 static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
@@ -488,10 +478,8 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__vgic_v3_read_vmcr),
-	HANDLE_FUNC(__vgic_v3_write_vmcr),
-	HANDLE_FUNC(__vgic_v3_save_aprs),
-	HANDLE_FUNC(__vgic_v3_restore_aprs),
+	HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
+	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
 	HANDLE_FUNC(__pkvm_init_shadow),
 	HANDLE_FUNC(__pkvm_teardown_shadow),
 };

arch/arm64/kvm/hyp/vgic-v3-sr.c

@@ -330,7 +330,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 		write_gicreg(0, ICH_HCR_EL2);
 }
 
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;
@@ -363,7 +363,7 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	}
 }
 
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
+static void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
 	u64 val;
 	u32 nr_pre_bits;
@@ -455,16 +455,35 @@ u64 __vgic_v3_get_gic_config(void)
 	return val;
 }
 
-u64 __vgic_v3_read_vmcr(void)
+static u64 __vgic_v3_read_vmcr(void)
 {
 	return read_gicreg(ICH_VMCR_EL2);
 }
 
-void __vgic_v3_write_vmcr(u32 vmcr)
+static void __vgic_v3_write_vmcr(u32 vmcr)
 {
 	write_gicreg(vmcr, ICH_VMCR_EL2);
 }
 
+void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+	__vgic_v3_save_aprs(cpu_if);
+	if (cpu_if->vgic_sre)
+		cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
+}
+
+void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
+{
+	/*
+	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
+	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
+	 * VMCR_EL2 save/restore in the world switch.
+	 */
+	if (cpu_if->vgic_sre)
+		__vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
+	__vgic_v3_restore_aprs(cpu_if);
+}
+
 static int __vgic_v3_bpr_min(void)
 {
 	/* See Pseudocode for VPriorityGroup */

arch/arm64/kvm/vgic/vgic-v2.c

@@ -470,17 +470,10 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
+void vgic_v2_put(struct kvm_vcpu *vcpu, bool blocking)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
-}
-
-void vgic_v2_put(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }

arch/arm64/kvm/vgic/vgic-v3.c

@@ -707,15 +707,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	/*
-	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
-	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
-	 * VMCR_EL2 save/restore in the world switch.
-	 */
-	if (likely(cpu_if->vgic_sre))
-		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
-
-	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);
+	kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -723,23 +715,13 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	WARN_ON(vgic_v4_load(vcpu));
 }
 
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
+void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	if (likely(cpu_if->vgic_sre))
-		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
-}
-
-void vgic_v3_put(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	WARN_ON(vgic_v4_put(vcpu, false));
+	WARN_ON(vgic_v4_put(vcpu, blocking));
 
-	vgic_v3_vmcr_sync(vcpu);
-
-	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
+	kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_deactivate_traps(cpu_if);

arch/arm64/kvm/vgic/vgic.c

@@ -931,26 +931,15 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
 		vgic_v3_load(vcpu);
 }
 
-void kvm_vgic_put(struct kvm_vcpu *vcpu)
+void kvm_vgic_put(struct kvm_vcpu *vcpu, bool blocking)
 {
 	if (unlikely(!vgic_initialized(vcpu->kvm)))
 		return;
 
 	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_put(vcpu);
+		vgic_v2_put(vcpu, blocking);
 	else
-		vgic_v3_put(vcpu);
-}
-
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
-{
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		return;
-
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_vmcr_sync(vcpu);
-	else
-		vgic_v3_vmcr_sync(vcpu);
+		vgic_v3_put(vcpu, blocking);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)

arch/arm64/kvm/vgic/vgic.h

@@ -196,8 +196,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
-void vgic_v2_put(struct kvm_vcpu *vcpu);
-void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
+void vgic_v2_put(struct kvm_vcpu *vcpu, bool blocking);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -227,8 +226,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
 bool vgic_v3_check_base(struct kvm *kvm);
 
 void vgic_v3_load(struct kvm_vcpu *vcpu);
-void vgic_v3_put(struct kvm_vcpu *vcpu);
-void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
+void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);

include/kvm/arm_vgic.h

@@ -380,8 +380,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
-void kvm_vgic_put(struct kvm_vcpu *vcpu);
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+void kvm_vgic_put(struct kvm_vcpu *vcpu, bool blocking);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)