FROMLIST: KVM: arm64: Handle FFA_RXTX_MAP and FFA_RXTX_UNMAP calls from the host

Handle FFA_RXTX_MAP and FFA_RXTX_UNMAP calls from the host by sharing
the host's mailbox memory with the hypervisor and establishing a
separate pair of mailboxes between the hypervisor and the SPMD at EL3.
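For reference, the host-side registration which the hypervisor now traps
is a single SMC along the following lines (an illustrative sketch only,
not part of this patch; tx_page, rx_page and npages are placeholders):

	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  virt_to_phys(tx_page),	/* x1: TX buffer PA */
			  virt_to_phys(rx_page),	/* x2: RX buffer PA */
			  npages,			/* x3: 4KiB FF-A page count */
			  0, 0, 0, 0, &res);
	/* res.a0 == FFA_SUCCESS once EL2 has shared and pinned the pages. */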

Bug: 254811097
Co-developed-by: Andrew Walbran <qwandor@google.com>
Change-Id: Ib5fa89e9b01aa20f7c1b5b41df79d66e98d07f55
Signed-off-by: Andrew Walbran <qwandor@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20221116170335.2341003-8-qperret@google.com
Author:    Will Deacon <will@kernel.org>
Date:      2022-11-16 17:03:30 +0000
Committer: Quentin Perret
Commit:    847f7e0189 (parent: 0b1291b733)

2 changed files with 181 additions and 0 deletions

diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c

@@ -31,6 +31,8 @@
 #include <asm/kvm_pkvm.h>
 
 #include <nvhe/ffa.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
 #include <nvhe/trap_handler.h>
 #include <nvhe/spinlock.h>
@@ -52,6 +54,7 @@ struct kvm_ffa_buffers {
  * client.
  */
 static struct kvm_ffa_buffers hyp_buffers;
+static struct kvm_ffa_buffers host_buffers;
 
 static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
 {
@@ -71,6 +74,11 @@ static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
 	}
 }
 
+static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
+{
+	ffa_to_smccc_res_prop(res, ret, 0);
+}
+
 static void ffa_set_retval(struct kvm_cpu_context *ctxt,
 			   struct arm_smccc_res *res)
 {
@@ -88,6 +96,140 @@ static bool is_ffa_call(u64 func_id)
 	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
 }
 
+static int spmd_map_ffa_buffers(u64 ffa_page_count)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
+			  hyp_virt_to_phys(hyp_buffers.tx),
+			  hyp_virt_to_phys(hyp_buffers.rx),
+			  ffa_page_count,
+			  0, 0, 0, 0,
+			  &res);
+
+	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
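+/* Tear down the hypervisor's own RX/TX buffer mapping at the SPMD. */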
+static int spmd_unmap_ffa_buffers(void)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
+			  HOST_FFA_ID,
+			  0, 0, 0, 0, 0, 0,
+			  &res);
+
+	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
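+/*
+ * Handle FFA_RXTX_MAP from the host: map the hypervisor's mailboxes at EL3,
+ * then share and pin the host's TX/RX pages so EL2 can access them safely.
+ */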
+static void do_ffa_rxtx_map(struct arm_smccc_res *res,
+			    struct kvm_cpu_context *ctxt)
+{
+	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
+	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
+	DECLARE_REG(u32, npages, ctxt, 3);
+	int ret = 0;
+	void *rx_virt, *tx_virt;
+
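+	/*
+	 * npages counts FFA_PAGE_SIZE (4KiB) granules, not kernel pages:
+	 * e.g. with 64KiB kernel pages and KVM_FFA_MBOX_NR_PAGES == 1,
+	 * the host must pass npages == 16.
+	 */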
+	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto out;
+	}
+
+	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto out;
+	}
+
+	hyp_spin_lock(&host_buffers.lock);
+	if (host_buffers.tx) {
+		ret = FFA_RET_DENIED;
+		goto out_unlock;
+	}
+
+	ret = spmd_map_ffa_buffers(npages);
+	if (ret)
+		goto out_unlock;
+
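+	/*
+	 * Share then pin the host's mailbox pages with the hypervisor;
+	 * each failure unwinds the preceding steps via the err_* labels.
+	 */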
+	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
+	if (ret) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto err_unmap;
+	}
+
+	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
+	if (ret) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto err_unshare_tx;
+	}
+
+	tx_virt = hyp_phys_to_virt(tx);
+	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
+	if (ret) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto err_unshare_rx;
+	}
+
+	rx_virt = hyp_phys_to_virt(rx);
+	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
+	if (ret) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto err_unpin_tx;
+	}
+
+	host_buffers.tx = tx_virt;
+	host_buffers.rx = rx_virt;
+
+out_unlock:
+	hyp_spin_unlock(&host_buffers.lock);
+out:
+	ffa_to_smccc_res(res, ret);
+	return;
+
+err_unpin_tx:
+	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
+err_unshare_rx:
+	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
+err_unshare_tx:
+	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
+err_unmap:
+	spmd_unmap_ffa_buffers();
+	goto out_unlock;
+}
+
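+/*
+ * Handle FFA_RXTX_UNMAP from the host: unpin and unshare the host mailboxes
+ * and drop the hypervisor's own buffer mapping at the SPMD.
+ */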
+static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
+			      struct kvm_cpu_context *ctxt)
+{
+	DECLARE_REG(u32, id, ctxt, 1);
+	int ret = 0;
+
+	if (id != HOST_FFA_ID) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto out;
+	}
+
+	hyp_spin_lock(&host_buffers.lock);
+	if (!host_buffers.tx) {
+		ret = FFA_RET_INVALID_PARAMETERS;
+		goto out_unlock;
+	}
+
+	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
+	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
+	host_buffers.tx = NULL;
+
+	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
+	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
+	host_buffers.rx = NULL;
+
+	spmd_unmap_ffa_buffers();
+
+out_unlock:
+	hyp_spin_unlock(&host_buffers.lock);
+out:
+	ffa_to_smccc_res(res, ret);
+}
+
 static bool ffa_call_unsupported(u64 func_id)
 {
 	switch (func_id) {
@@ -159,7 +301,11 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
 		goto out_handled;
 	/* Memory management */
 	case FFA_FN64_RXTX_MAP:
+		do_ffa_rxtx_map(&res, host_ctxt);
+		goto out_handled;
 	case FFA_RXTX_UNMAP:
+		do_ffa_rxtx_unmap(&res, host_ctxt);
+		goto out_handled;
 	case FFA_MEM_SHARE:
 	case FFA_FN64_MEM_SHARE:
 	case FFA_MEM_LEND:
@@ -181,6 +327,7 @@ out_handled:
 int hyp_ffa_init(void *pages)
 {
 	struct arm_smccc_res res;
+	size_t min_rxtx_sz;
 
 	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
 		return 0;
@@ -199,11 +346,37 @@ int hyp_ffa_init(void *pages)
 	if (res.a2 != HOST_FFA_ID)
 		return -EINVAL;
 
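+	/*
+	 * Probe the minimum RXTX buffer size the SPMD supports; bail out if
+	 * it is larger than a kernel page, as the mailboxes span whole pages.
+	 */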
+	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
+			  0, 0, 0, 0, 0, 0, &res);
+	if (res.a0 != FFA_SUCCESS)
+		return -EOPNOTSUPP;
+
+	switch (res.a2) {
+	case FFA_FEAT_RXTX_MIN_SZ_4K:
+		min_rxtx_sz = SZ_4K;
+		break;
+	case FFA_FEAT_RXTX_MIN_SZ_16K:
+		min_rxtx_sz = SZ_16K;
+		break;
+	case FFA_FEAT_RXTX_MIN_SZ_64K:
+		min_rxtx_sz = SZ_64K;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (min_rxtx_sz > PAGE_SIZE)
+		return -EOPNOTSUPP;
+
 	hyp_buffers = (struct kvm_ffa_buffers) {
 		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
 		.tx	= pages,
 		.rx	= pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
 	};
 
+	host_buffers = (struct kvm_ffa_buffers) {
+		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
+	};
+
 	return 0;
 }

diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h

@@ -94,6 +94,14 @@
  */
 #define FFA_PAGE_SIZE		SZ_4K
 
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K		0
+#define FFA_FEAT_RXTX_MIN_SZ_64K	1
+#define FFA_FEAT_RXTX_MIN_SZ_16K	2
+
 /* FFA Bus/Device/Driver related */
 struct ffa_device {
 	int vm_id;