[PATCH 03/10] KVM: arm64: Use guard(hyp_spinlock) in ffa.c
From: Fuad Tabba
Date: Mon Mar 16 2026 - 13:40:21 EST
Migrate manual hyp_spin_lock() and hyp_spin_unlock() calls managing
host_buffers.lock and version_lock to use the guard(hyp_spinlock)
macro.
This eliminates manual unlock calls on return paths and simplifies
error handling by replacing goto labels with direct returns.
Note that in do_ffa_rxtx_map(), do_ffa_rxtx_unmap(),
do_ffa_mem_frag_tx() and __do_ffa_mem_xfer() the lock is now
acquired before the initial parameter-validation checks, slightly
widening the critical section. These checks are cheap and the lock
is uncontended in these paths, so this has no practical impact.
Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx>
---
arch/arm64/kvm/hyp/nvhe/ffa.c | 86 +++++++++++++++++++------------------------
1 file changed, 38 insertions(+), 48 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 94161ea1cd60..0c772501c3ba 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -239,6 +239,8 @@ static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
int ret = 0;
void *rx_virt, *tx_virt;
+ guard(hyp_spinlock)(&host_buffers.lock);
+
if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
@@ -249,10 +251,9 @@ static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
goto out;
}
- hyp_spin_lock(&host_buffers.lock);
if (host_buffers.tx) {
ret = FFA_RET_DENIED;
- goto out_unlock;
+ goto out;
}
/*
@@ -261,7 +262,7 @@ static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
*/
ret = ffa_map_hyp_buffers(npages);
if (ret)
- goto out_unlock;
+ goto out;
ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
if (ret) {
@@ -292,8 +293,6 @@ static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
host_buffers.tx = tx_virt;
host_buffers.rx = rx_virt;
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
out:
ffa_to_smccc_res(res, ret);
return;
@@ -306,7 +305,7 @@ static void do_ffa_rxtx_map(struct arm_smccc_1_2_regs *res,
__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
ffa_unmap_hyp_buffers();
- goto out_unlock;
+ goto out;
}
static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
@@ -315,15 +314,16 @@ static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
DECLARE_REG(u32, id, ctxt, 1);
int ret = 0;
+ guard(hyp_spinlock)(&host_buffers.lock);
+
if (id != HOST_FFA_ID) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
- hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
@@ -336,8 +336,6 @@ static void do_ffa_rxtx_unmap(struct arm_smccc_1_2_regs *res,
ffa_unmap_hyp_buffers();
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
out:
ffa_to_smccc_res(res, ret);
}
@@ -421,15 +419,16 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res,
int ret = FFA_RET_INVALID_PARAMETERS;
u32 nr_ranges;
+ guard(hyp_spinlock)(&host_buffers.lock);
+
if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
goto out;
if (fraglen % sizeof(*buf))
goto out;
- hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx)
- goto out_unlock;
+ goto out;
buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen);
@@ -444,15 +443,13 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_1_2_regs *res,
*/
ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
WARN_ON(res->a0 != FFA_SUCCESS);
- goto out_unlock;
+ goto out;
}
ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
out:
if (ret)
ffa_to_smccc_res(res, ret);
@@ -482,6 +479,8 @@ static void __do_ffa_mem_xfer(const u64 func_id,
u32 offset, nr_ranges, checked_offset;
int ret = 0;
+ guard(hyp_spinlock)(&host_buffers.lock);
+
if (addr_mbz || npages_mbz || fraglen > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
@@ -494,15 +493,14 @@ static void __do_ffa_mem_xfer(const u64 func_id,
goto out;
}
- hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
- goto out_unlock;
+ goto out;
}
buf = hyp_buffers.tx;
@@ -513,30 +511,30 @@ static void __do_ffa_mem_xfer(const u64 func_id,
offset = ep_mem_access->composite_off;
if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
if (check_add_overflow(offset, sizeof(struct ffa_composite_mem_region), &checked_offset)) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
if (fraglen < checked_offset) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
reg = (void *)buf + offset;
nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
if (nr_ranges % sizeof(reg->constituents[0])) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
nr_ranges /= sizeof(reg->constituents[0]);
ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
if (ret)
- goto out_unlock;
+ goto out;
ffa_mem_xfer(res, func_id, len, fraglen);
if (fraglen != len) {
@@ -549,8 +547,6 @@ static void __do_ffa_mem_xfer(const u64 func_id,
goto err_unshare;
}
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
out:
if (ret)
ffa_to_smccc_res(res, ret);
@@ -558,7 +554,7 @@ static void __do_ffa_mem_xfer(const u64 func_id,
err_unshare:
WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
- goto out_unlock;
+ goto out;
}
#define do_ffa_mem_xfer(fid, res, ctxt) \
@@ -583,7 +579,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
handle = PACK_HANDLE(handle_lo, handle_hi);
- hyp_spin_lock(&host_buffers.lock);
+ guard(hyp_spinlock)(&host_buffers.lock);
buf = hyp_buffers.tx;
*buf = (struct ffa_mem_region) {
@@ -594,7 +590,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
ffa_retrieve_req(res, sizeof(*buf));
buf = hyp_buffers.rx;
if (res->a0 != FFA_MEM_RETRIEVE_RESP)
- goto out_unlock;
+ goto out;
len = res->a1;
fraglen = res->a2;
@@ -611,13 +607,13 @@ static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
ffa_rx_release(res);
- goto out_unlock;
+ goto out;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
ffa_rx_release(res);
- goto out_unlock;
+ goto out;
}
buf = ffa_desc_buf.buf;
@@ -628,7 +624,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
if (res->a0 != FFA_MEM_FRAG_TX) {
ret = FFA_RET_INVALID_PARAMETERS;
- goto out_unlock;
+ goto out;
}
fraglen = res->a3;
@@ -638,15 +634,13 @@ static void do_ffa_mem_reclaim(struct arm_smccc_1_2_regs *res,
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
if (res->a0 != FFA_SUCCESS)
- goto out_unlock;
+ goto out;
reg = (void *)buf + offset;
/* If the SPMD was happy, then we should be too. */
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
reg->addr_range_cnt));
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
-
+out:
if (ret)
ffa_to_smccc_res(res, ret);
}
@@ -774,13 +768,13 @@ static void do_ffa_version(struct arm_smccc_1_2_regs *res,
return;
}
- hyp_spin_lock(&version_lock);
+ guard(hyp_spinlock)(&version_lock);
if (has_version_negotiated) {
if (FFA_MINOR_VERSION(ffa_req_version) < FFA_MINOR_VERSION(hyp_ffa_version))
res->a0 = FFA_RET_NOT_SUPPORTED;
else
res->a0 = hyp_ffa_version;
- goto unlock;
+ return;
}
/*
@@ -793,7 +787,7 @@ static void do_ffa_version(struct arm_smccc_1_2_regs *res,
.a1 = ffa_req_version,
}, res);
if ((s32)res->a0 == FFA_RET_NOT_SUPPORTED)
- goto unlock;
+ return;
hyp_ffa_version = ffa_req_version;
}
@@ -804,8 +798,6 @@ static void do_ffa_version(struct arm_smccc_1_2_regs *res,
smp_store_release(&has_version_negotiated, true);
res->a0 = hyp_ffa_version;
}
-unlock:
- hyp_spin_unlock(&version_lock);
}
static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
@@ -818,10 +810,10 @@ static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
DECLARE_REG(u32, flags, ctxt, 5);
u32 count, partition_sz, copy_sz;
- hyp_spin_lock(&host_buffers.lock);
+ guard(hyp_spinlock)(&host_buffers.lock);
if (!host_buffers.rx) {
ffa_to_smccc_res(res, FFA_RET_BUSY);
- goto out_unlock;
+ return;
}
arm_smccc_1_2_smc(&(struct arm_smccc_1_2_regs) {
@@ -834,16 +826,16 @@ static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
}, res);
if (res->a0 != FFA_SUCCESS)
- goto out_unlock;
+ return;
count = res->a2;
if (!count)
- goto out_unlock;
+ return;
if (hyp_ffa_version > FFA_VERSION_1_0) {
/* Get the number of partitions deployed in the system */
if (flags & 0x1)
- goto out_unlock;
+ return;
partition_sz = res->a3;
} else {
@@ -854,12 +846,10 @@ static void do_ffa_part_get(struct arm_smccc_1_2_regs *res,
copy_sz = partition_sz * count;
if (copy_sz > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ffa_to_smccc_res(res, FFA_RET_ABORTED);
- goto out_unlock;
+ return;
}
memcpy(host_buffers.rx, hyp_buffers.rx, copy_sz);
-out_unlock:
- hyp_spin_unlock(&host_buffers.lock);
}
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
--
2.53.0.851.ga537e3e6e9-goog