From 05df73905780b3c9bbdae14ed42fb656b25bec39 Mon Sep 17 00:00:00 2001 From: Jeehoon Kang Date: Sat, 25 May 2019 21:10:36 +0900 Subject: [PATCH 1/2] Split vcpu lock Change-Id: I24e714a3db3c873860f0dab5a424e5c3e585eb0c --- inc/hf/vcpu.h | 33 +++++----- src/api.c | 73 +++++++++------------- src/arch/aarch64/hypervisor/handler.c | 9 +-- src/arch/aarch64/hypervisor/psci_handler.c | 2 +- src/arch/aarch64/inc/hf/arch/spinlock.h | 6 ++ src/arch/fake/inc/hf/arch/spinlock.h | 5 ++ src/cpu.c | 8 +-- src/load.c | 8 +-- src/vcpu.c | 32 +++++----- 9 files changed, 85 insertions(+), 91 deletions(-) diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h index 87d5e8c3a..252316db1 100644 --- a/inc/hf/vcpu.h +++ b/inc/hf/vcpu.h @@ -31,9 +31,6 @@ enum vcpu_state { /** The vCPU is ready to be run. */ VCPU_STATE_READY, - /** The vCPU is currently running. */ - VCPU_STATE_RUNNING, - /** The vCPU is waiting for a message. */ VCPU_STATE_BLOCKED_MAILBOX, @@ -65,7 +62,16 @@ struct vcpu_fault_info { }; struct vcpu { - struct spinlock lock; + /* + * Protects accesses to vCPU's state and architecture registers. If a + * vCPU is running, its execution lock is logically held by the + * running pCPU. + */ + struct spinlock execution_lock; + /* + * Protects accesses to vCPU's interrupts. + */ + struct spinlock interrupts_lock; /* * The state is only changed in the context of the vCPU being run. This @@ -78,26 +84,19 @@ struct vcpu { struct vm *vm; struct arch_regs regs; struct interrupts interrupts; - - /* - * Determine whether the 'regs' field is available for use. This is set - * to false when a vCPU is about to run on a physical CPU, and is set - * back to true when it is descheduled. - */ - bool regs_available; }; -/** Encapsulates a vCPU whose lock is held. */ -struct vcpu_locked { +/** Encapsulates a vCPU whose execution lock is held. */ +struct vcpu_execution_locked { struct vcpu *vcpu; }; -struct vcpu_locked vcpu_lock(struct vcpu *vcpu); -void vcpu_unlock(struct vcpu_locked *locked); +struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu); +void vcpu_unlock(struct vcpu_execution_locked *locked); void vcpu_init(struct vcpu *vcpu, struct vm *vm); -void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg); +void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg); spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu); -bool vcpu_is_off(struct vcpu_locked vcpu); +bool vcpu_is_off(struct vcpu_execution_locked vcpu); bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry, uintreg_t arg); diff --git a/src/api.c b/src/api.c index 8f8272ed9..28b13bc80 100644 --- a/src/api.c +++ b/src/api.c @@ -39,7 +39,7 @@ * acquisition of locks held concurrently by the same physical CPU. Our current * ordering requirements are as follows: * - * vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl + * vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock -> dlog sl * * Locks of the same kind require the lock of lowest address to be locked first, * see `sl_lock_both()`. @@ -117,10 +117,8 @@ static struct vcpu *api_switch_to_primary(struct vcpu *current, /* Set the return value for the primary VM's call to HF_VCPU_RUN. */ arch_regs_set_retval(&next->regs, primary_ret); - /* Mark the current vCPU as waiting. */ - sl_lock(¤t->lock); + /* Mark the current vcpu as waiting. 
*/ current->state = secondary_state; - sl_unlock(¤t->lock); return next; } @@ -277,9 +275,7 @@ spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id, */ void api_regs_state_saved(struct vcpu *vcpu) { - sl_lock(&vcpu->lock); - vcpu->regs_available = true; - sl_unlock(&vcpu->lock); + sl_unlock(&vcpu->execution_lock); } /** @@ -323,7 +319,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu, uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS); int64_t ret = 0; - sl_lock(&target_vcpu->lock); + sl_lock(&target_vcpu->interrupts_lock); /* * We only need to change state and (maybe) trigger a virtual IRQ if it @@ -364,7 +360,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu, /* Either way, make it pending. */ target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask; - sl_unlock(&target_vcpu->lock); + sl_unlock(&target_vcpu->interrupts_lock); return ret; } @@ -399,31 +395,18 @@ static struct spci_value spci_msg_recv_return(const struct vm *receiver) /** * Prepares the vCPU to run by updating its state and fetching whether a return * value needs to be forced onto the vCPU. + * + * Returns: + * - false if it fails to prepare `vcpu` to run. + * - true if it succeeds to prepare `vcpu` to run. In this case, + `vcpu->execution_lock` has acquired. */ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, struct spci_value *run_ret) { - bool need_vm_lock; + bool need_vm_lock = false; bool ret; - /* - * Check that the registers are available so that the vCPU can be run. - * - * The VM lock is not needed in the common case so it must only be taken - * when it is going to be needed. This ensures there are no inter-vCPU - * dependencies in the common run case meaning the sensitive context - * switch performance is consistent. - */ - sl_lock(&vcpu->lock); - - /* The VM needs to be locked to deliver mailbox messages. */ - need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX; - if (need_vm_lock) { - sl_unlock(&vcpu->lock); - sl_lock(&vcpu->vm->lock); - sl_lock(&vcpu->lock); - } - /* * If the vCPU is already running somewhere then we can't run it here * simultaneously. While it is actually running then the state should be @@ -435,7 +418,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, * until this has finished, so count this state as still running for the * purposes of this check. */ - if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) { + if (sl_try_lock(&vcpu->execution_lock)) { /* * vCPU is running on another pCPU. * @@ -458,8 +441,13 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, goto out; } + /* The VM needs to be locked to deliver mailbox messages. */ + need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX; + if (need_vm_lock) { + sl_lock(&vcpu->vm->lock); + } + switch (vcpu->state) { - case VCPU_STATE_RUNNING: case VCPU_STATE_OFF: case VCPU_STATE_ABORTED: ret = false; @@ -517,23 +505,18 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, /* It has been decided that the vCPU should be run. */ vcpu->cpu = current->cpu; - vcpu->state = VCPU_STATE_RUNNING; - - /* - * Mark the registers as unavailable now that we're about to reflect - * them onto the real registers. This will also prevent another physical - * CPU from trying to read these registers. 
- */ - vcpu->regs_available = false; ret = true; out: - sl_unlock(&vcpu->lock); if (need_vm_lock) { sl_unlock(&vcpu->vm->lock); } + if (!ret) { + sl_unlock(&vcpu->execution_lock); + } + return ret; } @@ -1051,7 +1034,7 @@ bool api_spci_msg_recv_block_interrupted(struct vcpu *current) { bool interrupted; - sl_lock(¤t->lock); + sl_lock(¤t->interrupts_lock); /* * Don't block if there are enabled and pending interrupts, to match @@ -1059,7 +1042,7 @@ bool api_spci_msg_recv_block_interrupted(struct vcpu *current) */ interrupted = (current->interrupts.enabled_and_pending_count > 0); - sl_unlock(¤t->lock); + sl_unlock(¤t->interrupts_lock); return interrupted; } @@ -1253,7 +1236,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current) return -1; } - sl_lock(¤t->lock); + sl_lock(¤t->interrupts_lock); if (enable) { /* * If it is pending and was not enabled before, increment the @@ -1279,7 +1262,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current) ~intid_mask; } - sl_unlock(¤t->lock); + sl_unlock(¤t->interrupts_lock); return 0; } @@ -1297,7 +1280,7 @@ uint32_t api_interrupt_get(struct vcpu *current) * Find the first enabled and pending interrupt ID, return it, and * deactivate it. */ - sl_lock(¤t->lock); + sl_lock(¤t->interrupts_lock); for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) { uint32_t enabled_and_pending = current->interrupts.interrupt_enabled[i] & @@ -1317,7 +1300,7 @@ uint32_t api_interrupt_get(struct vcpu *current) } } - sl_unlock(¤t->lock); + sl_unlock(¤t->interrupts_lock); return first_interrupt; } diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c index 4a4f0353d..2ea8deff1 100644 --- a/src/arch/aarch64/hypervisor/handler.c +++ b/src/arch/aarch64/hypervisor/handler.c @@ -27,6 +27,7 @@ #include "hf/dlog.h" #include "hf/panic.h" #include "hf/spci.h" +#include "hf/spinlock.h" #include "hf/vm.h" #include "vmapi/hf/call.h" @@ -398,20 +399,20 @@ static void update_vi(struct vcpu *next) */ struct vcpu *vcpu = current(); - sl_lock(&vcpu->lock); + sl_lock(&vcpu->interrupts_lock); set_virtual_interrupt_current( vcpu->interrupts.enabled_and_pending_count > 0); - sl_unlock(&vcpu->lock); + sl_unlock(&vcpu->interrupts_lock); } else { /* * About to switch vCPUs, set the bit for the vCPU to which we * are switching in the saved copy of the register. 
*/ - sl_lock(&next->lock); + sl_lock(&next->interrupts_lock); set_virtual_interrupt( &next->regs, next->interrupts.enabled_and_pending_count > 0); - sl_unlock(&next->lock); + sl_unlock(&next->interrupts_lock); } } diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c index aabe979ba..eea3a6661 100644 --- a/src/arch/aarch64/hypervisor/psci_handler.c +++ b/src/arch/aarch64/hypervisor/psci_handler.c @@ -296,7 +296,7 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, cpu_id_t target_affinity = arg0; uint32_t lowest_affinity_level = arg1; struct vm *vm = vcpu->vm; - struct vcpu_locked target_vcpu; + struct vcpu_execution_locked target_vcpu; spci_vcpu_index_t target_vcpu_index = vcpu_id_to_index(target_affinity); diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h index 2b39a1b0d..c4fb02544 100644 --- a/src/arch/aarch64/inc/hf/arch/spinlock.h +++ b/src/arch/aarch64/inc/hf/arch/spinlock.h @@ -29,6 +29,7 @@ */ #include +#include #include "hf/arch/types.h" @@ -61,6 +62,11 @@ static inline void sl_lock(struct spinlock *l) : "cc"); } +static inline bool sl_try_lock(struct spinlock *l) +{ + return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v, memory_order_acquire); +} + static inline void sl_unlock(struct spinlock *l) { /* diff --git a/src/arch/fake/inc/hf/arch/spinlock.h b/src/arch/fake/inc/hf/arch/spinlock.h index db3b45c1b..09d8564c1 100644 --- a/src/arch/fake/inc/hf/arch/spinlock.h +++ b/src/arch/fake/inc/hf/arch/spinlock.h @@ -36,6 +36,11 @@ static inline void sl_lock(struct spinlock *l) } } +static inline bool sl_try_lock(struct spinlock *l) +{ + return !atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire); +} + static inline void sl_unlock(struct spinlock *l) { atomic_flag_clear_explicit(&l->v, memory_order_release); diff --git a/src/cpu.c b/src/cpu.c index e52fe2d91..fc5cd1943 100644 --- a/src/cpu.c +++ b/src/cpu.c @@ -141,11 +141,11 @@ bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg) if (!prev) { struct vm *vm = vm_find(HF_PRIMARY_VM_ID); struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c)); - struct vcpu_locked vcpu_locked; + struct vcpu_execution_locked vcpu_execution_locked; - vcpu_locked = vcpu_lock(vcpu); - vcpu_on(vcpu_locked, entry, arg); - vcpu_unlock(&vcpu_locked); + vcpu_execution_locked = vcpu_lock(vcpu); + vcpu_on(vcpu_execution_locked, entry, arg); + vcpu_unlock(&vcpu_execution_locked); } return prev; diff --git a/src/load.c b/src/load.c index 0040201cd..9abe667dd 100644 --- a/src/load.c +++ b/src/load.c @@ -121,7 +121,7 @@ static bool load_primary(struct mm_stage1_locked stage1_locked, { struct vm *vm; struct vm_locked vm_locked; - struct vcpu_locked vcpu_locked; + struct vcpu_execution_locked vcpu_execution_locked; size_t i; bool ret; @@ -218,9 +218,9 @@ static bool load_primary(struct mm_stage1_locked stage1_locked, dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n", vm->vcpu_count, pa_addr(primary_begin)); - vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0)); - vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg); - vcpu_unlock(&vcpu_locked); + vcpu_execution_locked = vcpu_lock(vm_get_vcpu(vm, 0)); + vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin), params->kernel_arg); + vcpu_unlock(&vcpu_execution_locked); ret = true; out: diff --git a/src/vcpu.c b/src/vcpu.c index b77689d47..0018ee153 100644 --- a/src/vcpu.c +++ b/src/vcpu.c @@ -24,13 +24,13 @@ /** * Locks the given vCPU and updates 
`locked` to hold the newly locked vCPU. */ -struct vcpu_locked vcpu_lock(struct vcpu *vcpu) +struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu) { - struct vcpu_locked locked = { + struct vcpu_execution_locked locked = { .vcpu = vcpu, }; - sl_lock(&vcpu->lock); + sl_lock(&vcpu->execution_lock); return locked; } @@ -39,26 +39,27 @@ struct vcpu_locked vcpu_lock(struct vcpu *vcpu) * Unlocks a vCPU previously locked with vpu_lock, and updates `locked` to * reflect the fact that the vCPU is no longer locked. */ -void vcpu_unlock(struct vcpu_locked *locked) +void vcpu_unlock(struct vcpu_execution_locked *locked) { - sl_unlock(&locked->vcpu->lock); + sl_unlock(&locked->vcpu->execution_lock); locked->vcpu = NULL; } void vcpu_init(struct vcpu *vcpu, struct vm *vm) { memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu)); - sl_init(&vcpu->lock); - vcpu->regs_available = true; + sl_init(&vcpu->execution_lock); + sl_init(&vcpu->interrupts_lock); vcpu->vm = vm; vcpu->state = VCPU_STATE_OFF; } /** * Initialise the registers for the given vCPU and set the state to - * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this. + * VCPU_STATE_READY. The caller must hold the vCPU execution lock while calling + * this. */ -void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg) +void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg) { arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg); vcpu.vcpu->state = VCPU_STATE_READY; @@ -77,13 +78,12 @@ spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu) * turning vCPUs on and off. Note that aborted still counts as on in this * context. */ -bool vcpu_is_off(struct vcpu_locked vcpu) +bool vcpu_is_off(struct vcpu_execution_locked vcpu) { switch (vcpu.vcpu->state) { case VCPU_STATE_OFF: return true; case VCPU_STATE_READY: - case VCPU_STATE_RUNNING: case VCPU_STATE_BLOCKED_MAILBOX: case VCPU_STATE_BLOCKED_INTERRUPT: case VCPU_STATE_ABORTED: @@ -107,14 +107,14 @@ bool vcpu_is_off(struct vcpu_locked vcpu) bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry, uintreg_t arg) { - struct vcpu_locked vcpu_locked; + struct vcpu_execution_locked vcpu_execution_locked; struct vm *vm = vcpu->vm; bool vcpu_was_off; CHECK(vm->id != HF_PRIMARY_VM_ID); - vcpu_locked = vcpu_lock(vcpu); - vcpu_was_off = vcpu_is_off(vcpu_locked); + vcpu_execution_locked = vcpu_lock(vcpu); + vcpu_was_off = vcpu_is_off(vcpu_execution_locked); if (vcpu_was_off) { /* * Set vCPU registers to a clean state ready for boot. As this @@ -123,9 +123,9 @@ bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry, * pCPU it is running on. */ arch_regs_reset(vcpu); - vcpu_on(vcpu_locked, entry, arg); + vcpu_on(vcpu_execution_locked, entry, arg); } - vcpu_unlock(&vcpu_locked); + vcpu_unlock(&vcpu_execution_locked); return vcpu_was_off; } From cf3c87865c3d7da6f1e17db610ec924ff74404ab Mon Sep 17 00:00:00 2001 From: "Park, Sanguk" Date: Sat, 18 Apr 2020 18:56:45 +0900 Subject: [PATCH 2/2] Fix data race and format the code. 
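
The execution lock introduced by the previous patch was taken with an
inverted try-lock check: api_vcpu_prepare_run() reported SPCI_BUSY when the
try-lock succeeded (i.e. the vCPU was not running anywhere) and went on to
run the vCPU without holding the lock when it was already taken. The guard
now has the shape shown in the api.c hunk below (comment abridged):

	if (!sl_try_lock(&vcpu->execution_lock)) {
		/* vCPU is running on another pCPU; don't run it here as well. */
		*run_ret = spci_error(SPCI_BUSY);
		return false;
	}

In addition, api_regs_state_saved() now releases the execution lock only for
secondary VMs, since the primary's vCPUs are never claimed through
api_vcpu_prepare_run(), and the PSCI AFFINITY_INFO handler uses current() and
the new vcpu_try_lock() so that it neither blocks on nor re-acquires an
execution lock that is already held, in particular its own.
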
--- inc/hf/vcpu.h | 1 + src/api.c | 12 +++++++----- src/arch/aarch64/hypervisor/handler.c | 8 -------- src/arch/aarch64/hypervisor/psci_handler.c | 21 ++++++++++++++++----- src/arch/aarch64/hypervisor/psci_handler.h | 10 ++++++++++ src/arch/aarch64/inc/hf/arch/spinlock.h | 5 +++-- src/load.c | 3 ++- src/vcpu.c | 13 +++++++++++++ 8 files changed, 52 insertions(+), 21 deletions(-) diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h index 252316db1..761c3fe19 100644 --- a/inc/hf/vcpu.h +++ b/inc/hf/vcpu.h @@ -93,6 +93,7 @@ struct vcpu_execution_locked { struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu); void vcpu_unlock(struct vcpu_execution_locked *locked); +bool vcpu_try_lock(struct vcpu *vcpu, struct vcpu_execution_locked *locked); void vcpu_init(struct vcpu *vcpu, struct vm *vm); void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg); spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu); diff --git a/src/api.c b/src/api.c index 28b13bc80..42a753673 100644 --- a/src/api.c +++ b/src/api.c @@ -39,7 +39,8 @@ * acquisition of locks held concurrently by the same physical CPU. Our current * ordering requirements are as follows: * - * vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock -> dlog sl + * vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock + * -> dlog sl * * Locks of the same kind require the lock of lowest address to be locked first, * see `sl_lock_both()`. @@ -275,7 +276,9 @@ spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id, */ void api_regs_state_saved(struct vcpu *vcpu) { - sl_unlock(&vcpu->execution_lock); + if (vcpu->vm->id != HF_PRIMARY_VM_ID) { + sl_unlock(&vcpu->execution_lock); + } } /** @@ -418,7 +421,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, * until this has finished, so count this state as still running for the * purposes of this check. */ - if (sl_try_lock(&vcpu->execution_lock)) { + if (!sl_try_lock(&vcpu->execution_lock)) { /* * vCPU is running on another pCPU. * @@ -427,8 +430,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, * return the sleep duration if needed. */ *run_ret = spci_error(SPCI_BUSY); - ret = false; - goto out; + return false; } if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) { diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c index 2ea8deff1..9af4f002a 100644 --- a/src/arch/aarch64/hypervisor/handler.c +++ b/src/arch/aarch64/hypervisor/handler.c @@ -52,14 +52,6 @@ */ #define CLIENT_ID_MASK UINT64_C(0xffff) -/** - * Returns a reference to the currently executing vCPU. - */ -static struct vcpu *current(void) -{ - return (struct vcpu *)read_msr(tpidr_el2); -} - /** * Saves the state of per-vCPU peripherals, such as the virtual timer, and * informs the arch-independent sections that registers have been saved. 
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c index eea3a6661..823f69eae 100644 --- a/src/arch/aarch64/hypervisor/psci_handler.c +++ b/src/arch/aarch64/hypervisor/psci_handler.c @@ -296,7 +296,8 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, cpu_id_t target_affinity = arg0; uint32_t lowest_affinity_level = arg1; struct vm *vm = vcpu->vm; - struct vcpu_execution_locked target_vcpu; + struct vcpu *target_vcpu; + struct vcpu_execution_locked target_vcpu_locked; spci_vcpu_index_t target_vcpu_index = vcpu_id_to_index(target_affinity); @@ -311,10 +312,20 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, break; } - target_vcpu = vcpu_lock(vm_get_vcpu(vm, target_vcpu_index)); - *ret = vcpu_is_off(target_vcpu) ? PSCI_RETURN_OFF - : PSCI_RETURN_ON; - vcpu_unlock(&target_vcpu); + target_vcpu = vm_get_vcpu(vm, target_vcpu_index); + if (target_vcpu == current()) { + *ret = PSCI_RETURN_ON; + break; + } + + if (!vcpu_try_lock(target_vcpu, &target_vcpu_locked)) { + *ret = PSCI_RETURN_ON; + break; + } + + *ret = vcpu_is_off(target_vcpu_locked) ? PSCI_RETURN_OFF + : PSCI_RETURN_ON; + vcpu_unlock(&target_vcpu_locked); break; } diff --git a/src/arch/aarch64/hypervisor/psci_handler.h b/src/arch/aarch64/hypervisor/psci_handler.h index 479c1cd02..f6b425f89 100644 --- a/src/arch/aarch64/hypervisor/psci_handler.h +++ b/src/arch/aarch64/hypervisor/psci_handler.h @@ -22,6 +22,16 @@ #include "hf/cpu.h" +#include "msr.h" + bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, uintreg_t arg1, uintreg_t arg2, uintreg_t *ret, struct vcpu **next); + +/** + * Returns a reference to the currently executing vCPU. + */ +static inline struct vcpu *current(void) +{ + return (struct vcpu *)read_msr(tpidr_el2); +} diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h index c4fb02544..62e304074 100644 --- a/src/arch/aarch64/inc/hf/arch/spinlock.h +++ b/src/arch/aarch64/inc/hf/arch/spinlock.h @@ -28,8 +28,8 @@ * the guarantees provided by atomic instructions introduced in Armv8.1 LSE. */ -#include #include +#include #include "hf/arch/types.h" @@ -64,7 +64,8 @@ static inline void sl_lock(struct spinlock *l) static inline bool sl_try_lock(struct spinlock *l) { - return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v, memory_order_acquire); + return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v, + memory_order_acquire); } static inline void sl_unlock(struct spinlock *l) diff --git a/src/load.c b/src/load.c index 9abe667dd..53a3d0bf4 100644 --- a/src/load.c +++ b/src/load.c @@ -219,7 +219,8 @@ static bool load_primary(struct mm_stage1_locked stage1_locked, vm->vcpu_count, pa_addr(primary_begin)); vcpu_execution_locked = vcpu_lock(vm_get_vcpu(vm, 0)); - vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin), params->kernel_arg); + vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin), + params->kernel_arg); vcpu_unlock(&vcpu_execution_locked); ret = true; diff --git a/src/vcpu.c b/src/vcpu.c index 0018ee153..8e710f55d 100644 --- a/src/vcpu.c +++ b/src/vcpu.c @@ -45,6 +45,19 @@ void vcpu_unlock(struct vcpu_execution_locked *locked) locked->vcpu = NULL; } +/** + * Tries to lock the given vCPU, but doesn't block if it is locked. 
+ **/ +bool vcpu_try_lock(struct vcpu *vcpu, struct vcpu_execution_locked *locked) +{ + if (!sl_try_lock(&vcpu->execution_lock)) { + return false; + } + + locked->vcpu = vcpu; + return true; +} + void vcpu_init(struct vcpu *vcpu, struct vm *vm) { memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));