From cf3c87865c3d7da6f1e17db610ec924ff74404ab Mon Sep 17 00:00:00 2001 From: "Park, Sanguk" Date: Sat, 18 Apr 2020 18:56:45 +0900 Subject: [PATCH] Fix data race and format the code. --- inc/hf/vcpu.h | 1 + src/api.c | 12 +++++++----- src/arch/aarch64/hypervisor/handler.c | 8 -------- src/arch/aarch64/hypervisor/psci_handler.c | 21 ++++++++++++++++----- src/arch/aarch64/hypervisor/psci_handler.h | 10 ++++++++++ src/arch/aarch64/inc/hf/arch/spinlock.h | 5 +++-- src/load.c | 3 ++- src/vcpu.c | 13 +++++++++++++ 8 files changed, 52 insertions(+), 21 deletions(-) diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h index 252316db1..761c3fe19 100644 --- a/inc/hf/vcpu.h +++ b/inc/hf/vcpu.h @@ -93,6 +93,7 @@ struct vcpu_execution_locked { struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu); void vcpu_unlock(struct vcpu_execution_locked *locked); +bool vcpu_try_lock(struct vcpu *vcpu, struct vcpu_execution_locked *locked); void vcpu_init(struct vcpu *vcpu, struct vm *vm); void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg); spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu); diff --git a/src/api.c b/src/api.c index 28b13bc80..42a753673 100644 --- a/src/api.c +++ b/src/api.c @@ -39,7 +39,8 @@ * acquisition of locks held concurrently by the same physical CPU. Our current * ordering requirements are as follows: * - * vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock -> dlog sl + * vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock + * -> dlog sl * * Locks of the same kind require the lock of lowest address to be locked first, * see `sl_lock_both()`. @@ -275,7 +276,9 @@ spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id, */ void api_regs_state_saved(struct vcpu *vcpu) { - sl_unlock(&vcpu->execution_lock); + if (vcpu->vm->id != HF_PRIMARY_VM_ID) { + sl_unlock(&vcpu->execution_lock); + } } /** @@ -418,7 +421,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, * until this has finished, so count this state as still running for the * purposes of this check. */ - if (sl_try_lock(&vcpu->execution_lock)) { + if (!sl_try_lock(&vcpu->execution_lock)) { /* * vCPU is running on another pCPU. * @@ -427,8 +430,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu, * return the sleep duration if needed. */ *run_ret = spci_error(SPCI_BUSY); - ret = false; - goto out; + return false; } if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) { diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c index 2ea8deff1..9af4f002a 100644 --- a/src/arch/aarch64/hypervisor/handler.c +++ b/src/arch/aarch64/hypervisor/handler.c @@ -52,14 +52,6 @@ */ #define CLIENT_ID_MASK UINT64_C(0xffff) -/** - * Returns a reference to the currently executing vCPU. - */ -static struct vcpu *current(void) -{ - return (struct vcpu *)read_msr(tpidr_el2); -} - /** * Saves the state of per-vCPU peripherals, such as the virtual timer, and * informs the arch-independent sections that registers have been saved. 
diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c
index eea3a6661..823f69eae 100644
--- a/src/arch/aarch64/hypervisor/psci_handler.c
+++ b/src/arch/aarch64/hypervisor/psci_handler.c
@@ -296,7 +296,8 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
 		cpu_id_t target_affinity = arg0;
 		uint32_t lowest_affinity_level = arg1;
 		struct vm *vm = vcpu->vm;
-		struct vcpu_execution_locked target_vcpu;
+		struct vcpu *target_vcpu;
+		struct vcpu_execution_locked target_vcpu_locked;
 		spci_vcpu_index_t target_vcpu_index =
 			vcpu_id_to_index(target_affinity);
 
@@ -311,10 +312,20 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
 			break;
 		}
 
-		target_vcpu = vcpu_lock(vm_get_vcpu(vm, target_vcpu_index));
-		*ret = vcpu_is_off(target_vcpu) ? PSCI_RETURN_OFF
-						: PSCI_RETURN_ON;
-		vcpu_unlock(&target_vcpu);
+		target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
+		if (target_vcpu == current()) {
+			*ret = PSCI_RETURN_ON;
+			break;
+		}
+
+		if (!vcpu_try_lock(target_vcpu, &target_vcpu_locked)) {
+			*ret = PSCI_RETURN_ON;
+			break;
+		}
+
+		*ret = vcpu_is_off(target_vcpu_locked) ? PSCI_RETURN_OFF
+						       : PSCI_RETURN_ON;
+		vcpu_unlock(&target_vcpu_locked);
 		break;
 	}
 
diff --git a/src/arch/aarch64/hypervisor/psci_handler.h b/src/arch/aarch64/hypervisor/psci_handler.h
index 479c1cd02..f6b425f89 100644
--- a/src/arch/aarch64/hypervisor/psci_handler.h
+++ b/src/arch/aarch64/hypervisor/psci_handler.h
@@ -22,6 +22,16 @@
 
 #include "hf/cpu.h"
 
+#include "msr.h"
+
 bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
		   uintreg_t arg1, uintreg_t arg2, uintreg_t *ret,
		   struct vcpu **next);
+
+/**
+ * Returns a reference to the currently executing vCPU.
+ */
+static inline struct vcpu *current(void)
+{
+	return (struct vcpu *)read_msr(tpidr_el2);
+}
diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h
index c4fb02544..62e304074 100644
--- a/src/arch/aarch64/inc/hf/arch/spinlock.h
+++ b/src/arch/aarch64/inc/hf/arch/spinlock.h
@@ -28,8 +28,8 @@
  * the guarantees provided by atomic instructions introduced in Armv8.1 LSE.
  */
 
-#include <stdint.h>
 #include <stdatomic.h>
+#include <stdint.h>
 
 #include "hf/arch/types.h"
 
@@ -64,7 +64,8 @@ static inline void sl_lock(struct spinlock *l)
 
 static inline bool sl_try_lock(struct spinlock *l)
 {
-	return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v, memory_order_acquire);
+	return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v,
+						  memory_order_acquire);
 }
 
 static inline void sl_unlock(struct spinlock *l)
diff --git a/src/load.c b/src/load.c
index 9abe667dd..53a3d0bf4 100644
--- a/src/load.c
+++ b/src/load.c
@@ -219,7 +219,8 @@ static bool load_primary(struct mm_stage1_locked stage1_locked,
		 vm->vcpu_count, pa_addr(primary_begin));
 
	vcpu_execution_locked = vcpu_lock(vm_get_vcpu(vm, 0));
-	vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin), params->kernel_arg);
+	vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin),
+		params->kernel_arg);
	vcpu_unlock(&vcpu_execution_locked);
 
	ret = true;
diff --git a/src/vcpu.c b/src/vcpu.c
index 0018ee153..8e710f55d 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -45,6 +45,19 @@ void vcpu_unlock(struct vcpu_execution_locked *locked)
	locked->vcpu = NULL;
 }
 
+/**
+ * Tries to lock the given vCPU, but doesn't block if it is locked.
+ */
+bool vcpu_try_lock(struct vcpu *vcpu, struct vcpu_execution_locked *locked)
+{
+	if (!sl_try_lock(&vcpu->execution_lock)) {
+		return false;
+	}
+
+	locked->vcpu = vcpu;
+	return true;
+}
+
 void vcpu_init(struct vcpu *vcpu, struct vm *vm)
 {
	memset_s(vcpu, sizeof(*vcpu), 0, sizeof(*vcpu));
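
A note on the semantics the fixes above rely on: sl_try_lock() returns true only
when the lock was actually acquired. The pre-patch check in api_vcpu_prepare_run()
therefore reported SPCI_BUSY exactly when the target vCPU was *not* running
elsewhere, and went on to run vCPUs whose execution_lock was already held on
another pCPU; inverting the condition closes that race. The sketch below is a
minimal standalone illustration of the corrected pattern, assuming only C11
stdatomic; the spinlock struct and the main() harness are simplified stand-ins for
illustration, not Hafnium's definitions (Hafnium keeps the flag in a uintreg_t and
casts it).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct spinlock {
	atomic_flag v;
};

/* True only if the flag was clear, i.e. the lock was free and is now ours. */
static inline bool sl_try_lock(struct spinlock *l)
{
	return !atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire);
}

static inline void sl_unlock(struct spinlock *l)
{
	atomic_flag_clear_explicit(&l->v, memory_order_release);
}

int main(void)
{
	struct spinlock lock = {ATOMIC_FLAG_INIT};

	/* The lock is free, so the first attempt acquires it. */
	if (sl_try_lock(&lock)) {
		printf("first attempt: acquired\n");
	}

	/*
	 * The lock is now held, so a second attempt fails. This is the
	 * branch the fixed `if (!sl_try_lock(...))` check takes: report
	 * busy instead of treating a running vCPU as runnable.
	 */
	if (!sl_try_lock(&lock)) {
		printf("second attempt: busy\n");
	}

	sl_unlock(&lock);
	return 0;
}

The current() short-circuit in the PSCI_AFFINITY_INFO path follows from the same
semantics: a vCPU's execution_lock stays held for as long as it runs, so a vCPU
querying its own affinity would always fail the try-lock; answering
PSCI_RETURN_ON up front makes the self-query case explicit instead of relying on
that failure mode.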