Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Split vcpu lock #81

Draft
wants to merge 2 commits into
base: upstream
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 17 additions & 17 deletions inc/hf/vcpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,6 @@ enum vcpu_state {
/** The vCPU is ready to be run. */
VCPU_STATE_READY,

/** The vCPU is currently running. */
VCPU_STATE_RUNNING,

/** The vCPU is waiting for a message. */
VCPU_STATE_BLOCKED_MAILBOX,

Expand Down Expand Up @@ -65,7 +62,16 @@ struct vcpu_fault_info {
};

struct vcpu {
struct spinlock lock;
/*
* Protects accesses to vCPU's state and architecture registers. If a
* vCPU is running, its execution lock is logically held by the
* running pCPU.
*/
struct spinlock execution_lock;
/*
* Protects accesses to vCPU's interrupts.
*/
struct spinlock interrupts_lock;

/*
* The state is only changed in the context of the vCPU being run. This
Expand All @@ -78,26 +84,20 @@ struct vcpu {
struct vm *vm;
struct arch_regs regs;
struct interrupts interrupts;

/*
* Determine whether the 'regs' field is available for use. This is set
* to false when a vCPU is about to run on a physical CPU, and is set
* back to true when it is descheduled.
*/
bool regs_available;
};

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
/** Encapsulates a vCPU whose execution lock is held. */
struct vcpu_execution_locked {
struct vcpu *vcpu;
};

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
void vcpu_unlock(struct vcpu_locked *locked);
struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu);
void vcpu_unlock(struct vcpu_execution_locked *locked);
bool vcpu_try_lock(struct vcpu *vcpu, struct vcpu_execution_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg);
spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_is_off(struct vcpu_execution_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
uintreg_t arg);

Expand Down
79 changes: 32 additions & 47 deletions src/api.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@
* acquisition of locks held concurrently by the same physical CPU. Our current
* ordering requirements are as follows:
*
* vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
* vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock
* -> dlog sl
*
* Locks of the same kind require the lock of lowest address to be locked first,
* see `sl_lock_both()`.
Expand Down Expand Up @@ -117,10 +118,8 @@ static struct vcpu *api_switch_to_primary(struct vcpu *current,
/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
arch_regs_set_retval(&next->regs, primary_ret);

/* Mark the current vCPU as waiting. */
sl_lock(&current->lock);
/* Mark the current vCPU as waiting. */
current->state = secondary_state;
sl_unlock(&current->lock);

return next;
}
Expand Down Expand Up @@ -277,9 +276,9 @@ spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
*/
/**
 * Called once the vCPU's registers have been saved back into `vcpu->regs`,
 * making them available again for another pCPU to run this vCPU.
 */
void api_regs_state_saved(struct vcpu *vcpu)
{
	/*
	 * Only secondary vCPUs take/release the execution lock on the run
	 * path (api_vcpu_prepare_run acquires it via sl_try_lock before the
	 * vCPU is run). The primary VM's vCPUs are never scheduled through
	 * that path, so their execution lock is never held here and must not
	 * be released.
	 * NOTE(review): rationale inferred from api_vcpu_prepare_run — the
	 * reviewer asked for this to be confirmed and documented.
	 */
	if (vcpu->vm->id != HF_PRIMARY_VM_ID) {
		sl_unlock(&vcpu->execution_lock);
	}
}

/**
Expand Down Expand Up @@ -323,7 +322,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
int64_t ret = 0;

sl_lock(&target_vcpu->lock);
sl_lock(&target_vcpu->interrupts_lock);

/*
* We only need to change state and (maybe) trigger a virtual IRQ if it
Expand Down Expand Up @@ -364,7 +363,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
/* Either way, make it pending. */
target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

sl_unlock(&target_vcpu->lock);
sl_unlock(&target_vcpu->interrupts_lock);

return ret;
}
Expand Down Expand Up @@ -399,31 +398,18 @@ static struct spci_value spci_msg_recv_return(const struct vm *receiver)
/**
* Prepares the vCPU to run by updating its state and fetching whether a return
* value needs to be forced onto the vCPU.
*
* Returns:
* - false if it fails to prepare `vcpu` to run.
- true if it succeeds in preparing `vcpu` to run. In this case,
`vcpu->execution_lock` has been acquired.
*/
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
struct spci_value *run_ret)
{
bool need_vm_lock;
bool need_vm_lock = false;
bool ret;

/*
* Check that the registers are available so that the vCPU can be run.
*
* The VM lock is not needed in the common case so it must only be taken
* when it is going to be needed. This ensures there are no inter-vCPU
* dependencies in the common run case meaning the sensitive context
* switch performance is consistent.
*/
sl_lock(&vcpu->lock);

/* The VM needs to be locked to deliver mailbox messages. */
need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
if (need_vm_lock) {
sl_unlock(&vcpu->lock);
sl_lock(&vcpu->vm->lock);
sl_lock(&vcpu->lock);
}

/*
* If the vCPU is already running somewhere then we can't run it here
* simultaneously. While it is actually running then the state should be
Expand All @@ -435,7 +421,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
* until this has finished, so count this state as still running for the
* purposes of this check.
*/
if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
if (!sl_try_lock(&vcpu->execution_lock)) {
/*
* vCPU is running on another pCPU.
*
Expand All @@ -444,8 +430,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
* return the sleep duration if needed.
*/
*run_ret = spci_error(SPCI_BUSY);
ret = false;
goto out;
return false;
}

if (atomic_load_explicit(&vcpu->vm->aborting, memory_order_relaxed)) {
Expand All @@ -458,8 +443,13 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
goto out;
}

/* The VM needs to be locked to deliver mailbox messages. */
need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
if (need_vm_lock) {
sl_lock(&vcpu->vm->lock);
}

switch (vcpu->state) {
case VCPU_STATE_RUNNING:
case VCPU_STATE_OFF:
case VCPU_STATE_ABORTED:
ret = false;
Expand Down Expand Up @@ -517,23 +507,18 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,

/* It has been decided that the vCPU should be run. */
vcpu->cpu = current->cpu;
vcpu->state = VCPU_STATE_RUNNING;

/*
* Mark the registers as unavailable now that we're about to reflect
* them onto the real registers. This will also prevent another physical
* CPU from trying to read these registers.
*/
vcpu->regs_available = false;

ret = true;

out:
sl_unlock(&vcpu->lock);
if (need_vm_lock) {
sl_unlock(&vcpu->vm->lock);
}

if (!ret) {
sl_unlock(&vcpu->execution_lock);
}

return ret;
}

Expand Down Expand Up @@ -1051,15 +1036,15 @@ bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
{
bool interrupted;

sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);

/*
* Don't block if there are enabled and pending interrupts, to match
* behaviour of wait_for_interrupt.
*/
interrupted = (current->interrupts.enabled_and_pending_count > 0);

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);

return interrupted;
}
Expand Down Expand Up @@ -1253,7 +1238,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
return -1;
}

sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);
if (enable) {
/*
* If it is pending and was not enabled before, increment the
Expand All @@ -1279,7 +1264,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
~intid_mask;
}

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);
return 0;
}

Expand All @@ -1297,7 +1282,7 @@ uint32_t api_interrupt_get(struct vcpu *current)
* Find the first enabled and pending interrupt ID, return it, and
* deactivate it.
*/
sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);
for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
uint32_t enabled_and_pending =
current->interrupts.interrupt_enabled[i] &
Expand All @@ -1317,7 +1302,7 @@ uint32_t api_interrupt_get(struct vcpu *current)
}
}

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);
return first_interrupt;
}

Expand Down
17 changes: 5 additions & 12 deletions src/arch/aarch64/hypervisor/handler.c
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
#include "hf/dlog.h"
#include "hf/panic.h"
#include "hf/spci.h"
#include "hf/spinlock.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
Expand All @@ -51,14 +52,6 @@
*/
#define CLIENT_ID_MASK UINT64_C(0xffff)

/**
* Returns a reference to the currently executing vCPU.
*/
static struct vcpu *current(void)
{
return (struct vcpu *)read_msr(tpidr_el2);
}

/**
* Saves the state of per-vCPU peripherals, such as the virtual timer, and
* informs the arch-independent sections that registers have been saved.
Expand Down Expand Up @@ -398,20 +391,20 @@ static void update_vi(struct vcpu *next)
*/
struct vcpu *vcpu = current();

sl_lock(&vcpu->lock);
sl_lock(&vcpu->interrupts_lock);
set_virtual_interrupt_current(
vcpu->interrupts.enabled_and_pending_count > 0);
sl_unlock(&vcpu->lock);
sl_unlock(&vcpu->interrupts_lock);
} else {
/*
* About to switch vCPUs, set the bit for the vCPU to which we
* are switching in the saved copy of the register.
*/
sl_lock(&next->lock);
sl_lock(&next->interrupts_lock);
set_virtual_interrupt(
&next->regs,
next->interrupts.enabled_and_pending_count > 0);
sl_unlock(&next->lock);
sl_unlock(&next->interrupts_lock);
}
}

Expand Down
21 changes: 16 additions & 5 deletions src/arch/aarch64/hypervisor/psci_handler.c
Original file line number Diff line number Diff line change
Expand Up @@ -296,7 +296,8 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
cpu_id_t target_affinity = arg0;
uint32_t lowest_affinity_level = arg1;
struct vm *vm = vcpu->vm;
struct vcpu_locked target_vcpu;
struct vcpu *target_vcpu;
struct vcpu_execution_locked target_vcpu_locked;
spci_vcpu_index_t target_vcpu_index =
vcpu_id_to_index(target_affinity);

Expand All @@ -311,10 +312,20 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
break;
}

target_vcpu = vcpu_lock(vm_get_vcpu(vm, target_vcpu_index));
*ret = vcpu_is_off(target_vcpu) ? PSCI_RETURN_OFF
: PSCI_RETURN_ON;
vcpu_unlock(&target_vcpu);
target_vcpu = vm_get_vcpu(vm, target_vcpu_index);
if (target_vcpu == current()) {
*ret = PSCI_RETURN_ON;
break;
}

if (!vcpu_try_lock(target_vcpu, &target_vcpu_locked)) {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

이 지점에서 vCPU가 켜져 있는지를 확인하기 위해서 락을 겁니다. 따라서 실행 중이 아니더라도 execution lock 을 걸어야 하는 경우가 발생하는데, 이것을 어떻게 하는 게 맞을까요?

  • execution lock을 status lock 과 register lock 으로 분리. 이 경우 register lock이 걸린 경우 status 가 무조건 실행 중이고, 따라서 status를 무조건 잠궈야 합니다. 이런 invariant를 어떻게 표현해야 할까요?
  • 그냥 무시. execution lock에 달린 주석과 행동이 달라지고 개발자들 입장에서 더 부자연스러울 수 있습니다.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  • PSCI_AFFINITY_INFO의 semantics가 궁금합니다.
  • 저는 lock을 분리하는건 너무 복잡도가 올라갈 것 같습니다. 하나로 남기는데 이렇게 vCPU의 상태를 확인하기 위한 경우가 생긴다면 "execution lock"의 이름이 잘못된 걸수도 있겠습니다. 혹시 적절한 이름을 suggest해주시길 부탁드려요.
  • 어떻게 결정을 내린다고 하더라도 try_lock을 잡을 때 왜 잡는지 주석이 필요할 것 같습니다.

Copy link
Collaborator Author

@efenniht efenniht Apr 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

PSCI_AFFINITY_INFO 는 ARM의 PSCI(Power State Coordination Interface) 가 지원하는 명령 중 하나입니다. 원래는 physical CPU의 꺼지고 켜짐을 조회하는 데 쓰이는데, 여기서는 hypervisor가 중간에 개입하여, secondary VM이 자신의 vCPU에 대한 정보를 얻을 수 있도록 하고 있습니다.

이 lock은 state_lock 같은 이름이 맞을 것 같은데, 그대로 둔다면 secondary VM A의

  • vCPU 0가 vCPU 1이 켜져 있는 지 확인하는 중에
  • primary VM이 vCPU 1을 실행하려 하면

Primary VM은 vCPU 1의 실행을 실패할 수 있습니다. 이것이 옳은 semantics인가요?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Semantics가 틀려보이긴 하네요... 원래 구현은 어땠는지 궁금합니다.

*ret = PSCI_RETURN_ON;
break;
}

*ret = vcpu_is_off(target_vcpu_locked) ? PSCI_RETURN_OFF
: PSCI_RETURN_ON;
vcpu_unlock(&target_vcpu_locked);
break;
}

Expand Down
10 changes: 10 additions & 0 deletions src/arch/aarch64/hypervisor/psci_handler.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,16 @@

#include "hf/cpu.h"

#include "msr.h"

bool psci_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
uintreg_t arg1, uintreg_t arg2, uintreg_t *ret,
struct vcpu **next);

/**
 * Returns a reference to the currently executing vCPU.
 *
 * NOTE(review): relies on tpidr_el2 being set to the running vCPU pointer
 * during world switch — confirm against the context-switch code that writes
 * this register.
 */
static inline struct vcpu *current(void)
{
	return (struct vcpu *)read_msr(tpidr_el2);
}
7 changes: 7 additions & 0 deletions src/arch/aarch64/inc/hf/arch/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
* the guarantees provided by atomic instructions introduced in Armv8.1 LSE.
*/

#include <stdatomic.h>
#include <stdint.h>

#include "hf/arch/types.h"
Expand Down Expand Up @@ -61,6 +62,12 @@ static inline void sl_lock(struct spinlock *l)
: "cc");
}

/**
 * Attempts to acquire the spinlock without blocking.
 *
 * Returns true if the lock was acquired, false if it is already held.
 */
static inline bool sl_try_lock(struct spinlock *l)
{
	/*
	 * atomic_flag_test_and_set_explicit returns the previous value of
	 * the flag, so the lock was free (and is now ours) exactly when it
	 * returns false. Acquire ordering pairs with the release performed
	 * in sl_unlock.
	 */
	return !atomic_flag_test_and_set_explicit((volatile atomic_flag *)&l->v,
						  memory_order_acquire);
}

static inline void sl_unlock(struct spinlock *l)
{
/*
Expand Down
Loading