core: make generic notifications virtualization-aware
Makes the generic notification handling aware of virtualization. Atomic
events are delivered with a guest_id parameter and asynchronous
notifications are started per guest_id.

struct notif_data is added as guest-specific data to track whether
notifications have been started for a guest.

While this patch compiles, it doesn't work as intended without patches
handling the ABI-specific side of things.

Signed-off-by: Jens Wiklander <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
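
As background (not part of this commit), a minimal caller-side sketch of the reworked API: the helper name poke_guest_bottom_half and the use of NOTIF_VALUE_DO_BOTTOM_HALF as the value are assumptions for illustration only. With CFG_NS_VIRTUALIZATION=n the guest_id argument is simply 0, matching the call sites updated below.

#include <kernel/notif.h>

/*
 * Illustrative helper (not from this patch): signal a guest's bottom half
 * only if that guest has started asynchronous notifications.
 * With CFG_NS_VIRTUALIZATION=n, callers pass guest_id 0.
 */
static void poke_guest_bottom_half(uint16_t guest_id)
{
	if (!notif_async_is_started(guest_id))
		return;

	notif_send_async(NOTIF_VALUE_DO_BOTTOM_HALF, guest_id);
}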
jenswi-linaro authored and jforissier committed Jul 2, 2024
1 parent beb9021 commit d237e61
Showing 8 changed files with 144 additions and 49 deletions.
6 changes: 3 additions & 3 deletions core/arch/arm/kernel/thread_spmc.c
@@ -690,7 +690,7 @@ static uint32_t spmc_enable_async_notif(uint32_t bottom_half_value,
notif_vm_id = vm_id;
cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);

notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
return TEE_SUCCESS;
}

@@ -1655,7 +1655,7 @@ void thread_spmc_set_async_notif_intid(int intid)
DMSG("Asynchronous notifications are ready");
}

void notif_send_async(uint32_t value)
void notif_send_async(uint32_t value, uint16_t guest_id __unused)
{
uint32_t old_itr_status = 0;

@@ -1668,7 +1668,7 @@ void notif_send_async(uint32_t value)
cpu_spin_unlock_xrestore(&spmc_notif_lock, old_itr_status);
}
#else
void notif_send_async(uint32_t value)
void notif_send_async(uint32_t value, uint16_t guest_id __unused)
{
/* global notification, delay notification interrupt */
uint32_t flags = BIT32(1);
20 changes: 16 additions & 4 deletions core/arch/arm/kernel/virtualization.c
@@ -10,6 +10,7 @@
#include <kernel/linker.h>
#include <kernel/misc.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
@@ -57,6 +58,7 @@ struct guest_partition {
tee_mm_entry_t *ta_ram;
tee_mm_entry_t *tables;
bool runtime_initialized;
bool got_guest_destroyed;
bool shutting_down;
uint16_t id;
struct refcount refc;
@@ -512,14 +514,24 @@ TEE_Result virt_guest_destroyed(uint16_t guest_id)
exceptions = cpu_spin_lock_xsave(&prtn_list_lock);

prtn = find_guest_by_id_unlocked(guest_id);
if (prtn)
prtn->shutting_down = true;
if (prtn && !prtn->got_guest_destroyed)
prtn->got_guest_destroyed = true;
else
prtn = NULL;

cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

virt_put_guest(prtn);
if (!prtn)
if (prtn) {
notif_deliver_atomic_event(NOTIF_EVENT_SHUTDOWN, prtn->id);

exceptions = cpu_spin_lock_xsave(&prtn_list_lock);
prtn->shutting_down = true;
cpu_spin_unlock_xrestore(&prtn_list_lock, exceptions);

virt_put_guest(prtn);
} else {
EMSG("Client with id %d is not found", guest_id);
}

return TEE_SUCCESS;
}
2 changes: 1 addition & 1 deletion core/arch/arm/tee/entry_fast.c
@@ -288,7 +288,7 @@ void __tee_entry_fast(struct thread_smc_args *args)

case OPTEE_SMC_ENABLE_ASYNC_NOTIF:
if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
args->a0 = OPTEE_SMC_RETURN_OK;
} else {
args->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
2 changes: 1 addition & 1 deletion core/arch/riscv/tee/entry_fast.c
@@ -247,7 +247,7 @@ void __tee_entry_fast(struct thread_abi_args *args)

case OPTEE_ABI_ENABLE_ASYNC_NOTIF:
if (IS_ENABLED(CFG_CORE_ASYNC_NOTIF)) {
notif_deliver_atomic_event(NOTIF_EVENT_STARTED);
notif_deliver_atomic_event(NOTIF_EVENT_STARTED, 0);
args->a0 = OPTEE_ABI_RETURN_OK;
} else {
args->a0 = OPTEE_ABI_RETURN_UNKNOWN_FUNCTION;
29 changes: 17 additions & 12 deletions core/include/kernel/notif.h
@@ -64,6 +64,7 @@ enum notif_event {
NOTIF_EVENT_STARTED,
NOTIF_EVENT_DO_BOTTOM_HALF,
NOTIF_EVENT_STOPPED,
NOTIF_EVENT_SHUTDOWN,
};

/*
@@ -78,21 +79,27 @@ enum notif_event {
* An atomic context means that interrupts are masked and a common spinlock
* is held. Calls via @atomic_cb are only atomic with regard to each
* other; other CPUs may execute yielding calls or even receive interrupts.
* If CFG_NS_VIRTUALIZATION=y then the code is executing in Nexus context
* without a partition activated; the @guest_id triggering the event is
* instead supplied as an argument to the callback. The @guest_id should be
* ignored if CFG_NS_VIRTUALIZATION isn't enabled.
*
* A yielding context means that the function is executing in a normal
* threaded context allowing RPC and synchronization with other threads
* using mutexes and condition variables.
* using mutexes and condition variables. If CFG_NS_VIRTUALIZATION=y then
* the active partition matches the guest or VM that triggered the event.
*/
struct notif_driver {
void (*atomic_cb)(struct notif_driver *ndrv, enum notif_event ev);
void (*atomic_cb)(struct notif_driver *ndrv, enum notif_event ev,
uint16_t guest_id);
void (*yielding_cb)(struct notif_driver *ndrv, enum notif_event ev);
SLIST_ENTRY(notif_driver) link;
};

#if defined(CFG_CORE_ASYNC_NOTIF)
bool notif_async_is_started(void);
bool notif_async_is_started(uint16_t guest_id);
#else
static inline bool notif_async_is_started(void)
static inline bool notif_async_is_started(uint16_t guest_id __unused)
{
return false;
}
@@ -115,9 +122,10 @@ TEE_Result notif_wait_timeout(uint32_t value, uint32_t timeout_ms);
* Send an asynchronous value, note that it must be <= NOTIF_ASYNC_VALUE_MAX
*/
#if defined(CFG_CORE_ASYNC_NOTIF)
void notif_send_async(uint32_t value);
void notif_send_async(uint32_t value, uint16_t guest_id);
#else
static inline void notif_send_async(uint32_t value __unused)
static inline void notif_send_async(uint32_t value __unused,
uint16_t guest_id __unused)
{
}
#endif
@@ -157,17 +165,14 @@ static inline uint32_t notif_get_value(bool *value_valid, bool *value_pending)
}
#endif

/*
* These are called from yielding calls
*/
#if defined(CFG_CORE_ASYNC_NOTIF)
void notif_deliver_atomic_event(enum notif_event ev);
void notif_deliver_atomic_event(enum notif_event ev, uint16_t guest_id);
void notif_deliver_event(enum notif_event ev);
#else
static inline void notif_deliver_atomic_event(enum notif_event ev __unused)
static inline void notif_deliver_atomic_event(enum notif_event ev __unused,
uint16_t guest_id __unused)
{
}

static inline void notif_deliver_event(enum notif_event ev __unused)
{
}
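Beyond the header changes above, a hypothetical notification driver adapted to the new atomic_cb signature could look like the sketch below. Only struct notif_driver, notif_register_driver() and the enum notif_event values come from the header; every example_* name is invented for illustration. The NOTIF_EVENT_SHUTDOWN case mirrors the delivery added to virt_guest_destroyed() earlier in this commit.

#include <compiler.h>
#include <kernel/notif.h>

static void example_atomic_cb(struct notif_driver *ndrv __unused,
			      enum notif_event ev, uint16_t guest_id)
{
	/* Atomic (Nexus) context: no partition active, guest_id identifies the guest */
	switch (ev) {
	case NOTIF_EVENT_STARTED:
		/* Guest guest_id has enabled asynchronous notifications */
		break;
	case NOTIF_EVENT_SHUTDOWN:
		/* Guest guest_id is being destroyed; drop any per-guest state */
		break;
	default:
		break;
	}
}

static void example_yielding_cb(struct notif_driver *ndrv __unused,
				enum notif_event ev __unused)
{
	/* Thread context: the partition of the triggering guest is active here */
}

static struct notif_driver example_driver = {
	.atomic_cb = example_atomic_cb,
	.yielding_cb = example_yielding_cb,
};

/* Registered once at init, e.g. notif_register_driver(&example_driver) */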
81 changes: 63 additions & 18 deletions core/kernel/notif.c
@@ -1,35 +1,58 @@
// SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2021-2023, Linaro Limited
* Copyright (c) 2021-2024, Linaro Limited
*/

#include <initcall.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <optee_rpc_cmd.h>
#include <types_ext.h>

#if defined(CFG_CORE_ASYNC_NOTIF)
struct notif_data {
bool notif_started;
};

static struct mutex notif_mutex = MUTEX_INITIALIZER;
static unsigned int notif_lock __nex_data = SPINLOCK_UNLOCK;
static bool notif_started;

static struct notif_data default_notif_data;
static unsigned int notif_data_id __nex_bss;

SLIST_HEAD(notif_driver_head, notif_driver);
static struct notif_driver_head notif_driver_head __nex_data =
SLIST_HEAD_INITIALIZER(&notif_driver_head);

static struct notif_data *get_notif_data(struct guest_partition *prtn)
{
if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
assert(prtn);
return virt_get_guest_spec_data(prtn, notif_data_id);
}
return &default_notif_data;
}

bool notif_async_is_started(void)
bool notif_async_is_started(uint16_t guest_id)
{
struct guest_partition *prtn = virt_get_guest(guest_id);
uint32_t old_itr_status = 0;
bool ret = false;

old_itr_status = cpu_spin_lock_xsave(&notif_lock);
ret = notif_started;
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || prtn) {
struct notif_data *ndata = get_notif_data(prtn);

old_itr_status = cpu_spin_lock_xsave(&notif_lock);
ret = ndata->notif_started;
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

virt_put_guest(prtn);
return ret;
}

@@ -57,31 +80,43 @@ void notif_unregister_driver(struct notif_driver *ndrv)
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

void notif_deliver_atomic_event(enum notif_event ev)
void notif_deliver_atomic_event(enum notif_event ev, uint16_t guest_id)
{
uint32_t old_itr_status = 0;
struct guest_partition *prtn = virt_get_guest(guest_id);
struct notif_data *ndata = get_notif_data(prtn);
struct notif_driver *nd = NULL;

assert(ev == NOTIF_EVENT_STARTED);
uint32_t old_itr_status = 0;

old_itr_status = cpu_spin_lock_xsave(&notif_lock);

if (notif_started) {
DMSG("Already started");
goto out;
switch (ev) {
case NOTIF_EVENT_STARTED:
if (ndata->notif_started) {
DMSG("Already started");
goto out;
}
ndata->notif_started = true;
break;
case NOTIF_EVENT_SHUTDOWN:
break;
default:
EMSG("Unknown event %d", (int)ev);
panic();
}
notif_started = true;

SLIST_FOREACH(nd, &notif_driver_head, link)
if (nd->atomic_cb)
nd->atomic_cb(nd, ev);
nd->atomic_cb(nd, ev, guest_id);

out:
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
virt_put_guest(prtn);
}

void notif_deliver_event(enum notif_event ev)
{
struct guest_partition *prtn = virt_get_current_guest();
struct notif_data *ndata = get_notif_data(prtn);
uint32_t old_itr_status = 0;
struct notif_driver *nd = NULL;
struct notif_driver *nd_tmp = NULL;
@@ -92,13 +127,13 @@ void notif_deliver_event(enum notif_event ev)
mutex_lock(&notif_mutex);
old_itr_status = cpu_spin_lock_xsave(&notif_lock);

if (!notif_started) {
if (!ndata || !ndata->notif_started) {
DMSG("Not started ev %d", (int)ev);
goto out;
}

if (ev == NOTIF_EVENT_STOPPED)
notif_started = false;
ndata->notif_started = false;

SLIST_FOREACH_SAFE(nd, &notif_driver_head, link, nd_tmp) {
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
@@ -108,7 +143,7 @@ void notif_deliver_event(enum notif_event ev)

old_itr_status = cpu_spin_lock_xsave(&notif_lock);

if (ev == NOTIF_EVENT_STOPPED && notif_started) {
if (ev == NOTIF_EVENT_STOPPED && ndata->notif_started) {
DMSG("Started again while stopping");
goto out;
}
@@ -117,6 +152,7 @@ void notif_deliver_event(enum notif_event ev)
out:
cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
mutex_unlock(&notif_mutex);
virt_put_guest(prtn);
}
#endif /*CFG_CORE_ASYNC_NOTIF*/

@@ -142,3 +178,12 @@ TEE_Result notif_wait_timeout(uint32_t value, uint32_t timeout_ms)
{
return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value, timeout_ms);
}

#ifdef CFG_NS_VIRTUALIZATION
static TEE_Result nex_init_notif(void)
{
return virt_add_guest_spec_data(&notif_data_id,
sizeof(struct notif_data), NULL);
}
nex_early_init(nex_init_notif);
#endif
4 changes: 2 additions & 2 deletions core/kernel/notif_default.c
@@ -82,12 +82,12 @@ uint32_t notif_get_value(bool *value_valid, bool *value_pending)
return res;
}

void notif_send_async(uint32_t value)
void notif_send_async(uint32_t value, uint16_t guest_id __maybe_unused)
{
uint32_t old_itr_status = 0;
struct itr_chip *itr_chip = interrupt_get_main_chip();

assert(value <= NOTIF_ASYNC_VALUE_MAX);
assert(value <= NOTIF_ASYNC_VALUE_MAX && !guest_id);
old_itr_status = cpu_spin_lock_xsave(&notif_default_lock);

bit_set(notif_values, value);