diff --git a/src/cpu/percpu.rs b/src/cpu/percpu.rs
index 0c967ef68..2030b40ff 100644
--- a/src/cpu/percpu.rs
+++ b/src/cpu/percpu.rs
@@ -12,6 +12,7 @@ use crate::address::{Address, PhysAddr, VirtAddr};
 use crate::cpu::tss::TSS_LIMIT;
 use crate::cpu::vmsa::init_guest_vmsa;
 use crate::error::SvsmError;
+use crate::line_buffer::LineBuffer;
 use crate::locking::{LockGuard, RWLock, SpinLock};
 use crate::mm::alloc::{allocate_page, allocate_zeroed_page};
 use crate::mm::pagetable::{get_init_pgtable_locked, PageTable, PageTableRef};
@@ -177,6 +178,7 @@ pub struct PerCpu {
     svsm_vmsa: Option<VmsaRef>,
     guest_vmsa: SpinLock<GuestVmsaRef>,
     reset_ip: u64,
+    ln_buf: LineBuffer,
 
     /// Address allocator for per-cpu 4k temporary mappings
     pub vrange_4k: VirtualRange,
@@ -197,6 +199,7 @@ impl PerCpu {
             svsm_vmsa: None,
             guest_vmsa: SpinLock::new(GuestVmsaRef::new()),
             reset_ip: 0xffff_fff0u64,
+            ln_buf: LineBuffer::new(),
             vrange_4k: VirtualRange::new(),
             vrange_2m: VirtualRange::new(),
         }
@@ -497,6 +500,10 @@ impl PerCpu {
             PAGE_SHIFT_2M,
         );
     }
+
+    pub fn get_line_buffer(&mut self) -> &mut LineBuffer {
+        &mut self.ln_buf
+    }
 }
 
 unsafe impl Sync for PerCpu {}
diff --git a/src/lib.rs b/src/lib.rs
index c1c0c1c64..f69e30829 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -18,7 +18,10 @@ pub mod fw_cfg;
 pub mod fw_meta;
 pub mod io;
 pub mod kernel_launch;
+pub mod line_buffer;
 pub mod locking;
+pub mod log_buffer;
+pub mod migrate;
 pub mod mm;
 pub mod protocols;
 pub mod requests;
diff --git a/src/line_buffer.rs b/src/line_buffer.rs
new file mode 100644
index 000000000..d97f1620a
--- /dev/null
+++ b/src/line_buffer.rs
@@ -0,0 +1,113 @@
+use crate::cpu::percpu::this_cpu_mut;
+use crate::log_buffer::LB;
+use crate::utils::immut_after_init::ImmutAfterInitCell;
+use core::fmt;
+use core::fmt::Write;
+
+const LINE_BUFFER_SIZE: usize = 256;
+pub struct LineBuffer {
+    buf: [u8; LINE_BUFFER_SIZE],
+    head: usize,
+    tail: usize,
+}
+
+impl LineBuffer {
+    pub const fn new() -> Self {
+        LineBuffer {
+            buf: [0; LINE_BUFFER_SIZE],
+            head: 0,
+            tail: 0,
+        }
+    }
+
+    pub fn write_buffer(&mut self, s: &str) {
+        for b in s.bytes() {
+            self.buf[self.head] = b;
+            self.head = (self.head + 1) % LINE_BUFFER_SIZE;
+            if b == 0xa {
+                /* write to global log buffer when '\n' character is encountered */
+                if self.tail <= self.head {
+                    let st = core::str::from_utf8(&self.buf[self.tail..self.head]).unwrap();
+                    unsafe { LB.write_log(st) };
+                } else {
+                    let st1 = core::str::from_utf8(&self.buf[self.tail..]).unwrap();
+                    let st2 = core::str::from_utf8(&self.buf[..self.head]).unwrap();
+                    unsafe { LB.write_log(st1) };
+                    unsafe { LB.write_log(st2) };
+                }
+                self.tail = self.head;
+            }
+        }
+    }
+}
+
+impl fmt::Write for LineBuffer {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        self.write_buffer(s);
+        Ok(())
+    }
+}
+
+#[derive(Clone, Copy)]
+struct BufferLogger {
+    component: &'static str,
+}
+
+impl BufferLogger {
+    fn new(component: &'static str) -> BufferLogger {
+        BufferLogger { component }
+    }
+}
+
+impl log::Log for BufferLogger {
+    fn enabled(&self, _metadata: &log::Metadata) -> bool {
+        true
+    }
+
+    fn log(&self, record: &log::Record) {
+        let component: &'static str = &self.component;
+        let line_buf: &mut LineBuffer = this_cpu_mut().get_line_buffer();
+        // Log format/detail depends on the level.
+        match record.metadata().level() {
+            log::Level::Error | log::Level::Warn => write!(
+                line_buf,
+                "[{}] {}: {}\n",
+                component,
+                record.metadata().level().as_str(),
+                record.args()
+            )
+            .expect("write error"),
+
+            log::Level::Info => {
+                write!(line_buf, "[{}] {}\n", component, record.args()).expect("write error")
+            }
+
+            log::Level::Debug | log::Level::Trace => write!(
+                line_buf,
+                "[{}/{}] {} {}\n",
+                component,
+                record.metadata().target(),
+                record.metadata().level().as_str(),
+                record.args()
+            )
+            .expect("write error"),
+        }
+    }
+
+    fn flush(&self) {}
+}
+
+static BUFFER_LOGGER: ImmutAfterInitCell<BufferLogger> = ImmutAfterInitCell::uninit();
+
+pub fn install_buffer_logger(component: &'static str) -> Result<(), ()> {
+    BUFFER_LOGGER
+        .init(&BufferLogger::new(component))
+        .expect("already initialized the logger");
+    if let Err(_) = log::set_logger(&*BUFFER_LOGGER) {
+        return Err(());
+    }
+
+    // Log levels are to be configured via the log's library feature configuration.
+    log::set_max_level(log::LevelFilter::Trace);
+    Ok(())
+}
diff --git a/src/log_buffer.rs b/src/log_buffer.rs
new file mode 100644
index 000000000..1f57fca5a
--- /dev/null
+++ b/src/log_buffer.rs
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+//
+//
+
+use crate::mm::alloc::allocate_zeroed_page;
+extern crate alloc;
+use crate::error::SvsmError;
+use crate::types::PAGE_SIZE;
+use alloc::vec::Vec;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+pub const BUF_SIZE: usize = PAGE_SIZE;
+pub const TAIL_MASK: usize = 0xffffusize;
+pub const HEAD_MASK: usize = 0xffffusize << 16;
+pub const WRITER_MASK: usize = 0xffffusize << 32;
+pub const READER_MASK: usize = 0x7fffusize << 48;
+pub const FULL_MASK: usize = 0x1usize << 63;
+
+/* Buffer state encoding:
+ * bits 0-15 : tail offset
+ * bits 16-31 : head offset
+ * bits 32-48 : number of writers writing the buffer
+ * bits 49-62: number of readers reading the buffer
+ * bit 63 : set if buffer is full
+ */
+#[derive(Clone, Copy, Debug)]
+struct LogBufferState {
+    tail: usize,
+    head: usize,
+    wc: usize,
+    rc: usize,
+    full: usize,
+}
+
+impl LogBufferState {
+    pub fn new(tail: usize, head: usize, wc: usize, rc: usize, full: usize) -> Self {
+        LogBufferState {
+            tail,
+            head,
+            wc,
+            rc,
+            full,
+        }
+    }
+
+    pub fn compute_state_write(&self, len: usize) -> Self {
+        let new_head = (self.head + len) % BUF_SIZE;
+        let place_left = if self.head >= self.tail {
+            BUF_SIZE - self.head + self.tail
+        } else {
+            self.tail - self.head
+        };
+        let is_full = if place_left <= len { 1 } else { self.full };
+        let new_tail = if is_full == 1 { new_head } else { self.tail };
+
+        LogBufferState::new(new_tail, new_head, self.wc + 1, self.rc, is_full)
+    }
+}
+
+impl From<usize> for LogBufferState {
+    fn from(state: usize) -> Self {
+        LogBufferState {
+            tail: state & TAIL_MASK,
+            head: (state & HEAD_MASK) >> 16,
+            wc: (state & WRITER_MASK) >> 32,
+            rc: (state & READER_MASK) >> 48,
+            full: (state & FULL_MASK) >> 63,
+        }
+    }
+}
+
+impl From<LogBufferState> for usize {
+    fn from(lb: LogBufferState) -> Self {
+        let t = lb.tail & TAIL_MASK;
+        let h = (lb.head << 16) & HEAD_MASK;
+        let w = (lb.wc << 32) & WRITER_MASK;
+        let r = (lb.rc << 48) & READER_MASK;
+        let f = (lb.full << 63) & FULL_MASK;
+        t | h | w | r | f
+    }
+}
+
+pub struct LogBuffer {
+    buf: Vec<u8>,
+    state: AtomicUsize,
+}
+
+impl LogBuffer {
+    pub const fn new() -> LogBuffer {
+        LogBuffer {
+            buf: Vec::new(),
+            state: AtomicUsize::new(0),
+        }
+    }
+
+    pub fn init(&mut self, buf_addr: *mut u8) {
+        self.buf = unsafe { Vec::from_raw_parts(buf_addr, BUF_SIZE, BUF_SIZE) };
+    }
+
+    pub fn write_log(&mut self, s: &str) {
+        let mut head;
+        let len = s.len();
+
+        loop {
+            let pos = self.state.load(Ordering::Acquire);
+            let st = LogBufferState::from(pos);
+            head = st.head;
+            /* wait if there are readers */
+            if st.rc > 0 {
+                core::hint::spin_loop();
+                continue;
+            }
+            let st_new = st.compute_state_write(len);
+            let new_pos = usize::from(st_new);
+            if self
+                .state
+                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+            {
+                break;
+            }
+            core::hint::spin_loop();
+        }
+
+        for b in s.bytes() {
+            self.buf[head] = b;
+            head = (head + 1) % BUF_SIZE;
+        }
+
+        /* Decrement writer count */
+        loop {
+            let pos = self.state.load(Ordering::Acquire);
+            let st = LogBufferState::from(pos);
+            let st_new = LogBufferState::new(st.tail, st.head, st.wc - 1, st.rc, st.full);
+            let new_pos = usize::from(st_new);
+            if self
+                .state
+                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+            {
+                break;
+            }
+            core::hint::spin_loop();
+        }
+    }
+
+    pub fn read_log(&mut self) -> Result<Vec<u8>, SvsmError> {
+        let ret: Result<Vec<u8>, SvsmError>;
+        let mut st;
+        loop {
+            let pos = self.state.load(Ordering::Acquire);
+            st = LogBufferState::from(pos);
+            /* wait if there are writers */
+            if st.wc > 0 {
+                core::hint::spin_loop();
+                continue;
+            }
+
+            let new_tail = st.head;
+            let st_new = LogBufferState::new(new_tail, st.head, st.wc, st.rc + 1, st.full);
+            let new_pos = usize::from(st_new);
+            if self
+                .state
+                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+            {
+                break;
+            }
+            core::hint::spin_loop();
+        }
+
+        if st.head == st.tail && st.full == 0 {
+            /* Buffer is empty */
+            ret = Ok(Vec::new());
+        } else if st.head > st.tail && st.full == 0 {
+            let vec = self.buf[st.tail..st.head].to_vec();
+            ret = Ok(vec);
+        } else {
+            let mut vec = self.buf[st.tail..].to_vec();
+            vec.extend_from_slice(&self.buf[..st.head]);
+            ret = Ok(vec);
+        }
+
+        /* clear the buffer-full status */
+        let is_full: usize = 0;
+
+        loop {
+            let pos = self.state.load(Ordering::Acquire);
+            let st = LogBufferState::from(pos);
+            let st_new = LogBufferState::new(st.tail, st.head, st.wc, st.rc - 1, is_full);
+            let new_pos = usize::from(st_new);
+            if self
+                .state
+                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+            {
+                break;
+            }
+            core::hint::spin_loop();
+        }
+        ret
+    }
+}
+
+pub static mut LB: LogBuffer = LogBuffer::new();
+
+pub fn init_log_buffer() {
+    let buf_addr = allocate_zeroed_page()
+        .expect("Failed to allocate buffer page")
+        .as_mut_ptr::<u8>();
+    unsafe { LB.init(buf_addr) };
+}
+
+pub fn migrate_log_buffer(log_buf: &LogBuffer) {
+    init_log_buffer();
+    let val = log_buf.state.load(Ordering::Acquire);
+    unsafe { LB.state.store(val, Ordering::Release) };
+    unsafe { LB.buf = log_buf.buf.clone() };
+}
diff --git a/src/migrate.rs b/src/migrate.rs
new file mode 100644
index 000000000..6cbece995
--- /dev/null
+++ b/src/migrate.rs
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+//
+
+use crate::address::VirtAddr;
+use crate::log_buffer::LogBuffer;
+
+// struct containing information that
+// is migrated from stage2 to svsm kernel
+#[repr(C)]
+pub struct MigrateInfo {
+    pub bitmap_addr: VirtAddr,
+    pub log_buf: &'static LogBuffer,
+}
+
+impl MigrateInfo {
+    pub fn new(vb: VirtAddr, lb: &'static LogBuffer) -> Self {
+        MigrateInfo {
+            bitmap_addr: vb,
+            log_buf: lb,
+        }
+    }
+}
diff --git a/src/stage2.rs b/src/stage2.rs
index a7b1daca6..35c6baae3 100644
--- a/src/stage2.rs
+++ b/src/stage2.rs
@@ -19,6 +19,9 @@ use svsm::cpu::percpu::{this_cpu_mut, PerCpu};
 use svsm::elf;
 use svsm::fw_cfg::FwCfg;
 use svsm::kernel_launch::KernelLaunchInfo;
+use svsm::line_buffer::install_buffer_logger;
+use svsm::log_buffer::{init_log_buffer, LB};
+use svsm::migrate::MigrateInfo;
 use svsm::mm::alloc::{memory_info, print_memory_info, root_mem_init};
 use svsm::mm::init_kernel_mapping_info;
 use svsm::mm::pagetable::{
@@ -83,7 +86,7 @@ static mut CONSOLE_SERIAL: SerialPort = SerialPort {
 };
 
 fn setup_env() {
-    install_console_logger("Stage2");
+    //install_console_logger("Stage2");
     init_kernel_mapping_info(
         VirtAddr::null(),
         VirtAddr::from(640 * 1024usize),
@@ -103,6 +106,10 @@ fn setup_env() {
         WRITER.lock().set(&mut CONSOLE_SERIAL);
     }
     init_console();
+    init_log_buffer();
+    if let Err(_) = install_buffer_logger("Stage2") {
+        panic!("log buffer installation error")
+    }
 
     // Console is fully working now and any unsupported configuration can be
     // properly reported.
@@ -286,6 +293,7 @@ pub extern "C" fn stage2_main(launch_info: &Stage1LaunchInfo) {
     let kernel_entry = kernel_elf.get_entry(kernel_vaddr_alloc_base);
 
     let valid_bitmap = valid_bitmap_addr();
+    let migrate_info = unsafe { MigrateInfo::new(VirtAddr::from(valid_bitmap.bits()), &LB) };
 
     // Shut down the GHCB
     shutdown_percpu();
@@ -294,7 +302,7 @@ pub extern "C" fn stage2_main(launch_info: &Stage1LaunchInfo) {
         asm!("jmp *%rax",
              in("rax") kernel_entry,
              in("r8") &launch_info,
-             in("r9") valid_bitmap.bits(),
+             in("r9") &migrate_info,
              options(att_syntax))
     };
diff --git a/src/svsm.rs b/src/svsm.rs
index e6362b360..6236691f6 100644
--- a/src/svsm.rs
+++ b/src/svsm.rs
@@ -34,6 +34,9 @@ use svsm::error::SvsmError;
 use svsm::fs::{initialize_fs, populate_ram_fs};
 use svsm::fw_cfg::FwCfg;
 use svsm::kernel_launch::KernelLaunchInfo;
+use svsm::line_buffer::install_buffer_logger;
+use svsm::log_buffer::migrate_log_buffer;
+use svsm::migrate::MigrateInfo;
 use svsm::mm::alloc::{memory_info, print_memory_info, root_mem_init};
 use svsm::mm::memory::init_memory_map;
 use svsm::mm::pagetable::paging_init;
@@ -66,7 +69,7 @@ extern "C" {
  * startup_64.
  *
  * %r8  Pointer to the KernelLaunchInfo structure
- * %r9  Pointer to the valid-bitmap from stage2
+ * %r9  Pointer to the MigrateInfo from stage2
  */
 global_asm!(
     r#"
@@ -310,9 +313,9 @@ fn mapping_info_init(launch_info: &KernelLaunchInfo) {
 }
 
 #[no_mangle]
-pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: VirtAddr) {
+pub extern "C" fn svsm_start(li: &KernelLaunchInfo, mi: &MigrateInfo) {
     let launch_info: KernelLaunchInfo = *li;
-    let vb_ptr = vb_addr.as_mut_ptr::<u64>();
+    let vb_ptr = mi.bitmap_addr.as_mut_ptr::<u64>();
 
     mapping_info_init(&launch_info);
 
@@ -351,6 +354,7 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: VirtAddr) {
 
     memory_init(&launch_info);
     migrate_valid_bitmap().expect("Failed to migrate valid-bitmap");
+    migrate_log_buffer(mi.log_buf);
 
     let kernel_elf_len = (launch_info.kernel_elf_stage2_virt_end
         - launch_info.kernel_elf_stage2_virt_start) as usize;
@@ -383,8 +387,11 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: VirtAddr) {
     unsafe {
         WRITER.lock().set(&mut CONSOLE_SERIAL);
     }
+    if let Err(_) = install_buffer_logger("SVSM") {
+        panic!("log buffer installation error")
+    }
     init_console();
-    install_console_logger("SVSM");
+    // install_console_logger("SVSM");
 
     log::info!("COCONUT Secure Virtual Machine Service Module (SVSM)");
 
@@ -423,7 +430,6 @@ pub extern "C" fn svsm_main() {
     invalidate_stage2().expect("Failed to invalidate Stage2 memory");
 
     let fw_cfg = FwCfg::new(&CONSOLE_IO);
-
     init_memory_map(&fw_cfg, &LAUNCH_INFO).expect("Failed to init guest memory map");
 
     initialize_fs();