Skip to content

Commit

Permalink
Implement a log buffer to store log messages (draft initial commit).
Browse files Browse the repository at this point in the history
Add Percpu line buffer implementation.

Signed-off-by: Vasant Karasulli <[email protected]>
  • Loading branch information
vsntk18 committed Jun 23, 2023
1 parent b4ca120 commit 5648746
Show file tree
Hide file tree
Showing 7 changed files with 388 additions and 7 deletions.
7 changes: 7 additions & 0 deletions src/cpu/percpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use crate::address::{Address, PhysAddr, VirtAddr};
use crate::cpu::tss::TSS_LIMIT;
use crate::cpu::vmsa::init_guest_vmsa;
use crate::error::SvsmError;
use crate::line_buffer::LineBuffer;
use crate::locking::{LockGuard, RWLock, SpinLock};
use crate::mm::alloc::{allocate_page, allocate_zeroed_page};
use crate::mm::pagetable::{get_init_pgtable_locked, PageTable, PageTableRef};
Expand Down Expand Up @@ -177,6 +178,7 @@ pub struct PerCpu {
svsm_vmsa: Option<VmsaRef>,
guest_vmsa: SpinLock<GuestVmsaRef>,
reset_ip: u64,
ln_buf: LineBuffer,

/// Address allocator for per-cpu 4k temporary mappings
pub vrange_4k: VirtualRange,
Expand All @@ -197,6 +199,7 @@ impl PerCpu {
svsm_vmsa: None,
guest_vmsa: SpinLock::new(GuestVmsaRef::new()),
reset_ip: 0xffff_fff0u64,
ln_buf: LineBuffer::new(),
vrange_4k: VirtualRange::new(),
vrange_2m: VirtualRange::new(),
}
Expand Down Expand Up @@ -497,6 +500,10 @@ impl PerCpu {
PAGE_SHIFT_2M,
);
}

/// Returns a mutable reference to this cpu's log line buffer,
/// used by the buffer logger to stage partial log lines.
pub fn get_line_buffer(&mut self) -> &mut LineBuffer {
    &mut self.ln_buf
}
}

unsafe impl Sync for PerCpu {}
Expand Down
3 changes: 3 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,10 @@ pub mod fw_cfg;
pub mod fw_meta;
pub mod io;
pub mod kernel_launch;
pub mod line_buffer;
pub mod locking;
pub mod log_buffer;
pub mod migrate;
pub mod mm;
pub mod protocols;
pub mod requests;
Expand Down
113 changes: 113 additions & 0 deletions src/line_buffer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
use crate::cpu::percpu::this_cpu_mut;
use crate::log_buffer::LB;
use crate::utils::immut_after_init::ImmutAfterInitCell;
use core::fmt;
use core::fmt::Write;

/// Capacity in bytes of each per-cpu line buffer.
const LINE_BUFFER_SIZE: usize = 256;
/// Per-cpu ring buffer that accumulates log bytes until a complete
/// line (terminated by '\n') can be forwarded to the global log buffer.
pub struct LineBuffer {
    buf: [u8; LINE_BUFFER_SIZE], // ring storage
    head: usize,                 // next write position
    tail: usize,                 // start of the not-yet-forwarded bytes
}

impl LineBuffer {
    /// Creates an empty line buffer.
    pub const fn new() -> Self {
        LineBuffer {
            buf: [0; LINE_BUFFER_SIZE],
            head: 0,
            tail: 0,
        }
    }

    /// Appends `s` to the ring buffer. Whenever a '\n' is seen — or the
    /// ring becomes completely full — the buffered bytes are forwarded
    /// to the global log buffer.
    pub fn write_buffer(&mut self, s: &str) {
        for b in s.bytes() {
            self.buf[self.head] = b;
            self.head = (self.head + 1) % LINE_BUFFER_SIZE;
            // Flush on newline. Also force a flush when the ring fills
            // up (head catches tail): without this, the old code's
            // `tail <= head` comparison saw an empty span and silently
            // dropped the whole pending line, and longer writes lapped
            // the ring and garbled it.
            let full = self.head == self.tail;
            if b == b'\n' || full {
                self.flush(full);
            }
        }
    }

    /// Forwards the bytes between `tail` and `head` to the global log
    /// buffer and marks them consumed. `full` distinguishes the
    /// "completely full" case from the "empty" case (both have
    /// head == tail).
    fn flush(&mut self, full: bool) {
        // Reassemble the (possibly wrapped) span into a contiguous
        // scratch buffer before the UTF-8 check: a multi-byte character
        // split at the wrap point would make `from_utf8` fail on either
        // half if the two slices were converted separately.
        let mut scratch = [0u8; LINE_BUFFER_SIZE];
        let len = if self.tail < self.head {
            let n = self.head - self.tail;
            scratch[..n].copy_from_slice(&self.buf[self.tail..self.head]);
            n
        } else if self.tail > self.head || full {
            let n1 = LINE_BUFFER_SIZE - self.tail;
            scratch[..n1].copy_from_slice(&self.buf[self.tail..]);
            scratch[n1..n1 + self.head].copy_from_slice(&self.buf[..self.head]);
            n1 + self.head
        } else {
            0 // head == tail and not full: nothing buffered
        };
        self.tail = self.head;
        if len == 0 {
            return;
        }
        // The bytes always originate from `&str` input, so they are
        // valid UTF-8 except when a forced full-ring flush cuts a
        // multi-byte character short; emit the valid prefix then
        // instead of panicking.
        let st = match core::str::from_utf8(&scratch[..len]) {
            Ok(s) => s,
            Err(e) => core::str::from_utf8(&scratch[..e.valid_up_to()]).unwrap(),
        };
        // SAFETY: mirrors the original access to the global log buffer;
        // NOTE(review): assumes a single writer per cpu — confirm.
        unsafe { LB.write_log(st) };
    }
}

impl fmt::Write for LineBuffer {
    /// fmt::Write hook so the line buffer can be the target of the
    /// write!/writeln! macros; appending to the ring cannot fail.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_buffer(s);
        Ok(())
    }
}

/// log::Log implementation that routes every record into the current
/// cpu's line buffer instead of a console backend.
#[derive(Clone, Copy)]
struct BufferLogger {
    component: &'static str, // component tag prepended to each message
}

impl BufferLogger {
fn new(component: &'static str) -> BufferLogger {
BufferLogger { component }
}
}

impl log::Log for BufferLogger {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}

fn log(&self, record: &log::Record) {
let component: &'static str = &self.component;
let line_buf: &mut LineBuffer = this_cpu_mut().get_line_buffer();
// Log format/detail depends on the level.
match record.metadata().level() {
log::Level::Error | log::Level::Warn => write!(
line_buf,
"[{}] {}: {}\n",
component,
record.metadata().level().as_str(),
record.args()
)
.expect("write error"),

log::Level::Info => {
write!(line_buf, "[{}] {}\n", component, record.args()).expect("write error")
}

log::Level::Debug | log::Level::Trace => write!(
line_buf,
"[{}/{}] {} {}\n",
component,
record.metadata().target(),
record.metadata().level().as_str(),
record.args()
)
.expect("write error"),
}
}

fn flush(&self) {}
}

static BUFFER_LOGGER: ImmutAfterInitCell<BufferLogger> = ImmutAfterInitCell::uninit();

/// Registers the buffer logger as the global `log` backend for the
/// given component name.
///
/// Panics if called twice (the ImmutAfterInitCell is already set);
/// returns Err(()) if another logger was already registered with the
/// `log` crate.
pub fn install_buffer_logger(component: &'static str) -> Result<(), ()> {
    BUFFER_LOGGER
        .init(&BufferLogger::new(component))
        .expect("already initialized the logger");
    // `set_logger` fails if a logger is already installed.
    log::set_logger(&*BUFFER_LOGGER).map_err(|_| ())?;

    // Log levels are to be configured via the log's library feature configuration.
    log::set_max_level(log::LevelFilter::Trace);
    Ok(())
}
220 changes: 220 additions & 0 deletions src/log_buffer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2022-2023 SUSE LLC
//
//
//

use crate::mm::alloc::allocate_zeroed_page;
extern crate alloc;
use crate::error::SvsmError;
use crate::types::PAGE_SIZE;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};

pub const BUF_SIZE: usize = PAGE_SIZE;
pub const TAIL_MASK: usize = 0xffffusize;
pub const HEAD_MASK: usize = 0xffffusize << 16;
pub const WRITER_MASK: usize = 0xffffusize << 32;
pub const READER_MASK: usize = 0x7fffusize << 48;
pub const FULL_MASK: usize = 0x1usize << 63;

/* Buffer state encoding:
 * bits 0-15  : tail offset
 * bits 16-31 : head offset
 * bits 32-47 : number of writers writing the buffer
 * bits 48-62 : number of readers reading the buffer
 * bit  63    : set if buffer is full
 */
/// Decoded form of the packed `state` word of `LogBuffer`.
#[derive(Clone, Copy, Debug)]
struct LogBufferState {
    tail: usize, // read position (bits 0-15 of the packed word)
    head: usize, // write position (bits 16-31)
    wc: usize,   // active writer count (bits 32-47)
    rc: usize,   // active reader count (bits 48-62)
    full: usize, // 1 if the buffer is full (bit 63)
}

impl LogBufferState {
    /// Bundles the five decoded state fields.
    pub fn new(tail: usize, head: usize, wc: usize, rc: usize, full: usize) -> Self {
        LogBufferState {
            tail,
            head,
            wc,
            rc,
            full,
        }
    }

    /// Returns the state after reserving `len` bytes for a writer:
    /// `head` advances by `len` (mod BUF_SIZE) and the writer count is
    /// incremented. If free space runs out the buffer is marked full
    /// and `tail` is dragged along with `head`, i.e. the oldest data
    /// gets overwritten.
    pub fn compute_state_write(&self, len: usize) -> Self {
        let new_head = (self.head + len) % BUF_SIZE;
        // Free bytes between head and tail; head == tail means the
        // whole buffer is free here (the already-full case keeps
        // `self.full` set below, so tail still tracks head).
        let place_left = if self.head >= self.tail {
            BUF_SIZE - self.head + self.tail
        } else {
            self.tail - self.head
        };
        let is_full = if place_left <= len { 1 } else { self.full };
        let new_tail = if is_full == 1 { new_head } else { self.tail };

        LogBufferState::new(new_tail, new_head, self.wc + 1, self.rc, is_full)
    }
}
impl From<usize> for LogBufferState {
    /// Unpacks the atomic state word into its component fields.
    fn from(state: usize) -> Self {
        Self::new(
            state & TAIL_MASK,
            (state & HEAD_MASK) >> 16,
            (state & WRITER_MASK) >> 32,
            (state & READER_MASK) >> 48,
            (state & FULL_MASK) >> 63,
        )
    }
}

impl From<LogBufferState> for usize {
    /// Packs the five fields back into the single-word encoding used by
    /// the atomic `state` of `LogBuffer`.
    fn from(lb: LogBufferState) -> Self {
        (lb.tail & TAIL_MASK)
            | ((lb.head << 16) & HEAD_MASK)
            | ((lb.wc << 32) & WRITER_MASK)
            | ((lb.rc << 48) & READER_MASK)
            | ((lb.full << 63) & FULL_MASK)
    }
}
/// Global ring buffer for log messages. Concurrent access is
/// coordinated through the packed `state` word; see `LogBufferState`
/// for the bit layout.
pub struct LogBuffer {
    buf: Vec<u8>,       // BUF_SIZE backing storage, installed by init()
    state: AtomicUsize, // packed tail/head/writer/reader/full state
}

impl LogBuffer {
    /// Creates an unbacked buffer; `init()` must install the backing
    /// page before any read or write.
    pub const fn new() -> LogBuffer {
        LogBuffer {
            buf: Vec::new(),
            state: AtomicUsize::new(0),
        }
    }

    /// Installs a BUF_SIZE backing page at `buf_addr`.
    ///
    /// NOTE(review): `Vec::from_raw_parts` requires memory that came
    /// from the global allocator, but this pointer comes from the page
    /// allocator — the Vec must never be dropped or reallocated, or
    /// the page is handed to the wrong allocator. Confirm.
    pub fn init(&mut self, buf_addr: *mut u8) {
        self.buf = unsafe { Vec::from_raw_parts(buf_addr, BUF_SIZE, BUF_SIZE) };
    }

    /// Appends `s` to the ring buffer, overwriting the oldest data when
    /// full (see `LogBufferState::compute_state_write`). Spins while
    /// readers are active, reserves the region with a CAS, copies the
    /// bytes, then drops the writer count.
    pub fn write_log(&mut self, s: &str) {
        let mut head;
        let len = s.len();

        loop {
            let pos = self.state.load(Ordering::Acquire);
            let st = LogBufferState::from(pos);
            head = st.head;
            /* wait if there are readers */
            if st.rc > 0 {
                core::hint::spin_loop();
                continue;
            }
            let st_new = st.compute_state_write(len);
            let new_pos = usize::from(st_new);
            if self
                .state
                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            core::hint::spin_loop();
        }

        // Copy into the region reserved above.
        for b in s.bytes() {
            self.buf[head] = b;
            head = (head + 1) % BUF_SIZE;
        }

        /* Decrement writer count */
        loop {
            let pos = self.state.load(Ordering::Acquire);
            let st = LogBufferState::from(pos);
            let st_new = LogBufferState::new(st.tail, st.head, st.wc - 1, st.rc, st.full);
            let new_pos = usize::from(st_new);
            // Release ordering on success publishes the byte stores
            // above to any reader that later acquires the state word;
            // the previous Acquire ordering did not order those stores.
            if self
                .state
                .compare_exchange(pos, new_pos, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            core::hint::spin_loop();
        }
    }

    /// Returns a copy of the buffered bytes and marks them consumed.
    /// Spins while writers are active, registers as a reader with a
    /// CAS (advancing `tail` to `head`), copies the data, then drops
    /// the reader count and clears the full flag.
    pub fn read_log(&mut self) -> Result<Vec<u8>, SvsmError> {
        let ret: Result<Vec<u8>, SvsmError>;
        let mut st;
        loop {
            let pos = self.state.load(Ordering::Acquire);
            st = LogBufferState::from(pos);
            /* wait if there are writers */
            if st.wc > 0 {
                core::hint::spin_loop();
                continue;
            }

            let new_tail = st.head;
            let st_new = LogBufferState::new(new_tail, st.head, st.wc, st.rc + 1, st.full);
            let new_pos = usize::from(st_new);
            if self
                .state
                .compare_exchange(pos, new_pos, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            core::hint::spin_loop();
        }

        // `st` still holds the pre-advance tail/head, which delimit the
        // data to copy.
        if st.head == st.tail && st.full == 0 {
            /* Buffer is empty */
            ret = Ok(Vec::new());
        } else if st.head > st.tail && st.full == 0 {
            let vec = self.buf[st.tail..st.head].to_vec();
            ret = Ok(vec);
        } else {
            // Wrapped (or completely full): two slices.
            let mut vec = self.buf[st.tail..].to_vec();
            vec.extend_from_slice(&self.buf[..st.head]);
            ret = Ok(vec);
        }

        /* clear the buffer-full status */
        let is_full: usize = 0;

        loop {
            let pos = self.state.load(Ordering::Acquire);
            let st = LogBufferState::from(pos);
            let st_new = LogBufferState::new(st.tail, st.head, st.wc, st.rc - 1, is_full);
            let new_pos = usize::from(st_new);
            // Release ordering on success orders the reads above before
            // the reader-count decrement, so a writer that acquires the
            // state won't overwrite data still being read.
            if self
                .state
                .compare_exchange(pos, new_pos, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            core::hint::spin_loop();
        }
        ret
    }
}

pub static mut LB: LogBuffer = LogBuffer::new();

/// Allocates one zeroed page and installs it as the global log
/// buffer's backing store. Must run before anything logs through `LB`.
pub fn init_log_buffer() {
    let buf_addr = allocate_zeroed_page()
        .expect("Failed to allocate buffer page")
        .as_mut_ptr::<u8>();
    // SAFETY: mutates the global LB — NOTE(review): assumes this runs
    // once during early bring-up with no concurrent access; confirm.
    unsafe { LB.init(buf_addr) };
}

pub fn migrate_log_buffer(log_buf: &LogBuffer) {
init_log_buffer();
let val = log_buf.state.load(Ordering::Acquire);
unsafe { LB.state.store(val, Ordering::Release) };
unsafe { LB.buf = log_buf.buf.clone() };
}
Loading

0 comments on commit 5648746

Please sign in to comment.