Skip to content

Commit

Permalink
Map user-mode ELF in Salus address space.
Browse files Browse the repository at this point in the history
This patch allows mapping the `umode` binary into the Salus address
space. More work might be needed for a better build system and
integration of the umode binary into Salus.

Signed-off-by: Gianluca Guida <[email protected]>
  • Loading branch information
glg-rv committed Dec 12, 2022
1 parent 54c282c commit ca212f7
Show file tree
Hide file tree
Showing 2 changed files with 136 additions and 28 deletions.
124 changes: 108 additions & 16 deletions src/hyp_map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,29 @@

use arrayvec::ArrayVec;
use page_tracking::{HwMemMap, HwMemRegion, HwMemRegionType, HwReservedMemType, HypPageAlloc};
use riscv_elf::{ElfMap, ElfSegment, ElfSegmentPerms};
use riscv_page_tables::{FirstStagePageTable, PteFieldBits, PteLeafPerms, Sv48};
use riscv_pages::{
InternalClean, Page, PageAddr, PageSize, RawAddr, SupervisorPhys, SupervisorVirt,
};
use riscv_regs::{satp, LocalRegisterCopy, SatpHelpers};

/// Maximum number of regions that will be mapped in the hypervisor.
const MAX_HYPMAP_REGIONS: usize = 32;
/// Maximum number of supervisor regions.
const MAX_HYPMAP_SUPERVISOR_REGIONS: usize = 32;
/// Maximum number of user regions.
const MAX_HYPMAP_USER_REGIONS: usize = 32;

/// Represents a region of the virtual address space of the hypervisor.
pub struct HypMapRegion {
/// Represents a virtual address region of the hypervisor with a fixed VA->PA Mapping.
pub struct HypMapFixedRegion {
    // Start of the virtual address range covered by this region.
    vaddr: PageAddr<SupervisorVirt>,
    // Start of the physical address range backing this region. For regions
    // built from the HW memory map, vaddr == paddr (identity mapping).
    paddr: PageAddr<SupervisorPhys>,
    // Length of the region, in 4k pages.
    page_count: usize,
    // PTE permission bits to use for every page of the mapping.
    pte_fields: PteFieldBits,
}

impl HypMapRegion {
/// Create an HypMapRegion from a Hw Memory Map entry.
pub fn from_hw_mem_region(r: &HwMemRegion) -> Option<Self> {
impl HypMapFixedRegion {
// Create a supervisor VA region from a Hw Memory Map entry.
fn from_hw_mem_region(r: &HwMemRegion) -> Option<Self> {
let perms = match r.region_type() {
HwMemRegionType::Available => {
// map available memory as rw - unsure what it'll be used for.
Expand All @@ -49,7 +52,7 @@ impl HypMapRegion {
if let Some(pte_perms) = perms {
let paddr = r.base();
// vaddr == paddr in mapping HW memory map.
// Unwrap okay. `paddr` is a page addr so it is aligned to the page.
// Unwrap okay. `r.base()` is a page addr so it is aligned to the page.
let vaddr = PageAddr::new(RawAddr::supervisor_virt(r.base().bits())).unwrap();
let page_count = PageSize::num_4k_pages(r.size()) as usize;
let pte_fields = PteFieldBits::leaf_with_perms(pte_perms);
Expand All @@ -64,7 +67,7 @@ impl HypMapRegion {
}
}

/// Map this region into a page table.
// Map this region into a page table.
fn map(
&self,
sv48: &FirstStagePageTable<Sv48>,
Expand Down Expand Up @@ -93,6 +96,81 @@ impl HypMapRegion {
}
}

/// Represents a virtual address region that must be allocated and populated to be mapped.
struct HypMapPopulatedRegion {
    // The address space where this region starts.
    vaddr: PageAddr<SupervisorVirt>,
    // Number of bytes of the VA area.
    size: usize,
    // PTE bits for the mappings.
    pte_fields: PteFieldBits,
    // Data to be populated at the beginning of the VA area. May be shorter
    // than `size`; only `min(data.len(), size)` bytes are copied at map time.
    data: &'static [u8],
}

impl HypMapPopulatedRegion {
    // Builds a user-space virtual address region from an ELF segment,
    // translating the segment permissions into user-mode PTE permissions.
    fn from_user_elf_segment(seg: &ElfSegment<'static>) -> Option<Self> {
        // Sanity check for segment alignment.
        //
        // In general an ELF might have segments overlapping in the same page,
        // possibly with different permissions. In order to maintain separation
        // and expected permissions on every page, the linker script for the
        // user ELF creates different segments at different pages. Failure to
        // do so would make `map_range()` in `map()` fail.
        //
        // The check below enforces that the segment starts at a 4k-page
        // aligned address. Unless the linking is completely corrupt, this also
        // means that it starts at a different page.
        // Assert is okay. This is a build error.
        assert!(PageSize::Size4k.is_aligned(seg.vaddr()));

        // Translate ELF permissions into user-mode leaf PTE permissions.
        let pte_perms = match seg.perms() {
            ElfSegmentPerms::ReadOnly => PteLeafPerms::UR,
            ElfSegmentPerms::ReadWrite => PteLeafPerms::URW,
            ElfSegmentPerms::ReadOnlyExecute => PteLeafPerms::URX,
        };
        let pte_fields = PteFieldBits::leaf_with_perms(pte_perms);
        // Unwrap okay. `seg.vaddr()` has been checked to be 4k-aligned above.
        let vaddr = PageAddr::new(RawAddr::supervisor_virt(seg.vaddr())).unwrap();
        Some(HypMapPopulatedRegion {
            vaddr,
            size: seg.size(),
            pte_fields,
            data: seg.data(),
        })
    }

    // Allocates backing pages, copies the region data into them, and maps
    // them at `self.vaddr` in the given page table.
    fn map(&self, sv48: &FirstStagePageTable<Sv48>, hyp_mem: &mut HypPageAlloc) {
        // Allocate the backing pages and populate them first.
        let num_pages = PageSize::num_4k_pages(self.size as u64);
        let pages = hyp_mem.take_pages_for_hyp_state(num_pages as usize);
        let copy_len = self.data.len().min(self.size);
        // Safe because we copy the minimum between the data size and the VA size.
        unsafe {
            core::ptr::copy(self.data.as_ptr(), pages.base().bits() as *mut u8, copy_len);
        }
        // Now map the populated pages into the page table.
        let mapper = sv48
            .map_range(self.vaddr, PageSize::Size4k, num_pages, &mut || {
                hyp_mem.take_pages_for_hyp_state(1).into_iter().next()
            })
            .unwrap();
        let va_pa_pairs = self
            .vaddr
            .iter_from()
            .zip(pages.base().iter_from())
            .take(num_pages as usize);
        for (virt, phys) in va_pa_pairs {
            // Safe because these pages are mapped into user mode and will not
            // be accessed in supervisor mode.
            unsafe {
                mapper.map_addr(virt, phys, self.pte_fields).unwrap();
            }
        }
    }
}

/// A page table that contains hypervisor mappings.
pub struct HypPageTable {
inner: FirstStagePageTable<Sv48>,
Expand All @@ -109,17 +187,27 @@ impl HypPageTable {

/// A set of global mappings of the hypervisor that can be used to create page tables.
pub struct HypMap {
regions: ArrayVec<HypMapRegion, MAX_HYPMAP_REGIONS>,
supervisor_regions: ArrayVec<HypMapFixedRegion, MAX_HYPMAP_SUPERVISOR_REGIONS>,
user_regions: ArrayVec<HypMapPopulatedRegion, MAX_HYPMAP_USER_REGIONS>,
}

impl HypMap {
/// Create a new hypervisor map from a hardware memory mem map.
pub fn new(mem_map: HwMemMap) -> HypMap {
let regions = mem_map
pub fn new(mem_map: HwMemMap, user_map: ElfMap<'static>) -> HypMap {
// All supervisor regions come from the HW memory map.
let supervisor_regions = mem_map
.regions()
.filter_map(HypMapRegion::from_hw_mem_region)
.filter_map(HypMapFixedRegion::from_hw_mem_region)
.collect();
HypMap { regions }
// All user regions come from the ELF segment.
let user_regions = user_map
.segments()
.filter_map(HypMapPopulatedRegion::from_user_elf_segment)
.collect();
HypMap {
supervisor_regions,
user_regions,
}
}

/// Create a new page table based on this memory map.
Expand All @@ -134,12 +222,16 @@ impl HypMap {
let sv48: FirstStagePageTable<Sv48> =
FirstStagePageTable::new(root_page).expect("creating first sv48");

// Map all the regions in the memory map that the hypervisor could need.
for r in &self.regions {
// Map supervisor regions
for r in &self.supervisor_regions {
r.map(&sv48, &mut || {
hyp_mem.take_pages_for_hyp_state(1).into_iter().next()
});
}
// Map user regions.
for r in &self.user_regions {
r.map(&sv48, hyp_mem);
}
HypPageTable { inner: sv48 }
}
}
40 changes: 28 additions & 12 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ use host_vm_loader::HostVmLoader;
use hyp_alloc::HypAlloc;
use hyp_map::HypMap;
use page_tracking::*;
use riscv_elf::ElfMap;
use riscv_page_tables::*;
use riscv_pages::*;
use riscv_regs::{hedeleg, henvcfg, hideleg, hie, scounteren};
Expand Down Expand Up @@ -432,17 +433,6 @@ extern "C" fn kernel_init(hart_id: u64, fdt_addr: u64) {
// NOTE: Do not modify the hardware memory map from here on.
let mem_map = mem_map; // Remove mutability.

println!("HW memory map:");
for (i, r) in mem_map.regions().enumerate() {
println!(
"[{}] region: 0x{:x} -> 0x{:x}, {}",
i,
r.base().bits(),
r.end().bits() - 1,
r.region_type()
);
}

// We start RAM in the host address space at the same location as it is in the supervisor
// address space.
//
Expand All @@ -459,8 +449,34 @@ extern "C" fn kernel_init(hart_id: u64, fdt_addr: u64) {
let guest_phys_size = mem_map.regions().last().unwrap().end().bits()
- mem_map.regions().next().unwrap().base().bits();

// Parse the user-mode ELF containing the user-mode task.
let user_elf = include_bytes!("../target/riscv64gc-unknown-none-elf/release/umode");
let user_map = ElfMap::new(user_elf).expect("Cannot load user-mode ELF");

println!("HW memory map:");
for (i, r) in mem_map.regions().enumerate() {
println!(
"[{:02}] region: 0x{:016x} -> 0x{:016x}, {}",
i,
r.base().bits(),
r.end().bits() - 1,
r.region_type()
);
}

println!("USER memory map:");
for (i, s) in user_map.segments().enumerate() {
println!(
"[{:02}] region: 0x{:016x} -> 0x{:016x}, {}",
i,
s.vaddr(),
s.vaddr() + s.size() as u64,
s.perms()
);
}

// Create the hypervisor mapping starting from the hardware memory map.
let hyp_map = HypMap::new(mem_map);
let hyp_map = HypMap::new(mem_map, user_map);

// The hypervisor mapping is complete. Can setup paging structures now.
setup_hyp_paging(hyp_map, &mut hyp_mem);
Expand Down

0 comments on commit ca212f7

Please sign in to comment.