Use List for Kmem
Medowhill committed Apr 1, 2021
1 parent 1e49214 commit f9931c4
Showing 4 changed files with 120 additions and 82 deletions.
102 changes: 70 additions & 32 deletions kernel-rs/src/kalloc.rs
@@ -1,10 +1,12 @@
//! Physical memory allocator, for user processes,
//! kernel stacks, page-table pages,
//! and pipe buffers. Allocates whole 4096-byte pages.
-use core::mem;
-use core::ptr;
+use core::{mem, pin::Pin};

+use pin_project::pin_project;

use crate::{
+    list::{List, ListEntry, ListNode},
lock::Spinlock,
memlayout::PHYSTOP,
page::Page,
@@ -17,23 +19,61 @@ extern "C" {
pub static mut end: [u8; 0];
}

#[repr(transparent)]
+#[pin_project]
struct Run {
-    next: *mut Run,
+    #[pin]
+    entry: ListEntry,
}

+impl Run {
+    /// # Safety
+    ///
+    /// It must be used only after initializing it with `Run::init`.
+    unsafe fn new() -> Self {
+        Self {
+            entry: unsafe { ListEntry::new() },
+        }
+    }
+
+    fn init(self: Pin<&mut Self>) {
+        self.project().entry.init();
+    }
+}

+// SAFETY: `Run` owns a `ListEntry`.
+unsafe impl ListNode for Run {
+    fn get_list_entry(&self) -> &ListEntry {
+        &self.entry
+    }
+
+    fn from_list_entry(list_entry: *const ListEntry) -> *const Self {
+        list_entry as _
+    }
+}

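A note on `from_list_entry`: the cast is sound only because `Run` is `#[repr(transparent)]` over its `ListEntry`, so a pointer to the entry is also a pointer to the node. A minimal user-land sketch of that round trip (stand-in `Entry`/`Node` types, not rv6's actual `ListEntry`):

// Stand-in for rv6's ListEntry; any type works as long as the node
// is a transparent wrapper around it.
struct Entry {
    _data: u64,
}

// Like `Run`: #[repr(transparent)] guarantees Node and Entry share
// the same layout, so the pointer cast below is sound.
#[repr(transparent)]
struct Node {
    entry: Entry,
}

fn get_list_entry(node: &Node) -> &Entry {
    &node.entry
}

fn from_list_entry(entry: *const Entry) -> *const Node {
    entry as *const Node
}

fn main() {
    let node = Node { entry: Entry { _data: 42 } };
    let entry_ptr = get_list_entry(&node) as *const Entry;
    // Recover the node pointer from the entry pointer, as alloc() does
    // when it turns a popped ListEntry back into a page address.
    assert_eq!(from_list_entry(entry_ptr), &node as *const Node);
}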
/// # Safety
///
-/// - This singly linked list does not have a cycle.
-/// - If head is null, then it is an empty list. Otherwise, it is nonempty, and
-///   head is its first element, which is a valid page.
+/// The address of each `Run` in `runs` can become a `Page` via `Page::from_usize`.
+// This implementation differs from xv6. Kmem of xv6 uses an intrusive singly linked list, while
+// this Kmem uses List, an intrusive doubly linked list type of rv6. In an intrusive singly
+// linked list, it is impossible to automatically remove an entry from the list when it is
+// dropped, so it is nontrivial to make a general intrusive singly linked list type in a safe
+// way. For this reason, we use a doubly linked list instead. It adds runtime overhead, but the
+// overhead seems negligible.
+#[pin_project]
pub struct Kmem {
-    head: *mut Run,
+    #[pin]
+    runs: List<Run>,
}
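The comment above is the crux of the change: a doubly linked entry can splice itself out of whatever list it is in without naming the list head, which is what lets entries unlink themselves on drop. A freestanding sketch of that splice (illustrative raw-pointer code, not rv6's `ListEntry` implementation):

// A doubly linked entry knows both neighbors, so it can remove itself.
// A singly linked entry would need its predecessor, which it cannot
// reach from `self` alone.
struct RawEntry {
    prev: *mut RawEntry,
    next: *mut RawEntry,
}

impl RawEntry {
    /// Splice `self` out of its list and make it a singleton ring.
    ///
    /// # Safety
    ///
    /// `self.prev` and `self.next` must point to valid entries that
    /// currently link back to `self`.
    unsafe fn detach(&mut self) {
        let me = self as *mut RawEntry;
        (*self.prev).next = self.next;
        (*self.next).prev = self.prev;
        self.prev = me;
        self.next = me;
    }
}

fn main() {
    // Build a two-entry ring by hand, then detach one entry.
    let mut a = RawEntry { prev: core::ptr::null_mut(), next: core::ptr::null_mut() };
    let mut b = RawEntry { prev: core::ptr::null_mut(), next: core::ptr::null_mut() };
    let (pa, pb) = (&mut a as *mut RawEntry, &mut b as *mut RawEntry);
    unsafe {
        (*pa).prev = pb;
        (*pa).next = pb;
        (*pb).prev = pa;
        (*pb).next = pa;
        (*pb).detach();
        assert_eq!((*pa).next, pa); // `a` is a singleton ring again
    }
}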

impl Kmem {
-    pub const fn new() -> Self {
+    /// # Safety
+    ///
+    /// It must be used only after initializing it with `Kmem::init`.
+    pub const unsafe fn new() -> Self {
Self {
-            head: ptr::null_mut(),
+            runs: unsafe { List::new() },
}
}

@@ -43,7 +83,9 @@ impl Kmem {
///
/// There must be no existing pages. It implies that this method should be
/// called only once.
-    pub unsafe fn init(&mut self) {
+    pub unsafe fn init(mut self: Pin<&mut Self>) {
+        self.as_mut().project().runs.init();
+
// SAFETY: safe to acquire only the address of a static variable.
let pa_start = pgroundup(unsafe { end.as_ptr() as usize });
let pa_end = pgrounddown(PHYSTOP);
@@ -53,35 +95,31 @@ impl Kmem {
// * end <= pa < PHYSTOP
// * the safety condition of this method guarantees that the
// created page does not overlap with existing pages
-            self.free(unsafe { Page::from_usize(pa) });
+            self.as_ref()
+                .get_ref()
+                .free(unsafe { Page::from_usize(pa) });
}
}

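`init` hands every whole page in `[pgroundup(end), pgrounddown(PHYSTOP))` to `free`. The rounding helpers follow the usual xv6 bit-mask definitions; a sketch with illustrative constants (assuming `PGSIZE` is a power of two):

const PGSIZE: usize = 4096;

/// Round `addr` up to the next page boundary.
const fn pgroundup(addr: usize) -> usize {
    (addr + PGSIZE - 1) & !(PGSIZE - 1)
}

/// Round `addr` down to the current page boundary.
const fn pgrounddown(addr: usize) -> usize {
    addr & !(PGSIZE - 1)
}

fn main() {
    // A partial page at either end is never handed out.
    assert_eq!(pgroundup(0x8000_0001), 0x8000_1000);
    assert_eq!(pgrounddown(0x8000_0fff), 0x8000_0000);
    let (end, phystop) = (0x8000_0123usize, 0x8020_0000usize);
    let pages = (pgrounddown(phystop) - pgroundup(end)) / PGSIZE;
    assert_eq!(pages, 0x1ff); // every whole page between end and PHYSTOP
}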
-    pub fn free(&mut self, mut page: Page) {
+    pub fn free(&self, mut page: Page) {
// Fill with junk to catch dangling refs.
page.write_bytes(1);
-        let pa = page.into_usize();
-        debug_assert!(
-            // SAFETY: safe to acquire only the address of a static variable.
-            pa % PGSIZE == 0 && (unsafe { end.as_ptr() as usize }..PHYSTOP).contains(&pa),
-            "Kmem::free"
-        );
-        let mut r = pa as *mut Run;
-        // SAFETY: By the invariant of Page, it does not create a cycle in this list and
-        // thus is safe.
-        unsafe { (*r).next = self.head };
-        self.head = r;
+
+        let run = page.as_uninit_mut();
+        // SAFETY: `run` will be initialized by the following `init`.
+        let run = run.write(unsafe { Run::new() });
+        let mut run = unsafe { Pin::new_unchecked(run) };
+        run.as_mut().init();
+        self.runs.push_front(run.as_ref().get_ref());
+
+        // Since the page has returned to the list, forget the page.
+        mem::forget(page);
}

-    pub fn alloc(&mut self) -> Option<Page> {
-        if self.head.is_null() {
-            return None;
-        }
-        // SAFETY: head is not null and the structure of this list
-        // is maintained by the invariant.
-        let next = unsafe { (*self.head).next };
-        // SAFETY: the first element is a valid page by the invariant.
-        let mut page = unsafe { Page::from_usize(mem::replace(&mut self.head, next) as _) };
+    pub fn alloc(&self) -> Option<Page> {
+        let run = self.runs.pop_front()?;
+        // SAFETY: the invariant of `Kmem`.
+        let mut page = unsafe { Page::from_usize(run as _) };
// fill with junk
page.write_bytes(5);
Some(page)
12 changes: 7 additions & 5 deletions kernel-rs/src/kernel.rs
@@ -74,6 +74,7 @@ pub struct KernelBuilder {

pub printer: Spinlock<Printer>,

+    #[pin]
pub kmem: Spinlock<Kmem>,

/// The kernel's memory manager.
@@ -132,7 +133,7 @@ impl KernelBuilder {
console: Sleepablelock::new("CONS", Console::new()),
uart: Uart::new(),
printer: Spinlock::new("PRINTLN", Printer::new()),
kmem: Spinlock::new("KMEM", Kmem::new()),
kmem: Spinlock::new("KMEM", unsafe { Kmem::new() }),
memory: MaybeUninit::uninit(),
ticks: Sleepablelock::new("time", 0),
procs: ProcsBuilder::zero(),
@@ -217,7 +218,7 @@ pub unsafe fn kernel_main() -> ! {
static STARTED: AtomicBool = AtomicBool::new(false);

if cpuid() == 0 {
-        let kernel = unsafe { kernel_builder_unchecked_pin().project() };
+        let mut kernel = unsafe { kernel_builder_unchecked_pin().project() };

// Initialize the kernel.

@@ -230,10 +231,11 @@ pub unsafe fn kernel_main() -> ! {
println!();

// Physical page allocator.
-        unsafe { kernel.kmem.get_mut().init() };
+        unsafe { kernel.kmem.as_mut().get_pin_mut().init() };

// Create kernel memory manager.
-        let memory = KernelMemory::new(kernel.kmem).expect("PageTable::new failed");
+        let memory =
+            KernelMemory::new(kernel.kmem.as_ref().get_ref()).expect("PageTable::new failed");

// Turn on paging.
unsafe { kernel.memory.write(memory).init_hart() };
@@ -260,7 +262,7 @@
kernel.file_system.log.disk.get_mut().init();

// First user process.
-        procs.user_proc_init(kernel.kmem);
+        procs.user_proc_init(kernel.kmem.as_ref().get_ref());

STARTED.store(true, Ordering::Release);
} else {
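Most of the churn in this file comes from `Kmem` now being pinned: callers go through `Pin<&mut _>` and `pin_project`'s `.project()`. A minimal sketch of that pattern (using the `pin_project` crate; `Wrapper` and its fields are illustrative, not rv6 code):

use core::pin::Pin;

use pin_project::pin_project;

#[pin_project]
struct Wrapper {
    #[pin]
    list_like: u8, // stands in for an address-sensitive field such as List<Run>
    counter: u32,  // ordinary field
}

fn update(mut w: Pin<&mut Wrapper>) {
    let this = w.as_mut().project();
    // The #[pin] field is only handed out re-pinned...
    let _pinned: Pin<&mut u8> = this.list_like;
    // ...while plain fields project to ordinary mutable references.
    *this.counter += 1;
}

fn main() {
    let mut w = Wrapper { list_like: 0, counter: 0 };
    // Pin::new suffices here because this toy Wrapper is Unpin;
    // rv6 instead obtains its Pin from the pinned kernel builder.
    update(Pin::new(&mut w));
    assert_eq!(w.counter, 1);
}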
21 changes: 19 additions & 2 deletions kernel-rs/src/page.rs
@@ -1,11 +1,12 @@
use core::{
mem,
+    mem::MaybeUninit,
ops::{Deref, DerefMut},
ptr,
ptr::NonNull,
};

-use crate::{println, riscv::PGSIZE, vm::PAddr};
+use crate::{riscv::PGSIZE, vm::PAddr};

/// Page type.
#[repr(align(4096))]
@@ -71,6 +72,23 @@ impl Page {
inner: unsafe { NonNull::new_unchecked(addr as *mut _) },
}
}
+
+    pub fn as_uninit_mut<T>(&mut self) -> &mut MaybeUninit<T> {
+        // TODO(https://github.com/kaist-cp/rv6/issues/471):
+        // Use const_assert! (or an equivalent) instead. Currently, using T inside const_assert!
+        // incurs a compile error: "can't use generic parameters from outer function".
+        // There is a workaround using feature(const_generics) and
+        // feature(const_evaluatable_checked), but enabling them makes the compiler panic.
+        // Once the compiler is updated, we will change the following lines to static checks.
+        assert!(mem::size_of::<T>() <= PGSIZE);
+        assert_eq!(PGSIZE % mem::align_of::<T>(), 0);
+
+        // SAFETY: self.inner is an array of length PGSIZE aligned at a PGSIZE-byte boundary.
+        // The assertions above show that it can hold a value of T. As it contains arbitrary
+        // data, we cannot treat it as &mut T. Instead, we use &mut MaybeUninit<T>. This is ok
+        // because T and MaybeUninit<T> have the same size, alignment, and ABI.
+        unsafe { &mut *(self.inner.as_ptr() as *mut MaybeUninit<T>) }
+    }
}
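A user-land illustration of the pattern `as_uninit_mut` enables (stand-in `RawPage` type, not rv6 code): junk-filled page memory is viewed as `MaybeUninit<T>`, which is legal for arbitrary bytes, and is treated as `T` only after `write` initializes it:

use core::mem::{align_of, size_of, MaybeUninit};

const PGSIZE: usize = 4096;

#[repr(align(4096))]
struct RawPage([u8; PGSIZE]);

fn as_uninit_mut<T>(page: &mut RawPage) -> &mut MaybeUninit<T> {
    // Runtime stand-ins for the assertions in the method above.
    assert!(size_of::<T>() <= PGSIZE);
    assert_eq!(PGSIZE % align_of::<T>(), 0);
    // SAFETY: the page is large enough and aligned for T, and
    // MaybeUninit<T> tolerates the junk contents.
    unsafe { &mut *(page.0.as_mut_ptr() as *mut MaybeUninit<T>) }
}

fn main() {
    let mut page = RawPage([1; PGSIZE]); // filled with junk
    let slot = as_uninit_mut::<u64>(&mut page);
    let value = slot.write(0xdead_beef); // first real initialization
    assert_eq!(*value, 0xdead_beef);
}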

impl Deref for Page {
@@ -91,7 +109,6 @@ impl Drop for Page {
fn drop(&mut self) {
// HACK(@efenniht): we really need linear type here:
// https://github.com/rust-lang/rfcs/issues/814
println!("page must never drop.");
panic!("Page must never drop.");
}
}
67 changes: 24 additions & 43 deletions kernel-rs/src/pipe.rs
@@ -1,14 +1,11 @@
use core::{mem, ops::Deref, ptr::NonNull};

-use static_assertions::const_assert;
-
use crate::{
file::{FileType, RcFile},
kernel::Kernel,
lock::Spinlock,
page::Page,
proc::{CurrentProc, WaitChannel},
-    riscv::PGSIZE,
vm::UVAddr,
};

@@ -135,16 +132,12 @@ impl Deref for AllocatedPipe {
impl Kernel {
pub fn allocate_pipe(&self) -> Result<(RcFile, RcFile), ()> {
let page = self.kmem.alloc().ok_or(())?;
-        // SAFETY: by the invariant of `Page`, `page` is always non-null.
-        let mut ptr = unsafe { NonNull::new_unchecked(page.into_usize() as *mut Pipe) };
-
-        // `Pipe` must be aligned with `Page`.
-        const_assert!(mem::size_of::<Pipe>() <= PGSIZE);
+        let mut page = scopeguard::guard(page, |page| self.kmem.free(page));
+        let ptr = page.as_uninit_mut();

-        //TODO(https://github.com/kaist-cp/rv6/issues/367): Since Pipe is a huge struct, need to check whether stack is used to fill `*ptr`.
-        // SAFETY: `ptr` holds a valid, unique page allocated from `self.alloc()`,
-        // and the pipe size and alignment are compatible with the page.
-        let _ = unsafe { ptr.as_uninit_mut() }.write(Pipe {
+        // TODO(https://github.com/kaist-cp/rv6/issues/367):
+        // Since Pipe is a huge struct, need to check whether stack is used to fill `*ptr`.
+        let ptr = NonNull::from(ptr.write(Pipe {
inner: Spinlock::new(
"pipe",
PipeInner {
@@ -157,36 +150,24 @@
),
read_waitchannel: WaitChannel::new(),
write_waitchannel: WaitChannel::new(),
-        });
-        let f0 = self
-            .ftable
-            .alloc_file(
-                FileType::Pipe {
-                    pipe: AllocatedPipe { ptr },
-                },
-                true,
-                false,
-            )
-            // SAFETY: ptr is an address of a page obtained by alloc().
-            .map_err(|_| {
-                self.kmem
-                    .free(unsafe { Page::from_usize(ptr.as_ptr() as _) })
-            })?;
-        let f1 = self
-            .ftable
-            .alloc_file(
-                FileType::Pipe {
-                    pipe: AllocatedPipe { ptr },
-                },
-                false,
-                true,
-            )
-            // SAFETY: ptr is an address of a page obtained by alloc().
-            .map_err(|_| {
-                self.kmem
-                    .free(unsafe { Page::from_usize(ptr.as_ptr() as _) })
-            })?;
-
+        }));
+        let f0 = self.ftable.alloc_file(
+            FileType::Pipe {
+                pipe: AllocatedPipe { ptr },
+            },
+            true,
+            false,
+        )?;
+        let f1 = self.ftable.alloc_file(
+            FileType::Pipe {
+                pipe: AllocatedPipe { ptr },
+            },
+            false,
+            true,
+        )?;
+
+        // Since files have been created successfully, prevent the page from being deallocated.
+        mem::forget(scopeguard::ScopeGuard::into_inner(page));
Ok((f0, f1))
}
}
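The error-handling rewrite above leans on the `scopeguard` crate: the page is armed with a cleanup closure right after allocation, every `?` failure path frees it automatically, and only the success path defuses the guard. A miniature of that flow (illustrative resource and fallible step, not rv6 code):

use std::mem;

fn fallible_step(succeed: bool) -> Result<(), ()> {
    if succeed { Ok(()) } else { Err(()) }
}

fn acquire(succeed: bool) -> Result<(), ()> {
    let resource = vec![0u8; 16]; // stands in for the allocated Page
    // If we bail out below, the closure runs and releases the resource,
    // just as `|page| self.kmem.free(page)` returns the page to Kmem.
    let guard = scopeguard::guard(resource, |r| drop(r));
    fallible_step(succeed)?; // on Err, the guard frees the resource
    // Success: defuse the guard and keep the resource alive elsewhere,
    // mirroring mem::forget on the page once the Pipe owns it.
    let resource = scopeguard::ScopeGuard::into_inner(guard);
    mem::forget(resource); // leaks here; in rv6 the Pipe pointer now owns the page
    Ok(())
}

fn main() {
    assert_eq!(acquire(true), Ok(()));
    assert_eq!(acquire(false), Err(()));
}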
@@ -197,7 +178,7 @@ impl AllocatedPipe {
// SAFETY:
// If `Pipe::close()` returned true, this means all `AllocatedPipe`s were closed.
// Hence, we can free the `Pipe`.
-            // Also, the following is safe since `ptr` holds a `Pipe` stored in a valid page allocated from `kernel().alloc()`.
+            // Also, the following is safe since `ptr` holds a `Pipe` stored in a valid page allocated from `Kmem::alloc`.
Some(unsafe { Page::from_usize(self.ptr.as_ptr() as _) })
} else {
None
