From 53b16ab89f51fe43b4024aba97c61eb115f5dbf4 Mon Sep 17 00:00:00 2001 From: kxxt Date: Thu, 8 Dec 2022 13:58:29 +0800 Subject: [PATCH 1/4] fix cargo.toml --- os8-ref/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/os8-ref/Cargo.toml b/os8-ref/Cargo.toml index 2f93010..1aaa586 100644 --- a/os8-ref/Cargo.toml +++ b/os8-ref/Cargo.toml @@ -14,7 +14,7 @@ log = "0.4" riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] } lock_api = "=0.4.6" xmas-elf = "0.7.0" -virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" } +virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers", rev = "93f821c" } easy-fs = { path = "../easy-fs" } [profile.release] From b797b9873f0b43e4928c1a237280b09ae78d9ab7 Mon Sep 17 00:00:00 2001 From: kxxt Date: Thu, 8 Dec 2022 13:59:54 +0800 Subject: [PATCH 2/4] move os8-ref to os8 --- os8/src/console.rs | 12 +- os8/src/fs/README.md | 0 os8/src/fs/inode.rs | 169 ++++++++++++ os8/src/fs/mod.rs | 45 ++++ os8/src/fs/pipe.rs | 179 +++++++++++++ os8/src/fs/stdio.rs | 48 ++++ os8/src/main.rs | 7 +- os8/src/mm/README.md | 0 os8/src/mm/address.rs | 259 ++++++++++++++++++ os8/src/mm/frame_allocator.rs | 137 ++++++++++ os8/src/mm/heap_allocator.rs | 51 ++++ os8/src/mm/memory_set.rs | 393 ++++++++++++++++++++++++++++ os8/src/mm/mod.rs | 29 ++ os8/src/mm/page_table.rs | 260 ++++++++++++++++++ os8/src/sync/README.md | 0 os8/src/sync/condvar.rs | 39 +++ os8/src/sync/mod.rs | 11 + os8/src/sync/mutex.rs | 88 +++++++ os8/src/sync/semaphore.rs | 45 ++++ os8/src/sync/up.rs | 31 +++ os8/src/syscall/README.md | 0 os8/src/syscall/fs.rs | 114 ++++++++ os8/src/syscall/mod.rs | 100 +++++++ os8/src/syscall/process.rs | 158 +++++++++++ os8/src/syscall/sync.rs | 143 ++++++++++ os8/src/syscall/thread.rs | 86 ++++++ os8/src/task/README.md | 0 os8/src/task/context.rs | 33 +++ os8/src/task/id.rs | 262 +++++++++++++++++++ os8/src/task/kthread.rs | 76 ++++++ os8/src/task/manager.rs | 46 ++++ os8/src/task/mod.rs | 158 +++++++++++ os8/src/task/process.rs | 280 ++++++++++++++++++++ os8/src/task/processor.rs | 121 +++++++++ os8/src/task/stackless_coroutine.rs | 125 +++++++++ os8/src/task/switch.S | 34 +++ os8/src/task/switch.rs | 16 ++ os8/src/task/task.rs | 140 ++++++++++ os8/src/trap/README.md | 0 os8/src/trap/context.rs | 47 ++++ os8/src/trap/mod.rs | 133 ++++++++++ os8/src/trap/trap.S | 69 +++++ 42 files changed, 3935 insertions(+), 9 deletions(-) delete mode 100644 os8/src/fs/README.md create mode 100644 os8/src/fs/inode.rs create mode 100644 os8/src/fs/mod.rs create mode 100644 os8/src/fs/pipe.rs create mode 100644 os8/src/fs/stdio.rs delete mode 100644 os8/src/mm/README.md create mode 100644 os8/src/mm/address.rs create mode 100644 os8/src/mm/frame_allocator.rs create mode 100644 os8/src/mm/heap_allocator.rs create mode 100644 os8/src/mm/memory_set.rs create mode 100644 os8/src/mm/mod.rs create mode 100644 os8/src/mm/page_table.rs delete mode 100644 os8/src/sync/README.md create mode 100644 os8/src/sync/condvar.rs create mode 100644 os8/src/sync/mod.rs create mode 100644 os8/src/sync/mutex.rs create mode 100644 os8/src/sync/semaphore.rs create mode 100644 os8/src/sync/up.rs delete mode 100644 os8/src/syscall/README.md create mode 100644 os8/src/syscall/fs.rs create mode 100644 os8/src/syscall/mod.rs create mode 100644 os8/src/syscall/process.rs create mode 100644 os8/src/syscall/sync.rs create mode 100644 os8/src/syscall/thread.rs delete mode 100644 os8/src/task/README.md create mode 100644 os8/src/task/context.rs create mode 
100644 os8/src/task/id.rs create mode 100644 os8/src/task/kthread.rs create mode 100644 os8/src/task/manager.rs create mode 100644 os8/src/task/mod.rs create mode 100644 os8/src/task/process.rs create mode 100644 os8/src/task/processor.rs create mode 100644 os8/src/task/stackless_coroutine.rs create mode 100644 os8/src/task/switch.S create mode 100644 os8/src/task/switch.rs create mode 100644 os8/src/task/task.rs delete mode 100644 os8/src/trap/README.md create mode 100644 os8/src/trap/context.rs create mode 100644 os8/src/trap/mod.rs create mode 100644 os8/src/trap/trap.S diff --git a/os8/src/console.rs b/os8/src/console.rs index a23a0e0..c90f9e6 100644 --- a/os8/src/console.rs +++ b/os8/src/console.rs @@ -34,13 +34,9 @@ macro_rules! println { } } -/* -以下代码提供了与颜色相关的 ANSI 转义字符,以及彩色输出可以使用的函数与宏。 - -可以使用它们,甚至扩展它们,来提升开发体验和显示效果。 -*/ - -// 使用 ANSI 转义字符来加上颜色 +/// 以下代码提供了与颜色相关的 ANSI 转义字符,以及彩色输出可以使用的函数与宏。 +/// 可以使用它们,甚至扩展它们,来提升开发体验和显示效果。 +/// 使用 ANSI 转义字符来加上颜色 #[macro_export] macro_rules! colorize { ($content: ident, $foreground_color: ident) => { @@ -66,6 +62,7 @@ pub fn print_colorized( .unwrap(); } +/// 带色彩的 print #[macro_export] macro_rules! print_colorized { ($fmt: literal, $foreground_color: expr, $background_color: expr $(, $($arg: tt)+)?) => { @@ -73,6 +70,7 @@ macro_rules! print_colorized { }; } +/// 带色彩的 println #[macro_export] macro_rules! println_colorized { ($fmt: literal, $foreground_color: expr, $background_color: expr $(, $($arg: tt)+)?) => { diff --git a/os8/src/fs/README.md b/os8/src/fs/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/fs/inode.rs b/os8/src/fs/inode.rs new file mode 100644 index 0000000..5e4ca68 --- /dev/null +++ b/os8/src/fs/inode.rs @@ -0,0 +1,169 @@ +use easy_fs::{ + EasyFileSystem, + Inode, +}; +use crate::drivers::BLOCK_DEVICE; +use crate::sync::UPSafeCell; +use alloc::sync::Arc; +use lazy_static::*; +use bitflags::*; +use alloc::vec::Vec; +use super::File; +use crate::mm::UserBuffer; + +/// A wrapper around a filesystem inode +/// to implement File trait atop +pub struct OSInode { + readable: bool, + writable: bool, + inner: UPSafeCell, +} + +/// The OS inode inner in 'UPSafeCell' +pub struct OSInodeInner { + offset: usize, + inode: Arc, +} + +impl OSInode { + /// Construct an OS inode from a inode + pub fn new( + readable: bool, + writable: bool, + inode: Arc, + ) -> Self { + Self { + readable, + writable, + inner: unsafe { UPSafeCell::new(OSInodeInner { + offset: 0, + inode, + })}, + } + } + /// Read all data inside a inode into vector + pub fn read_all(&self) -> Vec { + let mut inner = self.inner.exclusive_access(); + let mut buffer = [0u8; 512]; + let mut v: Vec = Vec::new(); + loop { + let len = inner.inode.read_at(inner.offset, &mut buffer); + if len == 0 { + break; + } + inner.offset += len; + v.extend_from_slice(&buffer[..len]); + } + v + } +} + +lazy_static! { + /// The root of all inodes, or '/' in short + pub static ref ROOT_INODE: Arc = { + let efs = EasyFileSystem::open(BLOCK_DEVICE.clone()); + Arc::new(EasyFileSystem::root_inode(&efs)) + }; +} + +/// List all files in the filesystems +pub fn list_apps() { + println!("/**** APPS ****"); + for app in ROOT_INODE.ls() { + println!("{}", app); + } + println!("**************/"); +} + +bitflags! 
{ + /// Flags for opening files + pub struct OpenFlags: u32 { + const RDONLY = 0; + const WRONLY = 1 << 0; + const RDWR = 1 << 1; + const CREATE = 1 << 9; + const TRUNC = 1 << 10; + } +} + +impl OpenFlags { + /// Get the current read write permission on an inode + /// does not check validity for simplicity + /// returns (readable, writable) + pub fn read_write(&self) -> (bool, bool) { + if self.is_empty() { + (true, false) + } else if self.contains(Self::WRONLY) { + (false, true) + } else { + (true, true) + } + } +} + +/// Open a file by path +pub fn open_file(name: &str, flags: OpenFlags) -> Option> { + let (readable, writable) = flags.read_write(); + if flags.contains(OpenFlags::CREATE) { + if let Some(inode) = ROOT_INODE.find(name) { + // clear size + inode.clear(); + Some(Arc::new(OSInode::new( + readable, + writable, + inode, + ))) + } else { + // create file + ROOT_INODE.create(name) + .map(|inode| { + Arc::new(OSInode::new( + readable, + writable, + inode, + )) + }) + } + } else { + ROOT_INODE.find(name) + .map(|inode| { + if flags.contains(OpenFlags::TRUNC) { + inode.clear(); + } + Arc::new(OSInode::new( + readable, + writable, + inode + )) + }) + } +} + +impl File for OSInode { + fn readable(&self) -> bool { self.readable } + fn writable(&self) -> bool { self.writable } + fn read(&self, mut buf: UserBuffer) -> usize { + let mut inner = self.inner.exclusive_access(); + let mut total_read_size = 0usize; + for slice in buf.buffers.iter_mut() { + let read_size = inner.inode.read_at(inner.offset, *slice); + if read_size == 0 { + break; + } + inner.offset += read_size; + total_read_size += read_size; + } + total_read_size + } + fn write(&self, buf: UserBuffer) -> usize { + let mut inner = self.inner.exclusive_access(); + let mut total_write_size = 0usize; + for slice in buf.buffers.iter() { + let write_size = inner.inode.write_at(inner.offset, *slice); + assert_eq!(write_size, slice.len()); + inner.offset += write_size; + total_write_size += write_size; + } + total_write_size + } +} diff --git a/os8/src/fs/mod.rs b/os8/src/fs/mod.rs new file mode 100644 index 0000000..c14151e --- /dev/null +++ b/os8/src/fs/mod.rs @@ -0,0 +1,45 @@ +mod stdio; +mod inode; +mod pipe; + +use crate::mm::UserBuffer; + +/// The common abstraction of all IO resources +pub trait File : Send + Sync { + fn readable(&self) -> bool; + fn writable(&self) -> bool; + fn read(&self, buf: UserBuffer) -> usize; + fn write(&self, buf: UserBuffer) -> usize; +} + +/// The stat of a inode +#[repr(C)] +#[derive(Debug)] +pub struct Stat { + /// ID of device containing file + pub dev: u64, + /// inode number + pub ino: u64, + /// file type and mode + pub mode: StatMode, + /// number of hard links + pub nlink: u32, + /// unused pad + pad: [u64; 7], +} + +bitflags! 
{ + /// The mode of a inode + /// whether a directory or a file + pub struct StatMode: u32 { + const NULL = 0; + /// directory + const DIR = 0o040000; + /// ordinary regular file + const FILE = 0o100000; + } +} + +pub use stdio::{Stdin, Stdout}; +pub use inode::{OSInode, open_file, OpenFlags, list_apps}; +pub use pipe::{Pipe, make_pipe}; diff --git a/os8/src/fs/pipe.rs b/os8/src/fs/pipe.rs new file mode 100644 index 0000000..0827594 --- /dev/null +++ b/os8/src/fs/pipe.rs @@ -0,0 +1,179 @@ +use super::File; +use alloc::sync::{Arc, Weak}; +use crate::sync::UPSafeCell; +use crate::mm::UserBuffer; + +use crate::task::suspend_current_and_run_next; + +/// One end of a pipe +pub struct Pipe { + readable: bool, + writable: bool, + buffer: Arc>, +} + +impl Pipe { + /// Create the read end of a pipe from a ring buffer + pub fn read_end_with_buffer(buffer: Arc>) -> Self { + Self { + readable: true, + writable: false, + buffer, + } + } + /// Create the write end of a pipe with a ring buffer + pub fn write_end_with_buffer(buffer: Arc>) -> Self { + Self { + readable: false, + writable: true, + buffer, + } + } +} + +const RING_BUFFER_SIZE: usize = 32; + +#[derive(Copy, Clone, PartialEq)] +enum RingBufferStatus { + FULL, + EMPTY, + NORMAL, +} + +/// The underlying ring buffer of a pipe +pub struct PipeRingBuffer { + arr: [u8; RING_BUFFER_SIZE], + head: usize, + tail: usize, + status: RingBufferStatus, + write_end: Option>, +} + +impl PipeRingBuffer { + pub fn new() -> Self { + Self { + arr: [0; RING_BUFFER_SIZE], + head: 0, + tail: 0, + status: RingBufferStatus::EMPTY, + write_end: None, + } + } + /// Set the write end bound to this buffer + pub fn set_write_end(&mut self, write_end: &Arc) { + self.write_end = Some(Arc::downgrade(write_end)); + } + /// Write into the buffer + pub fn write_byte(&mut self, byte: u8) { + self.status = RingBufferStatus::NORMAL; + self.arr[self.tail] = byte; + self.tail = (self.tail + 1) % RING_BUFFER_SIZE; + if self.tail == self.head { + self.status = RingBufferStatus::FULL; + } + } + /// Read from the buffer + pub fn read_byte(&mut self) -> u8 { + self.status = RingBufferStatus::NORMAL; + let c = self.arr[self.head]; + self.head = (self.head + 1) % RING_BUFFER_SIZE; + if self.head == self.tail { + self.status = RingBufferStatus::EMPTY; + } + c + } + /// Get the length of remaining data in the buffer + pub fn available_read(&self) -> usize { + if self.status == RingBufferStatus::EMPTY { + 0 + } else { + if self.tail > self.head { + self.tail - self.head + } else { + self.tail + RING_BUFFER_SIZE - self.head + } + } + } + /// Get the length of remaining space in the buffer + pub fn available_write(&self) -> usize { + if self.status == RingBufferStatus::FULL { + 0 + } else { + RING_BUFFER_SIZE - self.available_read() + } + } + /// Check if all write ends bounded to this buffer are closed + pub fn all_write_ends_closed(&self) -> bool { + self.write_end.as_ref().unwrap().upgrade().is_none() + } +} + +/// Crate a pipe +/// return (read_end, write_end) +pub fn make_pipe() -> (Arc, Arc) { + let buffer = Arc::new(unsafe { + UPSafeCell::new(PipeRingBuffer::new()) + }); + let read_end = Arc::new( + Pipe::read_end_with_buffer(buffer.clone()) + ); + let write_end = Arc::new( + Pipe::write_end_with_buffer(buffer.clone()) + ); + buffer.exclusive_access().set_write_end(&write_end); + (read_end, write_end) +} + +impl File for Pipe { + fn readable(&self) -> bool { self.readable } + fn writable(&self) -> bool { self.writable } + fn read(&self, buf: UserBuffer) -> usize { + 
assert_eq!(self.readable(), true); + let mut buf_iter = buf.into_iter(); + let mut read_size = 0usize; + loop { + let mut ring_buffer = self.buffer.exclusive_access(); + let loop_read = ring_buffer.available_read(); + if loop_read == 0 { + if ring_buffer.all_write_ends_closed() { + return read_size; + } + drop(ring_buffer); + suspend_current_and_run_next(); + continue; + } + // read at most loop_read bytes + for _ in 0..loop_read { + if let Some(byte_ref) = buf_iter.next() { + unsafe { *byte_ref = ring_buffer.read_byte(); } + read_size += 1; + } else { + return read_size; + } + } + } + } + fn write(&self, buf: UserBuffer) -> usize { + assert_eq!(self.writable(), true); + let mut buf_iter = buf.into_iter(); + let mut write_size = 0usize; + loop { + let mut ring_buffer = self.buffer.exclusive_access(); + let loop_write = ring_buffer.available_write(); + if loop_write == 0 { + drop(ring_buffer); + suspend_current_and_run_next(); + continue; + } + // write at most loop_write bytes + for _ in 0..loop_write { + if let Some(byte_ref) = buf_iter.next() { + ring_buffer.write_byte(unsafe { *byte_ref }); + write_size += 1; + } else { + return write_size; + } + } + } + } +} diff --git a/os8/src/fs/stdio.rs b/os8/src/fs/stdio.rs new file mode 100644 index 0000000..87dca0e --- /dev/null +++ b/os8/src/fs/stdio.rs @@ -0,0 +1,48 @@ +use super::File; +use crate::mm::{UserBuffer}; +use crate::sbi::console_getchar; +use crate::task::suspend_current_and_run_next; + +/// The standard input +pub struct Stdin; +/// The standard output +pub struct Stdout; + +impl File for Stdin { + fn readable(&self) -> bool { true } + fn writable(&self) -> bool { false } + fn read(&self, mut user_buf: UserBuffer) -> usize { + assert_eq!(user_buf.len(), 1); + // busy loop + let mut c: usize; + loop { + c = console_getchar(); + if c == 0 { + suspend_current_and_run_next(); + continue; + } else { + break; + } + } + let ch = c as u8; + unsafe { user_buf.buffers[0].as_mut_ptr().write_volatile(ch); } + 1 + } + fn write(&self, _user_buf: UserBuffer) -> usize { + panic!("Cannot write to stdin!"); + } +} + +impl File for Stdout { + fn readable(&self) -> bool { false } + fn writable(&self) -> bool { true } + fn read(&self, _user_buf: UserBuffer) -> usize{ + panic!("Cannot read from stdout!"); + } + fn write(&self, user_buf: UserBuffer) -> usize { + for buffer in user_buf.buffers.iter() { + print!("{}", core::str::from_utf8(*buffer).unwrap()); + } + user_buf.len() + } +} diff --git a/os8/src/main.rs b/os8/src/main.rs index 8036bca..df21f04 100644 --- a/os8/src/main.rs +++ b/os8/src/main.rs @@ -30,6 +30,8 @@ extern crate alloc; #[macro_use] mod console; mod config; +mod drivers; +mod fs; mod lang_items; mod logging; mod mm; @@ -39,8 +41,6 @@ mod syscall; mod task; mod timer; mod trap; -mod drivers; -mod fs; core::arch::global_asm!(include_str!("entry.asm")); @@ -67,6 +67,9 @@ pub fn rust_main() -> ! { trap::init(); trap::enable_timer_interrupt(); timer::set_next_trigger(); + // Uncomment following lines and see what happens! + // task::kernel_stackless_coroutine_test(); + // task::kernel_stackful_coroutine_test(); fs::list_apps(); task::add_initproc(); task::run_tasks(); diff --git a/os8/src/mm/README.md b/os8/src/mm/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/mm/address.rs b/os8/src/mm/address.rs new file mode 100644 index 0000000..5c24ae7 --- /dev/null +++ b/os8/src/mm/address.rs @@ -0,0 +1,259 @@ +//! Implementation of physical and virtual address and page number. 
+use super::PageTableEntry; +use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS}; +use core::fmt::{self, Debug, Formatter}; + +/// Definitions +#[repr(C)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct PhysAddr(pub usize); + +#[repr(C)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct VirtAddr(pub usize); + +#[repr(C)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct PhysPageNum(pub usize); + +#[repr(C)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct VirtPageNum(pub usize); + +/// Debugging + +impl Debug for VirtAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("VA:{:#x}", self.0)) + } +} +impl Debug for VirtPageNum { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("VPN:{:#x}", self.0)) + } +} +impl Debug for PhysAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("PA:{:#x}", self.0)) + } +} +impl Debug for PhysPageNum { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("PPN:{:#x}", self.0)) + } +} + +/// T: {PhysAddr, VirtAddr, PhysPageNum, VirtPageNum} +/// T -> usize: T.0 +/// usize -> T: usize.into() + +impl From for PhysAddr { + fn from(v: usize) -> Self { + Self(v) + } +} +impl From for PhysPageNum { + fn from(v: usize) -> Self { + Self(v) + } +} +impl From for VirtAddr { + fn from(v: usize) -> Self { + Self(v) + } +} +impl From for VirtPageNum { + fn from(v: usize) -> Self { + Self(v) + } +} +impl From for usize { + fn from(v: PhysAddr) -> Self { + v.0 + } +} +impl From for usize { + fn from(v: PhysPageNum) -> Self { + v.0 + } +} +impl From for usize { + fn from(v: VirtAddr) -> Self { + v.0 + } +} +impl From for usize { + fn from(v: VirtPageNum) -> Self { + v.0 + } +} + +impl VirtAddr { + pub fn floor(&self) -> VirtPageNum { + VirtPageNum(self.0 / PAGE_SIZE) + } + pub fn ceil(&self) -> VirtPageNum { + VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) + } + pub fn page_offset(&self) -> usize { + self.0 & (PAGE_SIZE - 1) + } + pub fn aligned(&self) -> bool { + self.page_offset() == 0 + } +} +impl From for VirtPageNum { + fn from(v: VirtAddr) -> Self { + assert_eq!(v.page_offset(), 0); + v.floor() + } +} +impl From for VirtAddr { + fn from(v: VirtPageNum) -> Self { + Self(v.0 << PAGE_SIZE_BITS) + } +} +impl PhysAddr { + pub fn floor(&self) -> PhysPageNum { + PhysPageNum(self.0 / PAGE_SIZE) + } + pub fn ceil(&self) -> PhysPageNum { + PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) + } + pub fn page_offset(&self) -> usize { + self.0 & (PAGE_SIZE - 1) + } + pub fn aligned(&self) -> bool { + self.page_offset() == 0 + } +} +impl From for PhysPageNum { + fn from(v: PhysAddr) -> Self { + assert_eq!(v.page_offset(), 0); + v.floor() + } +} +impl From for PhysAddr { + fn from(v: PhysPageNum) -> Self { + Self(v.0 << PAGE_SIZE_BITS) + } +} + +impl VirtPageNum { + pub fn indexes(&self) -> [usize; 3] { + let mut vpn = self.0; + let mut idx = [0usize; 3]; + for i in (0..3).rev() { + idx[i] = vpn & 511; + vpn >>= 9; + } + idx + } +} + +impl PhysAddr { + pub fn get_ref(&self) -> &'static T { + unsafe { (self.0 as *const T).as_ref().unwrap() } + } + pub fn get_mut(&self) -> &'static mut T { + unsafe { (self.0 as *mut T).as_mut().unwrap() } + } +} +impl PhysPageNum { + pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] { + let pa: PhysAddr = (*self).into(); + unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) } + } + pub fn get_bytes_array(&self) -> 
&'static mut [u8] { + let pa: PhysAddr = (*self).into(); + unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) } + } + pub fn get_mut(&self) -> &'static mut T { + let pa: PhysAddr = (*self).into(); + pa.get_mut() + } +} + +pub trait StepByOne { + fn step(&mut self); +} +impl StepByOne for VirtPageNum { + fn step(&mut self) { + self.0 += 1; + } +} + +impl StepByOne for PhysPageNum { + fn step(&mut self) { + self.0 += 1; + } +} + +#[derive(Copy, Clone)] +/// a simple range structure for type T +pub struct SimpleRange +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + l: T, + r: T, +} +impl SimpleRange +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + pub fn new(start: T, end: T) -> Self { + assert!(start <= end, "start {:?} > end {:?}!", start, end); + Self { l: start, r: end } + } + pub fn get_start(&self) -> T { + self.l + } + pub fn get_end(&self) -> T { + self.r + } +} +impl IntoIterator for SimpleRange +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + type Item = T; + type IntoIter = SimpleRangeIterator; + fn into_iter(self) -> Self::IntoIter { + SimpleRangeIterator::new(self.l, self.r) + } +} +/// iterator for the simple range structure +pub struct SimpleRangeIterator +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + current: T, + end: T, +} +impl SimpleRangeIterator +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + pub fn new(l: T, r: T) -> Self { + Self { current: l, end: r } + } +} +impl Iterator for SimpleRangeIterator +where + T: StepByOne + Copy + PartialEq + PartialOrd + Debug, +{ + type Item = T; + fn next(&mut self) -> Option { + if self.current == self.end { + None + } else { + let t = self.current; + self.current.step(); + Some(t) + } + } +} + +/// a simple range structure for virtual page number +pub type VPNRange = SimpleRange; diff --git a/os8/src/mm/frame_allocator.rs b/os8/src/mm/frame_allocator.rs new file mode 100644 index 0000000..5be113f --- /dev/null +++ b/os8/src/mm/frame_allocator.rs @@ -0,0 +1,137 @@ +//! Implementation of [`FrameAllocator`] which +//! controls all the frames in the operating system. 
+ +use super::{PhysAddr, PhysPageNum}; +use crate::config::MEMORY_END; +use crate::sync::UPSafeCell; +use alloc::vec::Vec; +use core::fmt::{self, Debug, Formatter}; +use lazy_static::*; + +/// manage a frame which has the same lifecycle as the tracker +#[derive(Clone)] +pub struct FrameTracker { + pub ppn: PhysPageNum, +} + +impl FrameTracker { + pub fn new(ppn: PhysPageNum) -> Self { + // page cleaning + let bytes_array = ppn.get_bytes_array(); + for i in bytes_array { + *i = 0; + } + Self { ppn } + } +} + +impl Debug for FrameTracker { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("FrameTracker:PPN={:#x}", self.ppn.0)) + } +} + +impl Drop for FrameTracker { + fn drop(&mut self) { + frame_dealloc(self.ppn); + } +} + +trait FrameAllocator { + fn new() -> Self; + fn alloc(&mut self) -> Option; + fn dealloc(&mut self, ppn: PhysPageNum); +} + +/// an implementation for frame allocator +pub struct StackFrameAllocator { + current: usize, + end: usize, + recycled: Vec, +} + +impl StackFrameAllocator { + pub fn init(&mut self, l: PhysPageNum, r: PhysPageNum) { + self.current = l.0; + self.end = r.0; + info!("last {} Physical Frames.", self.end - self.current); + } +} +impl FrameAllocator for StackFrameAllocator { + fn new() -> Self { + Self { + current: 0, + end: 0, + recycled: Vec::new(), + } + } + fn alloc(&mut self) -> Option { + if let Some(ppn) = self.recycled.pop() { + Some(ppn.into()) + } else if self.current == self.end { + None + } else { + self.current += 1; + Some((self.current - 1).into()) + } + } + fn dealloc(&mut self, ppn: PhysPageNum) { + let ppn = ppn.0; + // validity check + if ppn >= self.current || self.recycled.iter().any(|v| *v == ppn) { + panic!("Frame ppn={:#x} has not been allocated!", ppn); + } + // recycle + self.recycled.push(ppn); + } +} + +type FrameAllocatorImpl = StackFrameAllocator; + +lazy_static! { + /// frame allocator instance through lazy_static! + pub static ref FRAME_ALLOCATOR: UPSafeCell = + unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) }; +} + +pub fn init_frame_allocator() { + extern "C" { + fn ekernel(); + } + FRAME_ALLOCATOR.exclusive_access().init( + PhysAddr::from(ekernel as usize).ceil(), + PhysAddr::from(MEMORY_END).floor(), + ); +} + +/// initiate the frame allocator using `ekernel` and `MEMORY_END` +pub fn frame_alloc() -> Option { + FRAME_ALLOCATOR + .exclusive_access() + .alloc() + .map(FrameTracker::new) +} + +/// deallocate a frame +pub fn frame_dealloc(ppn: PhysPageNum) { + FRAME_ALLOCATOR.exclusive_access().dealloc(ppn); +} + +#[allow(unused)] +/// a simple test for frame allocator +pub fn frame_allocator_test() { + let mut v: Vec = Vec::new(); + for i in 0..5 { + let frame = frame_alloc().unwrap(); + info!("{:?}", frame); + v.push(frame); + } + v.clear(); + for i in 0..5 { + let frame = frame_alloc().unwrap(); + info!("{:?}", frame); + v.push(frame); + } + drop(v); + info!("frame_allocator_test passed!"); +} diff --git a/os8/src/mm/heap_allocator.rs b/os8/src/mm/heap_allocator.rs new file mode 100644 index 0000000..d518fae --- /dev/null +++ b/os8/src/mm/heap_allocator.rs @@ -0,0 +1,51 @@ +//! The global allocator + +use crate::config::KERNEL_HEAP_SIZE; +use buddy_system_allocator::LockedHeap; + +#[global_allocator] +/// heap allocator instance +static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty(); + +#[alloc_error_handler] +/// panic when heap allocation error occurs +pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! 
{ + panic!("Heap allocation error, layout = {:?}", layout); +} + +/// heap space ([u8; KERNEL_HEAP_SIZE]) +static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE]; + +/// initiate heap allocator +pub fn init_heap() { + unsafe { + HEAP_ALLOCATOR + .lock() + .init(HEAP_SPACE.as_ptr() as usize, KERNEL_HEAP_SIZE); + } +} + +#[allow(unused)] +pub fn heap_test() { + use alloc::boxed::Box; + use alloc::vec::Vec; + extern "C" { + fn sbss(); + fn ebss(); + } + let bss_range = sbss as usize..ebss as usize; + let a = Box::new(5); + assert_eq!(*a, 5); + assert!(bss_range.contains(&(a.as_ref() as *const _ as usize))); + drop(a); + let mut v: Vec = Vec::new(); + for i in 0..500 { + v.push(i); + } + for (i, vi) in v.iter().enumerate().take(500) { + assert_eq!(*vi, i); + } + assert!(bss_range.contains(&(v.as_ptr() as usize))); + drop(v); + info!("heap_test passed!"); +} diff --git a/os8/src/mm/memory_set.rs b/os8/src/mm/memory_set.rs new file mode 100644 index 0000000..3ecd7dd --- /dev/null +++ b/os8/src/mm/memory_set.rs @@ -0,0 +1,393 @@ +//! Implementation of [`MapArea`] and [`MemorySet`]. + +use super::{frame_alloc, FrameTracker}; +use super::{PTEFlags, PageTable, PageTableEntry}; +use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum}; +use super::{StepByOne, VPNRange}; +use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE}; +use crate::sync::UPSafeCell; +use alloc::collections::BTreeMap; +use alloc::sync::Arc; +use alloc::vec::Vec; +use lazy_static::*; +use riscv::register::satp; + +extern "C" { + fn stext(); + fn etext(); + fn srodata(); + fn erodata(); + fn sdata(); + fn edata(); + fn sbss_with_stack(); + fn ebss(); + fn ekernel(); + fn strampoline(); +} + +lazy_static! { + /// a memory set instance through lazy_static! managing kernel space + pub static ref KERNEL_SPACE: Arc> = + Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) }); +} + +/// Get the token of the kernel memory space +pub fn kernel_token() -> usize { + KERNEL_SPACE.exclusive_access().token() +} + +/// memory set structure, controls virtual-memory space +pub struct MemorySet { + page_table: PageTable, + areas: Vec, +} + +impl MemorySet { + pub fn new_bare() -> Self { + Self { + page_table: PageTable::new(), + areas: Vec::new(), + } + } + pub fn token(&self) -> usize { + self.page_table.token() + } + /// Assume that no conflicts. + pub fn insert_framed_area( + &mut self, + start_va: VirtAddr, + end_va: VirtAddr, + permission: MapPermission, + ) { + self.push( + MapArea::new(start_va, end_va, MapType::Framed, permission), + None, + ); + } + pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) { + if let Some((idx, area)) = self + .areas + .iter_mut() + .enumerate() + .find(|(_, area)| area.vpn_range.get_start() == start_vpn) + { + area.unmap(&mut self.page_table); + self.areas.remove(idx); + } + } + fn push(&mut self, mut map_area: MapArea, data: Option<&[u8]>) { + map_area.map(&mut self.page_table); + if let Some(data) = data { + map_area.copy_data(&mut self.page_table, data); + } + self.areas.push(map_area); + } + /// Mention that trampoline is not collected by areas. + fn map_trampoline(&mut self) { + self.page_table.map( + VirtAddr::from(TRAMPOLINE).into(), + PhysAddr::from(strampoline as usize).into(), + PTEFlags::R | PTEFlags::X, + ); + } + /// Without kernel stacks. 
+ pub fn new_kernel() -> Self { + let mut memory_set = Self::new_bare(); + // map trampoline + memory_set.map_trampoline(); + // map kernel sections + info!(".text [{:#x}, {:#x})", stext as usize, etext as usize); + info!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize); + info!(".data [{:#x}, {:#x})", sdata as usize, edata as usize); + info!( + ".bss [{:#x}, {:#x})", + sbss_with_stack as usize, ebss as usize + ); + info!("mapping .text section"); + memory_set.push( + MapArea::new( + (stext as usize).into(), + (etext as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::X, + ), + None, + ); + info!("mapping .rodata section"); + memory_set.push( + MapArea::new( + (srodata as usize).into(), + (erodata as usize).into(), + MapType::Identical, + MapPermission::R, + ), + None, + ); + info!("mapping .data section"); + memory_set.push( + MapArea::new( + (sdata as usize).into(), + (edata as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + ), + None, + ); + info!("mapping .bss section"); + memory_set.push( + MapArea::new( + (sbss_with_stack as usize).into(), + (ebss as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + ), + None, + ); + info!("mapping physical memory"); + memory_set.push( + MapArea::new( + (ekernel as usize).into(), + MEMORY_END.into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + ), + None, + ); + info!("mapping memory-mapped registers"); + for pair in MMIO { + memory_set.push( + MapArea::new( + (*pair).0.into(), + ((*pair).0 + (*pair).1).into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + ), + None, + ); + } + memory_set + } + /// Include sections in elf and trampoline and TrapContext and user stack, + /// also returns user_sp and entry point. 
+ pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) { + let mut memory_set = Self::new_bare(); + // map trampoline + memory_set.map_trampoline(); + // map program headers of elf, with U flag + let elf = xmas_elf::ElfFile::new(elf_data).unwrap(); + let elf_header = elf.header; + let magic = elf_header.pt1.magic; + assert_eq!(magic, [0x7f, 0x45, 0x4c, 0x46], "invalid elf!"); + let ph_count = elf_header.pt2.ph_count(); + let mut max_end_vpn = VirtPageNum(0); + for i in 0..ph_count { + let ph = elf.program_header(i).unwrap(); + if ph.get_type().unwrap() == xmas_elf::program::Type::Load { + let start_va: VirtAddr = (ph.virtual_addr() as usize).into(); + let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into(); + let mut map_perm = MapPermission::U; + let ph_flags = ph.flags(); + if ph_flags.is_read() { + map_perm |= MapPermission::R; + } + if ph_flags.is_write() { + map_perm |= MapPermission::W; + } + if ph_flags.is_execute() { + map_perm |= MapPermission::X; + } + let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm); + max_end_vpn = map_area.vpn_range.get_end(); + memory_set.push( + map_area, + Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]), + ); + } + } + // We don't map user stack and trapframe here since they will be later + // allocated through TaskControlBlock::new() + let max_end_va: VirtAddr = max_end_vpn.into(); + let mut user_stack_top: usize = max_end_va.into(); + user_stack_top += PAGE_SIZE; + ( + memory_set, + user_stack_top, + elf.header.pt2.entry_point() as usize, + ) + } + /// Copy an identical user_space + pub fn from_existed_user(user_space: &MemorySet) -> MemorySet { + let mut memory_set = Self::new_bare(); + // map trampoline + memory_set.map_trampoline(); + // copy data sections/trap_context/user_stack + for area in user_space.areas.iter() { + let new_area = MapArea::from_another(area); + memory_set.push(new_area, None); + // copy data from another space + for vpn in area.vpn_range { + let src_ppn = user_space.translate(vpn).unwrap().ppn(); + let dst_ppn = memory_set.translate(vpn).unwrap().ppn(); + dst_ppn + .get_bytes_array() + .copy_from_slice(src_ppn.get_bytes_array()); + } + } + memory_set + } + pub fn activate(&self) { + let satp = self.page_table.token(); + unsafe { + satp::write(satp); + core::arch::asm!("sfence.vma"); + } + } + pub fn translate(&self, vpn: VirtPageNum) -> Option { + self.page_table.translate(vpn) + } + pub fn recycle_data_pages(&mut self) { + //*self = Self::new_bare(); + self.areas.clear(); + } + pub fn kernel_copy() -> Self { + let areas = KERNEL_SPACE.exclusive_access().areas.clone(); + Self { + page_table: PageTable::from_token(kernel_token()), + areas: areas, + } + } +} + +/// map area structure, controls a contiguous piece of virtual memory +#[derive(Clone)] +pub struct MapArea { + vpn_range: VPNRange, + data_frames: BTreeMap, + map_type: MapType, + map_perm: MapPermission, +} + +impl MapArea { + pub fn new( + start_va: VirtAddr, + end_va: VirtAddr, + map_type: MapType, + map_perm: MapPermission, + ) -> Self { + let start_vpn: VirtPageNum = start_va.floor(); + let end_vpn: VirtPageNum = end_va.ceil(); + Self { + vpn_range: VPNRange::new(start_vpn, end_vpn), + data_frames: BTreeMap::new(), + map_type, + map_perm, + } + } + pub fn from_another(another: &MapArea) -> Self { + Self { + vpn_range: VPNRange::new(another.vpn_range.get_start(), another.vpn_range.get_end()), + data_frames: BTreeMap::new(), + map_type: another.map_type, + map_perm: another.map_perm, + } + } 
+ pub fn map_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) { + let ppn: PhysPageNum; + match self.map_type { + MapType::Identical => { + ppn = PhysPageNum(vpn.0); + } + MapType::Framed => { + let frame = frame_alloc().unwrap(); + ppn = frame.ppn; + self.data_frames.insert(vpn, frame); + } + } + let pte_flags = PTEFlags::from_bits(self.map_perm.bits).unwrap(); + page_table.map(vpn, ppn, pte_flags); + } + + pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) { + #[allow(clippy::single_match)] + match self.map_type { + MapType::Framed => { + self.data_frames.remove(&vpn); + } + _ => {} + } + page_table.unmap(vpn); + } + pub fn map(&mut self, page_table: &mut PageTable) { + for vpn in self.vpn_range { + self.map_one(page_table, vpn); + } + } + pub fn unmap(&mut self, page_table: &mut PageTable) { + for vpn in self.vpn_range { + self.unmap_one(page_table, vpn); + } + } + /// data: start-aligned but maybe with shorter length + /// assume that all frames were cleared before + pub fn copy_data(&mut self, page_table: &mut PageTable, data: &[u8]) { + assert_eq!(self.map_type, MapType::Framed); + let mut start: usize = 0; + let mut current_vpn = self.vpn_range.get_start(); + let len = data.len(); + loop { + let src = &data[start..len.min(start + PAGE_SIZE)]; + let dst = &mut page_table + .translate(current_vpn) + .unwrap() + .ppn() + .get_bytes_array()[..src.len()]; + dst.copy_from_slice(src); + start += PAGE_SIZE; + if start >= len { + break; + } + current_vpn.step(); + } + } +} + +#[derive(Copy, Clone, PartialEq, Debug)] +/// map type for memory set: identical or framed +pub enum MapType { + Identical, + Framed, +} + +bitflags! { + /// map permission corresponding to that in pte: `R W X U` + pub struct MapPermission: u8 { + const R = 1 << 1; + const W = 1 << 2; + const X = 1 << 3; + const U = 1 << 4; + } +} + +#[allow(unused)] +pub fn remap_test() { + let mut kernel_space = KERNEL_SPACE.exclusive_access(); + let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into(); + let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into(); + let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into(); + assert!(!kernel_space + .page_table + .translate(mid_text.floor()) + .unwrap() + .writable()); + assert!(!kernel_space + .page_table + .translate(mid_rodata.floor()) + .unwrap() + .writable()); + assert!(!kernel_space + .page_table + .translate(mid_data.floor()) + .unwrap() + .executable()); + info!("remap_test passed!"); +} diff --git a/os8/src/mm/mod.rs b/os8/src/mm/mod.rs new file mode 100644 index 0000000..211cc2f --- /dev/null +++ b/os8/src/mm/mod.rs @@ -0,0 +1,29 @@ +//! Memory management implementation +//! +//! SV39 page-based virtual-memory architecture for RV64 systems, and +//! everything about memory management, like frame allocator, page table, +//! map area and memory set, is implemented here. +//! +//! Every task or process has a memory_set to control its virtual memory. 
+ + +mod address; +mod frame_allocator; +mod heap_allocator; +mod memory_set; +mod page_table; + +pub use address::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum}; +pub use address::{StepByOne, VPNRange}; +pub use frame_allocator::{frame_alloc, frame_dealloc, FrameTracker}; +pub use memory_set::{remap_test, kernel_token}; +pub use memory_set::{MapPermission, MemorySet, KERNEL_SPACE}; +pub use page_table::{translated_byte_buffer, translated_refmut, translated_ref, translated_str, PageTableEntry}; +pub use page_table::{PTEFlags, PageTable, UserBuffer}; + +/// initiate heap allocator, frame allocator and kernel space +pub fn init() { + heap_allocator::init_heap(); + frame_allocator::init_frame_allocator(); + KERNEL_SPACE.exclusive_access().activate(); +} diff --git a/os8/src/mm/page_table.rs b/os8/src/mm/page_table.rs new file mode 100644 index 0000000..dbb195a --- /dev/null +++ b/os8/src/mm/page_table.rs @@ -0,0 +1,260 @@ +//! Implementation of [`PageTableEntry`] and [`PageTable`]. + +use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum}; +use alloc::string::String; +use alloc::vec; +use alloc::vec::Vec; +use bitflags::*; + +bitflags! { + /// page table entry flags + pub struct PTEFlags: u8 { + const V = 1 << 0; + const R = 1 << 1; + const W = 1 << 2; + const X = 1 << 3; + const U = 1 << 4; + const G = 1 << 5; + const A = 1 << 6; + const D = 1 << 7; + } +} + +#[derive(Copy, Clone)] +#[repr(C)] +/// page table entry structure +pub struct PageTableEntry { + pub bits: usize, +} + +impl PageTableEntry { + pub fn new(ppn: PhysPageNum, flags: PTEFlags) -> Self { + PageTableEntry { + bits: ppn.0 << 10 | flags.bits as usize, + } + } + pub fn empty() -> Self { + PageTableEntry { bits: 0 } + } + pub fn ppn(&self) -> PhysPageNum { + (self.bits >> 10 & ((1usize << 44) - 1)).into() + } + pub fn flags(&self) -> PTEFlags { + PTEFlags::from_bits(self.bits as u8).unwrap() + } + pub fn is_valid(&self) -> bool { + (self.flags() & PTEFlags::V) != PTEFlags::empty() + } + pub fn readable(&self) -> bool { + (self.flags() & PTEFlags::R) != PTEFlags::empty() + } + pub fn writable(&self) -> bool { + (self.flags() & PTEFlags::W) != PTEFlags::empty() + } + pub fn executable(&self) -> bool { + (self.flags() & PTEFlags::X) != PTEFlags::empty() + } +} + +/// page table structure +pub struct PageTable { + root_ppn: PhysPageNum, + frames: Vec, +} + +/// Assume that it won't oom when creating/mapping. +impl PageTable { + pub fn new() -> Self { + let frame = frame_alloc().unwrap(); + PageTable { + root_ppn: frame.ppn, + frames: vec![frame], + } + } + /// Temporarily used to get arguments from user space. 
+ pub fn from_token(satp: usize) -> Self { + Self { + root_ppn: PhysPageNum::from(satp & ((1usize << 44) - 1)), + frames: Vec::new(), + } + } + fn find_pte_create(&mut self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> { + let mut idxs = vpn.indexes(); + let mut ppn = self.root_ppn; + let mut result: Option<&mut PageTableEntry> = None; + for (i, idx) in idxs.iter_mut().enumerate() { + let pte = &mut ppn.get_pte_array()[*idx]; + if i == 2 { + result = Some(pte); + break; + } + if !pte.is_valid() { + let frame = frame_alloc().unwrap(); + *pte = PageTableEntry::new(frame.ppn, PTEFlags::V); + self.frames.push(frame); + } + ppn = pte.ppn(); + } + result + } + fn find_pte(&self, vpn: VirtPageNum) -> Option<&PageTableEntry> { + let idxs = vpn.indexes(); + let mut ppn = self.root_ppn; + let mut result: Option<&PageTableEntry> = None; + for (i, idx) in idxs.iter().enumerate() { + let pte = &ppn.get_pte_array()[*idx]; + if i == 2 { + result = Some(pte); + break; + } + if !pte.is_valid() { + return None; + } + ppn = pte.ppn(); + } + result + } + #[allow(unused)] + pub fn map(&mut self, vpn: VirtPageNum, ppn: PhysPageNum, flags: PTEFlags) { + let pte = self.find_pte_create(vpn).unwrap(); + assert!(!pte.is_valid(), "vpn {:?} is mapped before mapping", vpn); + *pte = PageTableEntry::new(ppn, flags | PTEFlags::V); + } + #[allow(unused)] + pub fn unmap(&mut self, vpn: VirtPageNum) { + let pte = self.find_pte_create(vpn).unwrap(); + assert!(pte.is_valid(), "vpn {:?} is invalid before unmapping", vpn); + *pte = PageTableEntry::empty(); + } + pub fn translate(&self, vpn: VirtPageNum) -> Option { + self.find_pte(vpn).copied() + } + pub fn translate_va(&self, va: VirtAddr) -> Option { + self.find_pte(va.clone().floor()).map(|pte| { + //println!("translate_va:va = {:?}", va); + let aligned_pa: PhysAddr = pte.ppn().into(); + //println!("translate_va:pa_align = {:?}", aligned_pa); + let offset = va.page_offset(); + let aligned_pa_usize: usize = aligned_pa.into(); + (aligned_pa_usize + offset).into() + }) + } + pub fn token(&self) -> usize { + 8usize << 60 | self.root_ppn.0 + } +} + +/// translate a pointer to a mutable u8 Vec through page table +pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> { + let page_table = PageTable::from_token(token); + let mut start = ptr as usize; + let end = start + len; + let mut v = Vec::new(); + while start < end { + let start_va = VirtAddr::from(start); + let mut vpn = start_va.floor(); + let ppn = page_table.translate(vpn).unwrap().ppn(); + vpn.step(); + let mut end_va: VirtAddr = vpn.into(); + end_va = end_va.min(VirtAddr::from(end)); + if end_va.page_offset() == 0 { + v.push(&mut ppn.get_bytes_array()[start_va.page_offset()..]); + } else { + v.push(&mut ppn.get_bytes_array()[start_va.page_offset()..end_va.page_offset()]); + } + start = end_va.into(); + } + v +} + +pub fn translated_str(token: usize, ptr: *const u8) -> String { + let page_table = PageTable::from_token(token); + let mut string = String::new(); + let mut va = ptr as usize; + loop { + let ch: u8 = *(page_table + .translate_va(VirtAddr::from(va)) + .unwrap() + .get_mut()); + if ch == 0 { + break; + } else { + string.push(ch as char); + va += 1; + } + } + string +} + +pub fn translated_ref(token: usize, ptr: *const T) -> &'static T { + let page_table = PageTable::from_token(token); + page_table.translate_va(VirtAddr::from(ptr as usize)).unwrap().get_mut() +} + +pub fn translated_refmut(token: usize, ptr: *mut T) -> &'static mut T { + //println!("into 
translated_refmut!"); + let page_table = PageTable::from_token(token); + let va = ptr as usize; + //println!("translated_refmut: before translate_va"); + page_table + .translate_va(VirtAddr::from(va)) + .unwrap() + .get_mut() +} + +/// An abstraction over a buffer passed from user space to kernel space +pub struct UserBuffer { + pub buffers: Vec<&'static mut [u8]>, +} + +impl UserBuffer { + /// Constuct a UserBuffer + pub fn new(buffers: Vec<&'static mut [u8]>) -> Self { + Self { buffers } + } + /// Get the length of a UserBuffer + pub fn len(&self) -> usize { + let mut total: usize = 0; + for b in self.buffers.iter() { + total += b.len(); + } + total + } +} + +impl IntoIterator for UserBuffer { + type Item = *mut u8; + type IntoIter = UserBufferIterator; + fn into_iter(self) -> Self::IntoIter { + UserBufferIterator { + buffers: self.buffers, + current_buffer: 0, + current_idx: 0, + } + } +} + +// An iterator over a UserBuffer +pub struct UserBufferIterator { + buffers: Vec<&'static mut [u8]>, + current_buffer: usize, + current_idx: usize, +} + +impl Iterator for UserBufferIterator { + type Item = *mut u8; + fn next(&mut self) -> Option { + if self.current_buffer >= self.buffers.len() { + None + } else { + let r = &mut self.buffers[self.current_buffer][self.current_idx] as *mut _; + if self.current_idx + 1 == self.buffers[self.current_buffer].len() { + self.current_idx = 0; + self.current_buffer += 1; + } else { + self.current_idx += 1; + } + Some(r) + } + } +} diff --git a/os8/src/sync/README.md b/os8/src/sync/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/sync/condvar.rs b/os8/src/sync/condvar.rs new file mode 100644 index 0000000..f96cd91 --- /dev/null +++ b/os8/src/sync/condvar.rs @@ -0,0 +1,39 @@ +use crate::sync::{Mutex, UPSafeCell}; +use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock}; +use alloc::{collections::VecDeque, sync::Arc}; + +pub struct Condvar { + pub inner: UPSafeCell, +} + +pub struct CondvarInner { + pub wait_queue: VecDeque>, +} + +impl Condvar { + pub fn new() -> Self { + Self { + inner: unsafe { + UPSafeCell::new(CondvarInner { + wait_queue: VecDeque::new(), + }) + }, + } + } + + pub fn signal(&self) { + let mut inner = self.inner.exclusive_access(); + if let Some(task) = inner.wait_queue.pop_front() { + add_task(task); + } + } + + pub fn wait(&self, mutex: Arc) { + mutex.unlock(); + let mut inner = self.inner.exclusive_access(); + inner.wait_queue.push_back(current_task().unwrap()); + drop(inner); + block_current_and_run_next(); + mutex.lock(); + } +} diff --git a/os8/src/sync/mod.rs b/os8/src/sync/mod.rs new file mode 100644 index 0000000..1516557 --- /dev/null +++ b/os8/src/sync/mod.rs @@ -0,0 +1,11 @@ +//! 
Synchronization and interior mutability primitives + +mod condvar; +mod mutex; +mod semaphore; +mod up; + +pub use condvar::Condvar; +pub use mutex::{Mutex, MutexBlocking, MutexSpin}; +pub use semaphore::Semaphore; +pub use up::UPSafeCell; diff --git a/os8/src/sync/mutex.rs b/os8/src/sync/mutex.rs new file mode 100644 index 0000000..be58f79 --- /dev/null +++ b/os8/src/sync/mutex.rs @@ -0,0 +1,88 @@ +use super::UPSafeCell; +use crate::task::TaskControlBlock; +use crate::task::{add_task, current_task}; +use crate::task::{block_current_and_run_next, suspend_current_and_run_next}; +use alloc::{collections::VecDeque, sync::Arc}; + +pub trait Mutex: Sync + Send { + fn lock(&self); + fn unlock(&self); +} + +pub struct MutexSpin { + locked: UPSafeCell, +} + +impl MutexSpin { + pub fn new() -> Self { + Self { + locked: unsafe { UPSafeCell::new(false) }, + } + } +} + +impl Mutex for MutexSpin { + fn lock(&self) { + loop { + let mut locked = self.locked.exclusive_access(); + if *locked { + drop(locked); + suspend_current_and_run_next(); + continue; + } else { + *locked = true; + return; + } + } + } + + fn unlock(&self) { + let mut locked = self.locked.exclusive_access(); + *locked = false; + } +} + +pub struct MutexBlocking { + inner: UPSafeCell, +} + +pub struct MutexBlockingInner { + locked: bool, + wait_queue: VecDeque>, +} + +impl MutexBlocking { + pub fn new() -> Self { + Self { + inner: unsafe { + UPSafeCell::new(MutexBlockingInner { + locked: false, + wait_queue: VecDeque::new(), + }) + }, + } + } +} + +impl Mutex for MutexBlocking { + fn lock(&self) { + let mut mutex_inner = self.inner.exclusive_access(); + if mutex_inner.locked { + mutex_inner.wait_queue.push_back(current_task().unwrap()); + drop(mutex_inner); + block_current_and_run_next(); + } else { + mutex_inner.locked = true; + } + } + + fn unlock(&self) { + let mut mutex_inner = self.inner.exclusive_access(); + assert!(mutex_inner.locked); + if let Some(waking_task) = mutex_inner.wait_queue.pop_front() { + add_task(waking_task); + } else { + mutex_inner.locked = false; + } + } +} diff --git a/os8/src/sync/semaphore.rs b/os8/src/sync/semaphore.rs new file mode 100644 index 0000000..7f3870f --- /dev/null +++ b/os8/src/sync/semaphore.rs @@ -0,0 +1,45 @@ +use crate::sync::UPSafeCell; +use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock}; +use alloc::{collections::VecDeque, sync::Arc}; + +pub struct Semaphore { + pub inner: UPSafeCell, +} + +pub struct SemaphoreInner { + pub count: isize, + pub wait_queue: VecDeque>, +} + +impl Semaphore { + pub fn new(res_count: usize) -> Self { + Self { + inner: unsafe { + UPSafeCell::new(SemaphoreInner { + count: res_count as isize, + wait_queue: VecDeque::new(), + }) + }, + } + } + + pub fn up(&self) { + let mut inner = self.inner.exclusive_access(); + inner.count += 1; + if inner.count <= 0 { + if let Some(task) = inner.wait_queue.pop_front() { + add_task(task); + } + } + } + + pub fn down(&self) { + let mut inner = self.inner.exclusive_access(); + inner.count -= 1; + if inner.count < 0 { + inner.wait_queue.push_back(current_task().unwrap()); + drop(inner); + block_current_and_run_next(); + } + } +} diff --git a/os8/src/sync/up.rs b/os8/src/sync/up.rs new file mode 100644 index 0000000..039b039 --- /dev/null +++ b/os8/src/sync/up.rs @@ -0,0 +1,31 @@ +//! Uniprocessor interior mutability primitives + +use core::cell::{RefCell, RefMut}; + +/// Wrap a static data structure inside it so that we are +/// able to access it without any `unsafe`. 
+/// +/// We should only use it in uniprocessor. +/// +/// In order to get mutable reference of inner data, call +/// `exclusive_access`. +pub struct UPSafeCell { + /// inner data + inner: RefCell, +} + +unsafe impl Sync for UPSafeCell {} + +impl UPSafeCell { + /// User is responsible to guarantee that inner struct is only used in + /// uniprocessor. + pub unsafe fn new(value: T) -> Self { + Self { + inner: RefCell::new(value), + } + } + /// Panic if the data has been borrowed. + pub fn exclusive_access(&self) -> RefMut<'_, T> { + self.inner.borrow_mut() + } +} diff --git a/os8/src/syscall/README.md b/os8/src/syscall/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/syscall/fs.rs b/os8/src/syscall/fs.rs new file mode 100644 index 0000000..1259ee2 --- /dev/null +++ b/os8/src/syscall/fs.rs @@ -0,0 +1,114 @@ +//! File and filesystem-related syscalls + +use crate::fs::make_pipe; +use crate::fs::open_file; +use crate::fs::OpenFlags; +use crate::fs::Stat; +use crate::mm::translated_byte_buffer; +use crate::mm::translated_refmut; +use crate::mm::translated_str; +use crate::mm::UserBuffer; +use crate::task::current_process; +use crate::task::current_user_token; +use alloc::sync::Arc; + +pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize { + let token = current_user_token(); + let process = current_process(); + let inner = process.inner_exclusive_access(); + if fd >= inner.fd_table.len() { + return -1; + } + if let Some(file) = &inner.fd_table[fd] { + let file = file.clone(); + // release current process TCB manually to avoid multi-borrow + drop(inner); + file.write(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize + } else { + -1 + } +} + +pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize { + let token = current_user_token(); + let process = current_process(); + let inner = process.inner_exclusive_access(); + if fd >= inner.fd_table.len() { + return -1; + } + if let Some(file) = &inner.fd_table[fd] { + let file = file.clone(); + // release current process TCB manually to avoid multi-borrow + drop(inner); + file.read(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize + } else { + -1 + } +} + +pub fn sys_open(path: *const u8, flags: u32) -> isize { + let process = current_process(); + let token = current_user_token(); + let path = translated_str(token, path); + if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) { + let mut inner = process.inner_exclusive_access(); + let fd = inner.alloc_fd(); + inner.fd_table[fd] = Some(inode); + fd as isize + } else { + -1 + } +} + +pub fn sys_close(fd: usize) -> isize { + let process = current_process(); + let mut inner = process.inner_exclusive_access(); + if fd >= inner.fd_table.len() { + return -1; + } + if inner.fd_table[fd].is_none() { + return -1; + } + inner.fd_table[fd].take(); + 0 +} + +pub fn sys_pipe(pipe: *mut usize) -> isize { + let process = current_process(); + let token = current_user_token(); + let mut inner = process.inner_exclusive_access(); + let (pipe_read, pipe_write) = make_pipe(); + let read_fd = inner.alloc_fd(); + inner.fd_table[read_fd] = Some(pipe_read); + let write_fd = inner.alloc_fd(); + inner.fd_table[write_fd] = Some(pipe_write); + *translated_refmut(token, pipe) = read_fd; + *translated_refmut(token, unsafe { pipe.add(1) }) = write_fd; + 0 +} + +pub fn sys_dup(fd: usize) -> isize { + let process = current_process(); + let mut inner = process.inner_exclusive_access(); + if fd >= inner.fd_table.len() { + return 
-1; + } + if inner.fd_table[fd].is_none() { + return -1; + } + let new_fd = inner.alloc_fd(); + inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap())); + new_fd as isize +} + +pub fn sys_fstat(_fd: usize, _st: *mut Stat) -> isize { + -1 +} + +pub fn sys_linkat(_old_name: *const u8, _new_name: *const u8) -> isize { + -1 +} + +pub fn sys_unlinkat(_name: *const u8) -> isize { + -1 +} diff --git a/os8/src/syscall/mod.rs b/os8/src/syscall/mod.rs new file mode 100644 index 0000000..509ff1e --- /dev/null +++ b/os8/src/syscall/mod.rs @@ -0,0 +1,100 @@ +//! Implementation of syscalls +//! +//! The single entry point to all system calls, [`syscall()`], is called +//! whenever userspace wishes to perform a system call using the `ecall` +//! instruction. In this case, the processor raises an 'Environment call from +//! U-mode' exception, which is handled as one of the cases in +//! [`crate::trap::trap_handler`]. +//! +//! For clarity, each single syscall is implemented as its own function, named +//! `sys_` then the name of the syscall. You can find functions like this in +//! submodules, and you should also implement syscalls this way. + +const SYSCALL_DUP: usize = 24; +const SYSCALL_UNLINKAT: usize = 35; +const SYSCALL_LINKAT: usize = 37; +const SYSCALL_OPEN: usize = 56; +const SYSCALL_CLOSE: usize = 57; +const SYSCALL_PIPE: usize = 59; +const SYSCALL_READ: usize = 63; +const SYSCALL_WRITE: usize = 64; +const SYSCALL_FSTAT: usize = 80; +const SYSCALL_EXIT: usize = 93; +const SYSCALL_SLEEP: usize = 101; +const SYSCALL_YIELD: usize = 124; +const SYSCALL_GET_TIME: usize = 169; +const SYSCALL_GETPID: usize = 172; +const SYSCALL_GETTID: usize = 178; +const SYSCALL_FORK: usize = 220; +const SYSCALL_EXEC: usize = 221; +const SYSCALL_WAITPID: usize = 260; +const SYSCALL_SPAWN: usize = 400; +const SYSCALL_MUNMAP: usize = 215; +const SYSCALL_MMAP: usize = 222; +const SYSCALL_SET_PRIORITY: usize = 140; +const SYSCALL_TASK_INFO: usize = 410; +const SYSCALL_THREAD_CREATE: usize = 460; +const SYSCALL_WAITTID: usize = 462; +const SYSCALL_MUTEX_CREATE: usize = 463; +const SYSCALL_MUTEX_LOCK: usize = 464; +const SYSCALL_MUTEX_UNLOCK: usize = 466; +const SYSCALL_SEMAPHORE_CREATE: usize = 467; +const SYSCALL_SEMAPHORE_UP: usize = 468; +const SYSCALL_ENABLE_DEADLOCK_DETECT: usize = 469; +const SYSCALL_SEMAPHORE_DOWN: usize = 470; +const SYSCALL_CONDVAR_CREATE: usize = 471; +const SYSCALL_CONDVAR_SIGNAL: usize = 472; +const SYSCALL_CONDVAR_WAIT: usize = 473; + +mod fs; +pub mod process; +mod sync; +mod thread; + +use crate::fs::Stat; +use fs::*; +use process::*; +use sync::*; +use thread::*; + +/// handle syscall exception with `syscall_id` and other arguments +pub fn syscall(syscall_id: usize, args: [usize; 4]) -> isize { + match syscall_id { + SYSCALL_DUP => sys_dup(args[0]), + SYSCALL_LINKAT => sys_linkat(args[1] as *const u8, args[3] as *const u8), + SYSCALL_UNLINKAT => sys_unlinkat(args[1] as *const u8), + SYSCALL_OPEN => sys_open(args[1] as *const u8, args[2] as u32), + SYSCALL_CLOSE => sys_close(args[0]), + SYSCALL_PIPE => sys_pipe(args[0] as *mut usize), + SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]), + SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]), + SYSCALL_FSTAT => sys_fstat(args[0], args[1] as *mut Stat), + SYSCALL_EXIT => sys_exit(args[0] as i32), + SYSCALL_SLEEP => sys_sleep(args[0]), + SYSCALL_YIELD => sys_yield(), + SYSCALL_GETPID => sys_getpid(), + SYSCALL_GETTID => sys_gettid(), + SYSCALL_FORK => sys_fork(), + SYSCALL_EXEC => 
sys_exec(args[0] as *const u8, args[1] as *const usize), + SYSCALL_WAITPID => sys_waitpid(args[0] as isize, args[1] as *mut i32), + SYSCALL_GET_TIME => sys_get_time(args[0] as *mut TimeVal, args[1]), + SYSCALL_MMAP => sys_mmap(args[0], args[1], args[2]), + SYSCALL_MUNMAP => sys_munmap(args[0], args[1]), + SYSCALL_SET_PRIORITY => sys_set_priority(args[0] as isize), + SYSCALL_TASK_INFO => sys_task_info(args[0] as *mut TaskInfo), + SYSCALL_SPAWN => sys_spawn(args[0] as *const u8), + SYSCALL_THREAD_CREATE => sys_thread_create(args[0], args[1]), + SYSCALL_WAITTID => sys_waittid(args[0]) as isize, + SYSCALL_MUTEX_CREATE => sys_mutex_create(args[0] == 1), + SYSCALL_MUTEX_LOCK => sys_mutex_lock(args[0]), + SYSCALL_MUTEX_UNLOCK => sys_mutex_unlock(args[0]), + SYSCALL_SEMAPHORE_CREATE => sys_semaphore_create(args[0]), + SYSCALL_SEMAPHORE_UP => sys_semaphore_up(args[0]), + SYSCALL_ENABLE_DEADLOCK_DETECT => sys_enable_deadlock_detect(args[0]), + SYSCALL_SEMAPHORE_DOWN => sys_semaphore_down(args[0]), + SYSCALL_CONDVAR_CREATE => sys_condvar_create(args[0]), + SYSCALL_CONDVAR_SIGNAL => sys_condvar_signal(args[0]), + SYSCALL_CONDVAR_WAIT => sys_condvar_wait(args[0], args[1]), + _ => panic!("Unsupported syscall_id: {}", syscall_id), + } +} diff --git a/os8/src/syscall/process.rs b/os8/src/syscall/process.rs new file mode 100644 index 0000000..87fade0 --- /dev/null +++ b/os8/src/syscall/process.rs @@ -0,0 +1,158 @@ +//! Process management syscalls + +use crate::config::MAX_SYSCALL_NUM; +use crate::fs::{open_file, OpenFlags}; +use crate::mm::{translated_ref, translated_refmut, translated_str, PageTable, VirtAddr}; +use crate::task::{ + current_process, current_task, current_user_token, exit_current_and_run_next, + suspend_current_and_run_next, TaskStatus, +}; +use crate::timer::get_time_us; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; + +#[repr(C)] +#[derive(Debug)] +pub struct TimeVal { + pub sec: usize, + pub usec: usize, +} + +#[derive(Clone, Copy)] +pub struct TaskInfo { + pub status: TaskStatus, + pub syscall_times: [u32; MAX_SYSCALL_NUM], + pub time: usize, +} + +pub fn sys_exit(exit_code: i32) -> ! 
{ + // debug!("[kernel] Application exited with code {}", exit_code); + exit_current_and_run_next(exit_code); + panic!("Unreachable in sys_exit!"); +} + +/// current task gives up resources for other tasks +pub fn sys_yield() -> isize { + suspend_current_and_run_next(); + 0 +} + +pub fn sys_getpid() -> isize { + current_task().unwrap().process.upgrade().unwrap().getpid() as isize +} + +/// Syscall Fork which returns 0 for child process and child_pid for parent process +pub fn sys_fork() -> isize { + let current_process = current_process(); + let new_process = current_process.fork(); + let new_pid = new_process.getpid(); + // modify trap context of new_task, because it returns immediately after switching + let new_process_inner = new_process.inner_exclusive_access(); + let task = new_process_inner.tasks[0].as_ref().unwrap(); + let trap_cx = task.inner_exclusive_access().get_trap_cx(); + // we do not have to move to next instruction since we have done it before + // for child process, fork returns 0 + trap_cx.x[10] = 0; + new_pid as isize +} + +/// Syscall Exec which accepts the elf path +pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize { + let token = current_user_token(); + let path = translated_str(token, path); + let mut args_vec: Vec = Vec::new(); + loop { + let arg_str_ptr = *translated_ref(token, args); + if arg_str_ptr == 0 { + break; + } + args_vec.push(translated_str(token, arg_str_ptr as *const u8)); + unsafe { + args = args.add(1); + } + } + if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) { + let all_data = app_inode.read_all(); + let process = current_process(); + let argc = args_vec.len(); + process.exec(all_data.as_slice(), args_vec); + argc as isize + } else { + -1 + } +} + +/// If there is not a child process whose pid is same as given, return -1. +/// Else if there is a child process but it is still running, return -2. 
+pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize { + let process = current_process(); + // find a child process + + // ---- access current TCB exclusively + let mut inner = process.inner_exclusive_access(); + if !inner + .children + .iter() + .any(|p| pid == -1 || pid as usize == p.getpid()) + { + return -1; + // ---- release current PCB + } + let pair = inner.children.iter().enumerate().find(|(_, p)| { + // ++++ temporarily access child PCB lock exclusively + p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid()) + // ++++ release child PCB + }); + if let Some((idx, _)) = pair { + let child = inner.children.remove(idx); + // confirm that child will be deallocated after removing from children list + assert_eq!(Arc::strong_count(&child), 1); + let found_pid = child.getpid(); + // ++++ temporarily access child TCB exclusively + let exit_code = child.inner_exclusive_access().exit_code; + // ++++ release child PCB + *translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code; + found_pid as isize + } else { + -2 + } + // ---- release current PCB lock automatically +} + +pub fn sys_get_time(_ts: *mut TimeVal, _tz: usize) -> isize { + let _us = get_time_us(); + // unsafe { + // *ts = TimeVal { + // sec: us / 1_000_000, + // usec: us % 1_000_000, + // }; + // } + *translated_refmut(current_user_token(), _ts) = TimeVal { + sec: _us / 1_000_000, + usec: _us % 1_000_000, + }; + 0 +} + +pub fn sys_task_info(_ti: *mut TaskInfo) -> isize { + -1 +} + +pub fn sys_set_priority(_prio: isize) -> isize { + -1 +} + +pub fn sys_mmap(_start: usize, _len: usize, _port: usize) -> isize { + -1 +} + +pub fn sys_munmap(_start: usize, _len: usize) -> isize { + -1 +} + +// +// ALERT: 注意在实现 SPAWN 时不需要复制父进程地址空间,SPAWN != FORK + EXEC +pub fn sys_spawn(_path: *const u8) -> isize { + -1 +} diff --git a/os8/src/syscall/sync.rs b/os8/src/syscall/sync.rs new file mode 100644 index 0000000..57fc2fc --- /dev/null +++ b/os8/src/syscall/sync.rs @@ -0,0 +1,143 @@ +use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore}; +use crate::task::{block_current_and_run_next, current_process, current_task}; +use crate::timer::{add_timer, get_time_ms}; +use alloc::sync::Arc; + +pub fn sys_sleep(ms: usize) -> isize { + let expire_ms = get_time_ms() + ms; + let task = current_task().unwrap(); + add_timer(expire_ms, task); + block_current_and_run_next(); + 0 +} + +// LAB5 HINT: you might need to maintain data structures used for deadlock detection +// during sys_mutex_* and sys_semaphore_* syscalls +pub fn sys_mutex_create(blocking: bool) -> isize { + let process = current_process(); + let mutex: Option> = if !blocking { + Some(Arc::new(MutexSpin::new())) + } else { + Some(Arc::new(MutexBlocking::new())) + }; + let mut process_inner = process.inner_exclusive_access(); + if let Some(id) = process_inner + .mutex_list + .iter() + .enumerate() + .find(|(_, item)| item.is_none()) + .map(|(id, _)| id) + { + process_inner.mutex_list[id] = mutex; + id as isize + } else { + process_inner.mutex_list.push(mutex); + process_inner.mutex_list.len() as isize - 1 + } +} + +// LAB5 HINT: Return -0xDEAD if deadlock is detected +pub fn sys_mutex_lock(mutex_id: usize) -> isize { + let process = current_process(); + let process_inner = process.inner_exclusive_access(); + let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap()); + drop(process_inner); + drop(process); + mutex.lock(); + 0 +} + +pub fn sys_mutex_unlock(mutex_id: usize) -> isize { + let process = 
current_process(); + let process_inner = process.inner_exclusive_access(); + let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap()); + drop(process_inner); + drop(process); + mutex.unlock(); + 0 +} + +pub fn sys_semaphore_create(res_count: usize) -> isize { + let process = current_process(); + let mut process_inner = process.inner_exclusive_access(); + let id = if let Some(id) = process_inner + .semaphore_list + .iter() + .enumerate() + .find(|(_, item)| item.is_none()) + .map(|(id, _)| id) + { + process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count))); + id + } else { + process_inner + .semaphore_list + .push(Some(Arc::new(Semaphore::new(res_count)))); + process_inner.semaphore_list.len() - 1 + }; + id as isize +} + +pub fn sys_semaphore_up(sem_id: usize) -> isize { + let process = current_process(); + let process_inner = process.inner_exclusive_access(); + let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap()); + drop(process_inner); + sem.up(); + 0 +} + +// LAB5 HINT: Return -0xDEAD if deadlock is detected +pub fn sys_semaphore_down(sem_id: usize) -> isize { + let process = current_process(); + let process_inner = process.inner_exclusive_access(); + let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap()); + drop(process_inner); + sem.down(); + 0 +} + +pub fn sys_condvar_create(_arg: usize) -> isize { + let process = current_process(); + let mut process_inner = process.inner_exclusive_access(); + let id = if let Some(id) = process_inner + .condvar_list + .iter() + .enumerate() + .find(|(_, item)| item.is_none()) + .map(|(id, _)| id) + { + process_inner.condvar_list[id] = Some(Arc::new(Condvar::new())); + id + } else { + process_inner + .condvar_list + .push(Some(Arc::new(Condvar::new()))); + process_inner.condvar_list.len() - 1 + }; + id as isize +} + +pub fn sys_condvar_signal(condvar_id: usize) -> isize { + let process = current_process(); + let process_inner = process.inner_exclusive_access(); + let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap()); + drop(process_inner); + condvar.signal(); + 0 +} + +pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize { + let process = current_process(); + let process_inner = process.inner_exclusive_access(); + let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap()); + let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap()); + drop(process_inner); + condvar.wait(mutex); + 0 +} + +// LAB5 YOUR JOB: Implement deadlock detection, but might not all in this syscall +pub fn sys_enable_deadlock_detect(_enabled: usize) -> isize { + -1 +} diff --git a/os8/src/syscall/thread.rs b/os8/src/syscall/thread.rs new file mode 100644 index 0000000..bfba822 --- /dev/null +++ b/os8/src/syscall/thread.rs @@ -0,0 +1,86 @@ +use crate::{ + mm::kernel_token, + task::{add_task, current_task, TaskControlBlock}, + trap::{trap_handler, TrapContext}, +}; +use alloc::sync::Arc; + +pub fn sys_thread_create(entry: usize, arg: usize) -> isize { + let task = current_task().unwrap(); + let process = task.process.upgrade().unwrap(); + // create a new thread + let new_task = Arc::new(TaskControlBlock::new( + Arc::clone(&process), + task.inner_exclusive_access() + .res + .as_ref() + .unwrap() + .ustack_base, + true, + )); + let new_task_inner = new_task.inner_exclusive_access(); + let new_task_res = new_task_inner.res.as_ref().unwrap(); + let new_task_tid = new_task_res.tid; + let new_task_trap_cx = 
new_task_inner.get_trap_cx(); + *new_task_trap_cx = TrapContext::app_init_context( + entry, + new_task_res.ustack_top(), + kernel_token(), + new_task.kernel_stack.get_top(), + trap_handler as usize, + ); + (*new_task_trap_cx).x[10] = arg; + + let mut process_inner = process.inner_exclusive_access(); + // add new thread to current process + let tasks = &mut process_inner.tasks; + while tasks.len() < new_task_tid + 1 { + tasks.push(None); + } + tasks[new_task_tid] = Some(Arc::clone(&new_task)); + // add new task to scheduler + add_task(Arc::clone(&new_task)); + new_task_tid as isize +} + +pub fn sys_gettid() -> isize { + current_task() + .unwrap() + .inner_exclusive_access() + .res + .as_ref() + .unwrap() + .tid as isize +} + +/// thread does not exist, return -1 +/// thread has not exited yet, return -2 +/// otherwise, return thread's exit code +pub fn sys_waittid(tid: usize) -> i32 { + let task = current_task().unwrap(); + let process = task.process.upgrade().unwrap(); + let task_inner = task.inner_exclusive_access(); + let mut process_inner = process.inner_exclusive_access(); + // a thread cannot wait for itself + if task_inner.res.as_ref().unwrap().tid == tid { + return -1; + } + let mut exit_code: Option = None; + let waited_task = process_inner.tasks[tid].as_ref(); + if let Some(waited_task) = waited_task { + if let Some(waited_exit_code) = waited_task.inner_exclusive_access().exit_code { + exit_code = Some(waited_exit_code); + } + } else { + // waited thread does not exist + return -1; + } + if let Some(exit_code) = exit_code { + // dealloc the exited thread + process_inner.tasks[tid] = None; + exit_code + } else { + // waited thread has not exited + -2 + } +} diff --git a/os8/src/task/README.md b/os8/src/task/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/task/context.rs b/os8/src/task/context.rs new file mode 100644 index 0000000..44f9813 --- /dev/null +++ b/os8/src/task/context.rs @@ -0,0 +1,33 @@ +//! 
Implementation of [`TaskContext`] + +use crate::trap::trap_return; + +#[derive(Copy, Clone)] +#[repr(C)] +/// task context structure containing some registers +pub struct TaskContext { + /// Ret position after task switching + pub ra: usize, + /// Stack pointer + pub sp: usize, + /// s0-11 register, callee saved + pub s: [usize; 12], +} + +impl TaskContext { + pub fn zero_init() -> Self { + Self { + ra: 0, + sp: 0, + s: [0; 12], + } + } + + pub fn goto_trap_return(kstack_ptr: usize) -> Self { + Self { + ra: trap_return as usize, + sp: kstack_ptr, + s: [0; 12], + } + } +} diff --git a/os8/src/task/id.rs b/os8/src/task/id.rs new file mode 100644 index 0000000..988cfa6 --- /dev/null +++ b/os8/src/task/id.rs @@ -0,0 +1,262 @@ +use super::ProcessControlBlock; +use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT, USER_STACK_SIZE}; +use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE}; +use crate::sync::UPSafeCell; +use alloc::{ + sync::{Arc, Weak}, + vec::Vec, +}; +use lazy_static::*; + +pub struct RecycleAllocator { + current: usize, + recycled: Vec, +} + +impl RecycleAllocator { + pub fn new() -> Self { + RecycleAllocator { + current: 0, + recycled: Vec::new(), + } + } + pub fn alloc(&mut self) -> usize { + if let Some(id) = self.recycled.pop() { + id + } else { + self.current += 1; + self.current - 1 + } + } + pub fn dealloc(&mut self, id: usize) { + assert!(id < self.current); + assert!( + !self.recycled.iter().any(|i| *i == id), + "id {} has been deallocated!", + id + ); + self.recycled.push(id); + } +} + +lazy_static! { + static ref PID_ALLOCATOR: UPSafeCell = + unsafe { UPSafeCell::new(RecycleAllocator::new()) }; + static ref KSTACK_ALLOCATOR: UPSafeCell = + unsafe { UPSafeCell::new(RecycleAllocator::new()) }; +} + +pub struct PidHandle(pub usize); + +pub fn pid_alloc() -> PidHandle { + PidHandle(PID_ALLOCATOR.exclusive_access().alloc()) +} + +impl Drop for PidHandle { + fn drop(&mut self) { + PID_ALLOCATOR.exclusive_access().dealloc(self.0); + } +} + +/// Return (bottom, top) of a kernel stack in kernel space. 
+pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) { + let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE); + let bottom = top - KERNEL_STACK_SIZE; + (bottom, top) +} + +pub struct KernelStack(pub usize); + +pub fn kstack_alloc() -> KernelStack { + let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc(); + let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id); + //println!("kstack_alloc kstack_bottom: {:#x?}, kstack_top: {:#x?}", kstack_bottom, kstack_top); + KERNEL_SPACE.exclusive_access().insert_framed_area( + kstack_bottom.into(), + kstack_top.into(), + MapPermission::R | MapPermission::W, + ); + KernelStack(kstack_id) +} + +impl Drop for KernelStack { + fn drop(&mut self) { + let (kernel_stack_bottom, _) = kernel_stack_position(self.0); + let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into(); + // let kernel_stack_bottom_pa: PhysAddr = kernel_stack_bottom.into(); + // println!("kstack_drop kstack_bottom: va: {:#x?}, pa: {:#x?}", kernel_stack_bottom_va, kernel_stack_bottom_pa); + KERNEL_SPACE + .exclusive_access() + .remove_area_with_start_vpn(kernel_stack_bottom_va.into()); + } +} + +impl KernelStack { + #[allow(unused)] + pub fn push_on_top(&self, value: T) -> *mut T + where + T: Sized, + { + let kernel_stack_top = self.get_top(); + let ptr_mut = (kernel_stack_top - core::mem::size_of::()) as *mut T; + unsafe { + *ptr_mut = value; + } + ptr_mut + } + pub fn get_top(&self) -> usize { + let (_, kernel_stack_top) = kernel_stack_position(self.0); + kernel_stack_top + } +} + +pub struct TaskUserRes { + pub tid: usize, + pub ustack_base: usize, + pub process: Weak, +} + +fn trap_cx_bottom_from_tid(tid: usize) -> usize { + TRAP_CONTEXT - tid * PAGE_SIZE +} + +fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize { + ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE) +} + +impl TaskUserRes { + pub fn new( + process: Arc, + ustack_base: usize, + alloc_user_res: bool, + ) -> Self { + let tid = process.inner_exclusive_access().alloc_tid(); + let task_user_res = Self { + tid, + ustack_base, + process: Arc::downgrade(&process), + }; + if alloc_user_res { + task_user_res.alloc_user_res(); + } + task_user_res + } + + pub fn alloc_user_res(&self) { + let process = self.process.upgrade().unwrap(); + let mut process_inner = process.inner_exclusive_access(); + // alloc user stack + let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid); + let ustack_top = ustack_bottom + USER_STACK_SIZE; + process_inner.memory_set.insert_framed_area( + ustack_bottom.into(), + ustack_top.into(), + MapPermission::R | MapPermission::W | MapPermission::U, + ); + // alloc trap_cx + let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid); + let trap_cx_top = trap_cx_bottom + PAGE_SIZE; + process_inner.memory_set.insert_framed_area( + trap_cx_bottom.into(), + trap_cx_top.into(), + MapPermission::R | MapPermission::W, + ); + } + + fn dealloc_user_res(&self) { + // dealloc tid + let process = self.process.upgrade().unwrap(); + let mut process_inner = process.inner_exclusive_access(); + // dealloc ustack manually + let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into(); + process_inner + .memory_set + .remove_area_with_start_vpn(ustack_bottom_va.into()); + // dealloc trap_cx manually + let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into(); + process_inner + .memory_set + .remove_area_with_start_vpn(trap_cx_bottom_va.into()); + } + + #[allow(unused)] + pub fn alloc_tid(&mut self) { + 
self.tid = self + .process + .upgrade() + .unwrap() + .inner_exclusive_access() + .alloc_tid(); + } + + pub fn dealloc_tid(&self) { + let process = self.process.upgrade().unwrap(); + let mut process_inner = process.inner_exclusive_access(); + process_inner.dealloc_tid(self.tid); + } + + pub fn trap_cx_user_va(&self) -> usize { + trap_cx_bottom_from_tid(self.tid) + } + + pub fn trap_cx_ppn(&self) -> PhysPageNum { + let process = self.process.upgrade().unwrap(); + let process_inner = process.inner_exclusive_access(); + let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into(); + process_inner + .memory_set + .translate(trap_cx_bottom_va.into()) + .unwrap() + .ppn() + } + + pub fn ustack_base(&self) -> usize { + self.ustack_base + } + pub fn ustack_top(&self) -> usize { + ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE + } +} + +impl Drop for TaskUserRes { + fn drop(&mut self) { + self.dealloc_tid(); + self.dealloc_user_res(); + } +} + +use alloc::alloc::{alloc, dealloc, Layout}; + +#[derive(Clone)] +pub struct KStack(usize); + +const STACK_SIZE: usize = 0x8000; + +impl KStack { + pub fn new() -> KStack { + let bottom = + unsafe { alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize; + KStack(bottom) + } + + pub fn top(&self) -> usize { + self.0 + STACK_SIZE + } +} +use core::fmt::{self, Debug, Formatter}; +impl Debug for KStack { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("KStack:{:#x}", self.0)) + } +} + +impl Drop for KStack { + fn drop(&mut self) { + unsafe { + dealloc( + self.0 as _, + Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap(), + ); + } + } +} diff --git a/os8/src/task/kthread.rs b/os8/src/task/kthread.rs new file mode 100644 index 0000000..9e6955d --- /dev/null +++ b/os8/src/task/kthread.rs @@ -0,0 +1,76 @@ +use super::suspend_current_and_run_next; +use crate::task::{add_task, schedule, TaskContext, TaskControlBlock}; +use alloc::sync::Arc; + +// NOTE: This module is not required to finish the lab5, though you may run +// kernel_stackless_coroutine_test() in kernel main() to see what happens + +#[no_mangle] +pub fn kthread_create(f: fn()) { + println!("kthread_create"); + + // create kernel thread + let new_tcb = TaskControlBlock::create_kthread(f); + // let kernel_stack = new_tcb.get_kernel_stack(); + let new_task = Arc::new(new_tcb); + + // add kernel thread into TASK_MANAGER + // println!("add task"); + add_task(Arc::clone(&new_task)); +} + +#[no_mangle] +pub fn kernel_stackful_coroutine_test() { + println!("kernel_stackful_coroutine_test"); + kthread_create(|| { + let id = 1; + println!("kernel thread {:?} STARTING", id); + for i in 0..10 { + println!("kernel thread: {} counter: {}", id, i); + } + println!("kernel thread {:?} FINISHED", id); + kthread_stop(); + }); + kthread_create(|| { + let id = 2; + println!("kernel thread {:?} STARTING", id); + for i in 0..10 { + println!("kernel thread: {} counter: {}", id, i); + kthread_yield(); + } + println!("kernel thread {:?} FINISHED", id); + kthread_stop(); + }); + kthread_create(|| { + let id = 3; + println!("kernel thread {:?} STARTING", id); + for i in 0..10 { + println!("kernel thread: {} counter: {}", id, i); + kthread_yield(); + } + println!("kernel thread {:?} FINISHED", id); + kthread_stop(); + }); +} + +pub fn kthread_stop() { + do_exit(); +} +#[no_mangle] +pub fn do_exit() { + println!("kthread do exit"); + exit_kthread_and_run_next(0); + panic!("Unreachable in sys_exit!"); +} + +pub fn kthread_yield() { + 
suspend_current_and_run_next();
+}
+
+#[no_mangle]
+pub fn exit_kthread_and_run_next(exit_code: i32) {
+    println!("exit_kthread_and_run_next with code: {}", exit_code);
+    // we do not have to save task context
+    let mut _unused = TaskContext::zero_init();
+    schedule(&mut _unused as *mut _);
+}
diff --git a/os8/src/task/manager.rs b/os8/src/task/manager.rs
new file mode 100644
index 0000000..cde08ce
--- /dev/null
+++ b/os8/src/task/manager.rs
@@ -0,0 +1,46 @@
+//! Implementation of [`TaskManager`]
+//!
+//! It is only used to manage processes and schedule processes based on the ready queue.
+//! Other CPU process monitoring functions are in Processor.
+
+
+use super::TaskControlBlock;
+use crate::sync::UPSafeCell;
+use alloc::collections::VecDeque;
+use alloc::sync::Arc;
+use lazy_static::*;
+
+pub struct TaskManager {
+    ready_queue: VecDeque<Arc<TaskControlBlock>>,
+}
+
+/// A simple FIFO scheduler.
+impl TaskManager {
+    pub fn new() -> Self {
+        Self {
+            ready_queue: VecDeque::new(),
+        }
+    }
+    /// Add a process back to the ready queue
+    pub fn add(&mut self, task: Arc<TaskControlBlock>) {
+        self.ready_queue.push_back(task);
+    }
+    /// Take a process out of the ready queue
+    pub fn fetch(&mut self) -> Option<Arc<TaskControlBlock>> {
+        self.ready_queue.pop_front()
+    }
+}
+
+lazy_static! {
+    /// TASK_MANAGER instance through lazy_static!
+    pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
+        unsafe { UPSafeCell::new(TaskManager::new()) };
+}
+
+pub fn add_task(task: Arc<TaskControlBlock>) {
+    TASK_MANAGER.exclusive_access().add(task);
+}
+
+pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
+    TASK_MANAGER.exclusive_access().fetch()
+}
diff --git a/os8/src/task/mod.rs b/os8/src/task/mod.rs
new file mode 100644
index 0000000..360457a
--- /dev/null
+++ b/os8/src/task/mod.rs
@@ -0,0 +1,158 @@
+//! Implementation of the process management mechanism
+//!
+//! Here is the entry for process scheduling required by other modules
+//! (such as syscall or clock interrupt).
+//! By suspending or exiting the current process, you can
+//! modify the process state, manage the process queue through TASK_MANAGER,
+//! and switch the control flow through PROCESSOR.
+//!
+//! Be careful when you see [`__switch`]. Control flow around this function
+//! might not be what you expect.
+
+mod context;
+mod id;
+pub mod kthread;
+mod manager;
+mod process;
+mod processor;
+pub mod stackless_coroutine;
+mod switch;
+#[allow(clippy::module_inception)]
+mod task;
+
+pub use crate::syscall::process::TaskInfo;
+use crate::{
+    fs::{open_file, OpenFlags},
+    task::id::TaskUserRes,
+};
+use alloc::{sync::Arc, vec::Vec};
+pub use context::TaskContext;
+pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle};
+pub use kthread::kernel_stackful_coroutine_test;
+use lazy_static::*;
+pub use manager::add_task;
+use manager::fetch_task;
+use process::ProcessControlBlock;
+pub use processor::{
+    current_process, current_task, current_trap_cx, current_trap_cx_user_va, current_user_token,
+    run_tasks, schedule, take_current_task,
+};
+pub use stackless_coroutine::kernel_stackless_coroutine_test;
+use switch::__switch;
+pub use task::{TaskControlBlock, TaskStatus};
+
+pub fn block_current_and_run_next() {
+    let task = take_current_task().unwrap();
+    let mut task_inner = task.inner_exclusive_access();
+    let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
+    task_inner.task_status = TaskStatus::Blocking;
+    drop(task_inner);
+    schedule(task_cx_ptr);
+}
+
+/// Suspend the current task and switch to the next task
+pub fn suspend_current_and_run_next() {
+    // There must be an application running.
+ let task = take_current_task().unwrap(); + + // ---- access current TCB exclusively + let mut task_inner = task.inner_exclusive_access(); + + let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext; + // Change status to Ready + task_inner.task_status = TaskStatus::Ready; + drop(task_inner); + // ---- release current PCB + + // push back to ready queue. + add_task(task); + // jump to scheduling cycle + schedule(task_cx_ptr); +} + +/// Exit current task, recycle process resources and switch to the next task +pub fn exit_current_and_run_next(exit_code: i32) { + // take from Processor + let task = take_current_task().unwrap(); + // **** access current TCB exclusively + let mut task_inner = task.inner_exclusive_access(); + let process = task.process.upgrade().unwrap(); + let tid = task_inner.res.as_ref().unwrap().tid; + // Record exit code + task_inner.exit_code = Some(exit_code); + task_inner.res = None; + + // here we do not remove the thread since we are still using the kstack + // it will be deallocated when sys_waittid is called + drop(task_inner); + drop(task); + // debug!("task {} dropped", tid); + + if tid == 0 { + let mut process_inner = process.inner_exclusive_access(); + // mark this process as a zombie process + process_inner.is_zombie = true; + // record exit code of main process + process_inner.exit_code = exit_code; + + // do not move to its parent but under initproc + // debug!("reparent"); + + // ++++++ access initproc PCB exclusively + { + let mut initproc_inner = INITPROC.inner_exclusive_access(); + for child in process_inner.children.iter() { + child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC)); + initproc_inner.children.push(child.clone()); + } + } + let mut recycle_res = Vec::::new(); + + // debug!("deallocate user res"); + // deallocate user res (including tid/trap_cx/ustack) of all threads + // it has to be done before we dealloc the whole memory_set + // otherwise they will be deallocated twice + for task in process_inner.tasks.iter().filter(|t| t.is_some()) { + let task = task.as_ref().unwrap(); + let mut task_inner = task.inner_exclusive_access(); + if let Some(res) = task_inner.res.take() { + recycle_res.push(res); + } + } + drop(process_inner); + recycle_res.clear(); + let mut process_inner = process.inner_exclusive_access(); + // debug!("deallocate pcb res"); + process_inner.children.clear(); + // deallocate other data in user space i.e. program code/data section + process_inner.memory_set.recycle_data_pages(); + // drop file descriptors + process_inner.fd_table.clear(); + } + // debug!("pcb dropped"); + + // ++++++ release parent PCB + drop(process); + + // we do not have to save task context + let mut _unused = TaskContext::zero_init(); + schedule(&mut _unused as *mut _); +} + +lazy_static! { + /// Creation of initial process + /// + /// the name "initproc" may be changed to any other app name like "usertests", + /// but we have user_shell, so we don't need to change it. 
+    pub static ref INITPROC: Arc<ProcessControlBlock> = {
+        let inode = open_file("ch8b_initproc", OpenFlags::RDONLY).unwrap();
+        let v = inode.read_all();
+        ProcessControlBlock::new(v.as_slice())
+    };
+}
+
+pub fn add_initproc() {
+    // INITPROC must be referenced at least once so that it can be initialized
+    // through lazy_static
+    let _initproc = INITPROC.clone();
+}
diff --git a/os8/src/task/process.rs b/os8/src/task/process.rs
new file mode 100644
index 0000000..ed84735
--- /dev/null
+++ b/os8/src/task/process.rs
@@ -0,0 +1,280 @@
+use super::id::RecycleAllocator;
+use super::{add_task, pid_alloc, PidHandle, TaskControlBlock};
+use crate::fs::{File, Stdin, Stdout};
+use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
+use crate::sync::{Condvar, Mutex, Semaphore, UPSafeCell};
+use crate::trap::{trap_handler, TrapContext};
+use alloc::string::String;
+use alloc::sync::{Arc, Weak};
+use alloc::vec;
+use alloc::vec::Vec;
+use core::cell::RefMut;
+
+pub struct ProcessControlBlock {
+    // immutable
+    pub pid: PidHandle,
+    // mutable
+    inner: UPSafeCell<ProcessControlBlockInner>,
+}
+
+// LAB5 HINT: you may add data structures for deadlock detection here
+pub struct ProcessControlBlockInner {
+    pub is_zombie: bool,
+    pub memory_set: MemorySet,
+    pub parent: Option<Weak<ProcessControlBlock>>,
+    pub children: Vec<Arc<ProcessControlBlock>>,
+    pub exit_code: i32,
+    pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
+    pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
+    pub task_res_allocator: RecycleAllocator,
+    pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
+    pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
+    pub condvar_list: Vec<Option<Arc<Condvar>>>,
+}
+
+impl ProcessControlBlockInner {
+    #[allow(unused)]
+    pub fn get_user_token(&self) -> usize {
+        self.memory_set.token()
+    }
+
+    pub fn alloc_fd(&mut self) -> usize {
+        if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
+            fd
+        } else {
+            self.fd_table.push(None);
+            self.fd_table.len() - 1
+        }
+    }
+
+    pub fn alloc_tid(&mut self) -> usize {
+        self.task_res_allocator.alloc()
+    }
+
+    pub fn dealloc_tid(&mut self, tid: usize) {
+        self.task_res_allocator.dealloc(tid)
+    }
+
+    pub fn thread_count(&self) -> usize {
+        self.tasks.len()
+    }
+
+    pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
+        self.tasks[tid].as_ref().unwrap().clone()
+    }
+}
+
+impl ProcessControlBlock {
+    pub fn inner_exclusive_access(&self) -> RefMut<'_, ProcessControlBlockInner> {
+        self.inner.exclusive_access()
+    }
+
+    // LAB5 HINT: How to initialize deadlock data structures?
+ pub fn new(elf_data: &[u8]) -> Arc { + // memory_set with elf program headers/trampoline/trap context/user stack + let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data); + // allocate a pid + let pid_handle = pid_alloc(); + let process = Arc::new(Self { + pid: pid_handle, + inner: unsafe { + UPSafeCell::new(ProcessControlBlockInner { + is_zombie: false, + memory_set, + parent: None, + children: Vec::new(), + exit_code: 0, + fd_table: vec![ + // 0 -> stdin + Some(Arc::new(Stdin)), + // 1 -> stdout + Some(Arc::new(Stdout)), + // 2 -> stderr + Some(Arc::new(Stdout)), + ], + tasks: Vec::new(), + task_res_allocator: RecycleAllocator::new(), + mutex_list: Vec::new(), + semaphore_list: Vec::new(), + condvar_list: Vec::new(), + }) + }, + }); + // create a main thread, we should allocate ustack and trap_cx here + let task = Arc::new(TaskControlBlock::new( + Arc::clone(&process), + ustack_base, + true, + )); + // prepare trap_cx of main thread + let task_inner = task.inner_exclusive_access(); + let trap_cx = task_inner.get_trap_cx(); + let ustack_top = task_inner.res.as_ref().unwrap().ustack_top(); + let kernel_stack_top = task.kernel_stack.get_top(); + drop(task_inner); + *trap_cx = TrapContext::app_init_context( + entry_point, + ustack_top, + KERNEL_SPACE.exclusive_access().token(), + kernel_stack_top, + trap_handler as usize, + ); + // add main thread to the process + let mut process_inner = process.inner_exclusive_access(); + process_inner.tasks.push(Some(Arc::clone(&task))); + drop(process_inner); + // add main thread to scheduler + add_task(task); + process + } + + // LAB5 HINT: How to initialize deadlock data structures? + /// Load a new elf to replace the original application address space and start execution + /// Only support processes with a single thread. 
+ pub fn exec(self: &Arc, elf_data: &[u8], args: Vec) { + assert_eq!(self.inner_exclusive_access().thread_count(), 1); + // memory_set with elf program headers/trampoline/trap context/user stack + let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data); + let new_token = memory_set.token(); + // substitute memory_set + self.inner_exclusive_access().memory_set = memory_set; + // then we alloc user resource for main thread again + // since memory_set has been changed + let task = self.inner_exclusive_access().get_task(0); + let mut task_inner = task.inner_exclusive_access(); + task_inner.res.as_mut().unwrap().ustack_base = ustack_base; + task_inner.res.as_mut().unwrap().alloc_user_res(); + task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn(); + // push arguments on user stack + let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top(); + user_sp -= (args.len() + 1) * core::mem::size_of::(); + let argv_base = user_sp; + let mut argv: Vec<_> = (0..=args.len()) + .map(|arg| { + translated_refmut( + new_token, + (argv_base + arg * core::mem::size_of::()) as *mut usize, + ) + }) + .collect(); + *argv[args.len()] = 0; + for i in 0..args.len() { + user_sp -= args[i].len() + 1; + *argv[i] = user_sp; + let mut p = user_sp; + for c in args[i].as_bytes() { + *translated_refmut(new_token, p as *mut u8) = *c; + p += 1; + } + *translated_refmut(new_token, p as *mut u8) = 0; + } + // make the user_sp aligned to 8B for k210 platform + user_sp -= user_sp % core::mem::size_of::(); + // initialize trap_cx + let mut trap_cx = TrapContext::app_init_context( + entry_point, + user_sp, + KERNEL_SPACE.exclusive_access().token(), + task.kernel_stack.get_top(), + trap_handler as usize, + ); + trap_cx.x[10] = args.len(); + trap_cx.x[11] = argv_base; + *task_inner.get_trap_cx() = trap_cx; + } + + // LAB5 HINT: How to initialize deadlock data structures? + /// Fork from parent to child + /// Only support processes with a single thread. 
+ pub fn fork(self: &Arc) -> Arc { + let mut parent = self.inner_exclusive_access(); + assert_eq!(parent.thread_count(), 1); + // clone parent's memory_set completely including trampoline/ustacks/trap_cxs + let memory_set = MemorySet::from_existed_user(&parent.memory_set); + // alloc a pid + let pid = pid_alloc(); + // copy fd table + let mut new_fd_table: Vec>> = Vec::new(); + for fd in parent.fd_table.iter() { + if let Some(file) = fd { + new_fd_table.push(Some(file.clone())); + } else { + new_fd_table.push(None); + } + } + // create child process pcb + let child = Arc::new(Self { + pid, + inner: unsafe { + UPSafeCell::new(ProcessControlBlockInner { + is_zombie: false, + memory_set, + parent: Some(Arc::downgrade(self)), + children: Vec::new(), + exit_code: 0, + fd_table: new_fd_table, + tasks: Vec::new(), + task_res_allocator: RecycleAllocator::new(), + mutex_list: Vec::new(), + semaphore_list: Vec::new(), + condvar_list: Vec::new(), + }) + }, + }); + // add child + parent.children.push(Arc::clone(&child)); + // create main thread of child process + let task = Arc::new(TaskControlBlock::new( + Arc::clone(&child), + parent + .get_task(0) + .inner_exclusive_access() + .res + .as_ref() + .unwrap() + .ustack_base(), + // here we do not allocate trap_cx or ustack again + // but mention that we allocate a new kernel_stack here + false, + )); + // attach task to child process + let mut child_inner = child.inner_exclusive_access(); + child_inner.tasks.push(Some(Arc::clone(&task))); + drop(child_inner); + // modify kernel_stack_top in trap_cx of this thread + let task_inner = task.inner_exclusive_access(); + let trap_cx = task_inner.get_trap_cx(); + trap_cx.kernel_sp = task.kernel_stack.get_top(); + drop(task_inner); + // add this thread to scheduler + add_task(task); + child + } + + pub fn getpid(&self) -> usize { + self.pid.0 + } + + pub fn kernel_process() -> Arc { + let memory_set = MemorySet::kernel_copy(); + let process = Arc::new(ProcessControlBlock { + pid: super::pid_alloc(), + inner: unsafe { + UPSafeCell::new(ProcessControlBlockInner { + is_zombie: false, + memory_set: memory_set, + parent: None, + children: Vec::new(), + exit_code: 0, + fd_table: Vec::new(), + tasks: Vec::new(), + task_res_allocator: RecycleAllocator::new(), + mutex_list: Vec::new(), + semaphore_list: Vec::new(), + condvar_list: Vec::new(), + }) + }, + }); + process + } +} diff --git a/os8/src/task/processor.rs b/os8/src/task/processor.rs new file mode 100644 index 0000000..8308b85 --- /dev/null +++ b/os8/src/task/processor.rs @@ -0,0 +1,121 @@ +//! Implementation of [`Processor`] and Intersection of control flow +//! +//! Here, the continuous operation of user apps in CPU is maintained, +//! the current running state of CPU is recorded, +//! and the replacement and transfer of control flow of different applications are executed. 
+
+use super::__switch;
+use super::process::ProcessControlBlock;
+use super::{fetch_task, TaskStatus};
+use super::{TaskContext, TaskControlBlock};
+use crate::sync::UPSafeCell;
+use crate::trap::TrapContext;
+use alloc::sync::Arc;
+use lazy_static::*;
+
+/// Processor management structure
+pub struct Processor {
+    /// The task currently executing on the current processor
+    current: Option<Arc<TaskControlBlock>>,
+    /// The basic control flow of each core, helping to select and switch process
+    idle_task_cx: TaskContext,
+}
+
+impl Processor {
+    pub fn new() -> Self {
+        Self {
+            current: None,
+            idle_task_cx: TaskContext::zero_init(),
+        }
+    }
+    fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
+        &mut self.idle_task_cx as *mut _
+    }
+    pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
+        self.current.take()
+    }
+    pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
+        self.current.as_ref().map(|task| Arc::clone(task))
+    }
+}
+
+lazy_static! {
+    /// PROCESSOR instance through lazy_static!
+    pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
+}
+
+/// The main part of process execution and scheduling
+///
+/// Loop fetch_task to get the process that needs to run,
+/// and switch the process through __switch
+pub fn run_tasks() {
+    loop {
+        let mut processor = PROCESSOR.exclusive_access();
+        if let Some(task) = fetch_task() {
+            // println!("task get!");
+            let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
+            // access coming task TCB exclusively
+            let mut task_inner = task.inner_exclusive_access();
+            let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
+            task_inner.task_status = TaskStatus::Running;
+            drop(task_inner);
+            // release coming task TCB manually
+            processor.current = Some(task);
+            // release processor manually
+            drop(processor);
+            unsafe {
+                __switch(idle_task_cx_ptr, next_task_cx_ptr);
+            }
+        } else {
+            println!("no tasks available in run_tasks");
+        }
+    }
+}
+
+/// Get current task through take, leaving a None in its place
+pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
+    PROCESSOR.exclusive_access().take_current()
+}
+
+/// Get a copy of the current task
+pub fn current_task() -> Option<Arc<TaskControlBlock>> {
+    PROCESSOR.exclusive_access().current()
+}
+
+pub fn current_process() -> Arc<ProcessControlBlock> {
+    current_task().unwrap().process.upgrade().unwrap()
+}
+
+/// Get token of the address space of current task
+pub fn current_user_token() -> usize {
+    let task = current_task().unwrap();
+    task.get_user_token()
+}
+
+/// Get the mutable reference to trap context of current task
+pub fn current_trap_cx() -> &'static mut TrapContext {
+    current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .get_trap_cx()
+}
+
+pub fn current_trap_cx_user_va() -> usize {
+    current_task()
+        .unwrap()
+        .inner_exclusive_access()
+        .res
+        .as_ref()
+        .unwrap()
+        .trap_cx_user_va()
+}
+
+/// Return to idle control flow for new scheduling
+pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
+    let mut processor = PROCESSOR.exclusive_access();
+    let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
+    drop(processor);
+    unsafe {
+        __switch(switched_task_cx_ptr, idle_task_cx_ptr);
+    }
+}
diff --git a/os8/src/task/stackless_coroutine.rs b/os8/src/task/stackless_coroutine.rs
new file mode 100644
index 0000000..eb0762e
--- /dev/null
+++ b/os8/src/task/stackless_coroutine.rs
@@ -0,0 +1,125 @@
+// https://blog.aloni.org/posts/a-stack-less-rust-coroutine-100-loc/
+// https://github.com/chyyuu/example-coroutine-and-thread/tree/stackless-coroutine-x86
+
+// NOTE: This module is not required to finish the lab5, though you may run
+//
kernel_stackless_coroutine_test() in kernel main() to see what happens + +use core::future::Future; +use core::pin::Pin; +use core::task::{Context, Poll}; +use core::task::{RawWaker, RawWakerVTable, Waker}; + +extern crate alloc; +use alloc::collections::VecDeque; + +use alloc::boxed::Box; + +enum State { + Halted, + Running, +} + +struct Task { + state: State, +} + +impl Task { + fn waiter<'a>(&'a mut self) -> Waiter<'a> { + Waiter { task: self } + } +} + +struct Waiter<'a> { + task: &'a mut Task, +} + +impl<'a> Future for Waiter<'a> { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + match self.task.state { + State::Halted => { + self.task.state = State::Running; + Poll::Ready(()) + } + State::Running => { + self.task.state = State::Halted; + Poll::Pending + } + } + } +} + +struct Executor { + tasks: VecDeque>>>, +} + +impl Executor { + fn new() -> Self { + Executor { + tasks: VecDeque::new(), + } + } + + fn push(&mut self, closure: C) + where + F: Future + 'static, + C: FnOnce(Task) -> F, + { + let task = Task { + state: State::Running, + }; + self.tasks.push_back(Box::pin(closure(task))); + } + + fn run(&mut self) { + let waker = create_waker(); + let mut context = Context::from_waker(&waker); + + while let Some(mut task) = self.tasks.pop_front() { + match task.as_mut().poll(&mut context) { + Poll::Pending => { + self.tasks.push_back(task); + } + Poll::Ready(()) => {} + } + } + } +} + +pub fn create_waker() -> Waker { + // Safety: The waker points to a vtable with functions that do nothing. Doing + // nothing is memory-safe. + unsafe { Waker::from_raw(RAW_WAKER) } +} + +const RAW_WAKER: RawWaker = RawWaker::new(core::ptr::null(), &VTABLE); +const VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop); + +unsafe fn clone(_: *const ()) -> RawWaker { + RAW_WAKER +} +unsafe fn wake(_: *const ()) {} +unsafe fn wake_by_ref(_: *const ()) {} +unsafe fn drop(_: *const ()) {} + +#[no_mangle] +pub fn kernel_stackless_coroutine_test() { + println!("kernel stackless coroutine Begin.."); + let mut exec = Executor::new(); + println!(" Create futures"); + for instance in 1..=3 { + exec.push(move |mut task| async move { + println!(" Kernel Task {}: begin state", instance); + task.waiter().await; + println!(" Kernel Task {}: next state", instance); + task.waiter().await; + println!(" Kernel Task {}: end state", instance); + }); + } + + println!(" Running"); + exec.run(); + println!(" Done"); + println!("kernel stackless coroutine PASSED"); +} diff --git a/os8/src/task/switch.S b/os8/src/task/switch.S new file mode 100644 index 0000000..3f985d2 --- /dev/null +++ b/os8/src/task/switch.S @@ -0,0 +1,34 @@ +.altmacro +.macro SAVE_SN n + sd s\n, (\n+2)*8(a0) +.endm +.macro LOAD_SN n + ld s\n, (\n+2)*8(a1) +.endm + .section .text + .globl __switch +__switch: + # __switch( + # current_task_cx_ptr: *mut TaskContext, + # next_task_cx_ptr: *const TaskContext + # ) + # save kernel stack of current task + sd sp, 8(a0) + # save ra & s0~s11 of current execution + sd ra, 0(a0) + .set n, 0 + .rept 12 + SAVE_SN %n + .set n, n + 1 + .endr + # restore ra & s0~s11 of next execution + ld ra, 0(a1) + .set n, 0 + .rept 12 + LOAD_SN %n + .set n, n + 1 + .endr + # restore kernel stack of next task + ld sp, 8(a1) + ret + diff --git a/os8/src/task/switch.rs b/os8/src/task/switch.rs new file mode 100644 index 0000000..af08289 --- /dev/null +++ b/os8/src/task/switch.rs @@ -0,0 +1,16 @@ +//! Rust wrapper around `__switch`. +//! +//! 
Switching to a different task's context happens here. The actual +//! implementation must not be in Rust and (essentially) has to be in assembly +//! language (Do you know why?), so this module really is just a wrapper around +//! `switch.S`. + +core::arch::global_asm!(include_str!("switch.S")); + +use super::TaskContext; + +extern "C" { + /// Switch to the context of `next_task_cx_ptr`, saving the current context + /// in `current_task_cx_ptr`. + pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext); +} diff --git a/os8/src/task/task.rs b/os8/src/task/task.rs new file mode 100644 index 0000000..1591f4d --- /dev/null +++ b/os8/src/task/task.rs @@ -0,0 +1,140 @@ +//! Types related to task management & Functions for completely changing TCB + +use super::id::TaskUserRes; +use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext}; +use crate::trap::TrapContext; +use crate::{mm::PhysPageNum, sync::UPSafeCell}; +use alloc::sync::{Arc, Weak}; +use core::cell::RefMut; + +/// Task control block structure +/// +/// Directly save the contents that will not change during running +pub struct TaskControlBlock { + // immutable + pub process: Weak, + /// Kernel stack corresponding to TID + pub kernel_stack: KernelStack, + // mutable + inner: UPSafeCell, +} + +/// Structure containing more process content +/// +/// Store the contents that will change during operation +/// and are wrapped by UPSafeCell to provide mutual exclusion +pub struct TaskControlBlockInner { + /// The physical page number of the frame where the trap context is placed + pub trap_cx_ppn: PhysPageNum, + /// Save task context + pub task_cx: TaskContext, + /// Maintain the execution status of the current process + pub task_status: TaskStatus, + /// It is set when active exit or execution error occurs + pub exit_code: Option, + /// Tid and ustack will be deallocated when this goes None + pub res: Option, +} + +/// Simple access to its internal fields +impl TaskControlBlockInner { + /* + pub fn get_task_cx_ptr2(&self) -> *const usize { + &self.task_cx_ptr as *const usize + } + */ + pub fn get_trap_cx(&self) -> &'static mut TrapContext { + self.trap_cx_ppn.get_mut() + } + + #[allow(unused)] + fn get_status(&self) -> TaskStatus { + self.task_status + } +} + +impl TaskControlBlock { + pub fn new( + process: Arc, + ustack_base: usize, + alloc_user_res: bool, + ) -> Self { + let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res); + let trap_cx_ppn = res.trap_cx_ppn(); + let kernel_stack = kstack_alloc(); + let kstack_top = kernel_stack.get_top(); + Self { + process: Arc::downgrade(&process), + kernel_stack, + inner: unsafe { + UPSafeCell::new(TaskControlBlockInner { + res: Some(res), + trap_cx_ppn, + task_cx: TaskContext::goto_trap_return(kstack_top), + task_status: TaskStatus::Ready, + exit_code: None, + }) + }, + } + } + + /// Get the mutex to get the RefMut TaskControlBlockInner + pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> { + let inner = self.inner.exclusive_access(); + // if self.process.upgrade().unwrap().pid.0 > 1 { + // if let Some(res) = inner.res.as_ref() { + // println!("t{}i", res.tid); + // } + // } + inner + } + + pub fn get_user_token(&self) -> usize { + let process = self.process.upgrade().unwrap(); + let inner = process.inner_exclusive_access(); + inner.memory_set.token() + } + + pub fn create_kthread(f: fn()) -> Self { + use crate::mm::PhysAddr; + let process = ProcessControlBlock::kernel_process(); + let process = 
Arc::downgrade(&process); + + let kernelstack = crate::task::id::KStack::new(); + let kstack_top = kernelstack.top(); + + let mut context = TaskContext::zero_init(); + let context_addr = &context as *const TaskContext as usize; + let pa = PhysAddr::from(context_addr); + let context_ppn = pa.floor(); + + context.ra = f as usize; + context.sp = kstack_top; + + //println!("context ppn :{:#x?}", context_ppn); + + Self { + process, + kernel_stack: KernelStack(kstack_top), + //kstack, + inner: unsafe { + UPSafeCell::new(TaskControlBlockInner { + res: None, + trap_cx_ppn: context_ppn, + task_cx: context, + task_status: TaskStatus::Ready, + exit_code: None, + }) + }, + } + } +} + +#[derive(Copy, Clone, PartialEq)] +/// task status: UnInit, Ready, Running, Exited +pub enum TaskStatus { + UnInit, + Ready, + Running, + Blocking, +} diff --git a/os8/src/trap/README.md b/os8/src/trap/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/os8/src/trap/context.rs b/os8/src/trap/context.rs new file mode 100644 index 0000000..58e199c --- /dev/null +++ b/os8/src/trap/context.rs @@ -0,0 +1,47 @@ +//! Implementation of [`TrapContext`] + +use riscv::register::sstatus::{self, Sstatus, SPP}; + +#[repr(C)] +/// trap context structure containing sstatus, sepc and registers +pub struct TrapContext { + /// General-Purpose Register x0-31 + pub x: [usize; 32], + /// sstatus + pub sstatus: Sstatus, + /// sepc + pub sepc: usize, + /// Token of kernel address space + pub kernel_satp: usize, + /// Kernel stack pointer of the current application + pub kernel_sp: usize, + /// Virtual address of trap handler entry point in kernel + pub trap_handler: usize, +} + +impl TrapContext { + pub fn set_sp(&mut self, sp: usize) { + self.x[2] = sp; + } + pub fn app_init_context( + entry: usize, + sp: usize, + kernel_satp: usize, + kernel_sp: usize, + trap_handler: usize, + ) -> Self { + let mut sstatus = sstatus::read(); + // set CPU privilege to User after trapping back + sstatus.set_spp(SPP::User); + let mut cx = Self { + x: [0; 32], + sstatus, + sepc: entry, + kernel_satp, + kernel_sp, + trap_handler, + }; + cx.set_sp(sp); + cx + } +} diff --git a/os8/src/trap/mod.rs b/os8/src/trap/mod.rs new file mode 100644 index 0000000..d025b9a --- /dev/null +++ b/os8/src/trap/mod.rs @@ -0,0 +1,133 @@ +//! Trap handling functionality +//! +//! For rCore, we have a single trap entry point, namely `__alltraps`. At +//! initialization in [`init()`], we set the `stvec` CSR to point to it. +//! +//! All traps go through `__alltraps`, which is defined in `trap.S`. The +//! assembly language code does just enough work restore the kernel space +//! context, ensuring that Rust code safely runs, and transfers control to +//! [`trap_handler()`]. +//! +//! It then calls different functionality based on what exactly the exception +//! was. For example, timer interrupts trigger task preemption, and syscalls go +//! to [`syscall()`]. 
+ +mod context; + +use crate::config::TRAMPOLINE; +use crate::syscall::syscall; +use crate::task::{ + current_trap_cx, current_trap_cx_user_va, current_user_token, exit_current_and_run_next, + suspend_current_and_run_next, +}; +use crate::timer::{check_timer, set_next_trigger}; +use riscv::register::{ + mtvec::TrapMode, + scause::{self, Exception, Interrupt, Trap}, + sie, stval, stvec, +}; + +core::arch::global_asm!(include_str!("trap.S")); + +pub fn init() { + set_kernel_trap_entry(); +} + +fn set_kernel_trap_entry() { + unsafe { + stvec::write(trap_from_kernel as usize, TrapMode::Direct); + } +} + +fn set_user_trap_entry() { + unsafe { + stvec::write(TRAMPOLINE as usize, TrapMode::Direct); + } +} + +pub fn enable_timer_interrupt() { + unsafe { + sie::set_stimer(); + } +} + +#[no_mangle] +pub fn trap_handler() -> ! { + set_kernel_trap_entry(); + let scause = scause::read(); + let stval = stval::read(); + match scause.cause() { + Trap::Exception(Exception::UserEnvCall) => { + // jump to next instruction anyway + let mut cx = current_trap_cx(); + cx.sepc += 4; + // get system call return value + let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12], cx.x[13]]); + // cx is changed during sys_exec, so we have to call it again + cx = current_trap_cx(); + cx.x[10] = result as usize; + } + Trap::Exception(Exception::StoreFault) + | Trap::Exception(Exception::StorePageFault) + | Trap::Exception(Exception::InstructionFault) + | Trap::Exception(Exception::InstructionPageFault) + | Trap::Exception(Exception::LoadFault) + | Trap::Exception(Exception::LoadPageFault) => { + println!( + "[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, core dumped.", + scause.cause(), + stval, + current_trap_cx().sepc, + ); + // page fault exit code + exit_current_and_run_next(-2); + } + Trap::Exception(Exception::IllegalInstruction) => { + println!("[kernel] IllegalInstruction in application, core dumped."); + // illegal instruction exit code + exit_current_and_run_next(-3); + } + Trap::Interrupt(Interrupt::SupervisorTimer) => { + set_next_trigger(); + check_timer(); + suspend_current_and_run_next(); + } + _ => { + panic!( + "Unsupported trap {:?}, stval = {:#x}!", + scause.cause(), + stval + ); + } + } + trap_return(); +} + +#[no_mangle] +pub fn trap_return() -> ! { + set_user_trap_entry(); + let trap_cx_ptr = current_trap_cx_user_va(); + let user_satp = current_user_token(); + extern "C" { + fn __alltraps(); + fn __restore(); + } + let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE; + unsafe { + core::arch::asm!( + "fence.i", + "jr {restore_va}", + restore_va = in(reg) restore_va, + in("a0") trap_cx_ptr, + in("a1") user_satp, + options(noreturn) + ); + } +} + +#[no_mangle] +pub fn trap_from_kernel() -> ! 
{ + panic!("a trap {:?} from kernel!", scause::read().cause()); +} + +pub use context::TrapContext; diff --git a/os8/src/trap/trap.S b/os8/src/trap/trap.S new file mode 100644 index 0000000..c0e2d15 --- /dev/null +++ b/os8/src/trap/trap.S @@ -0,0 +1,69 @@ +.altmacro +.macro SAVE_GP n + sd x\n, \n*8(sp) +.endm +.macro LOAD_GP n + ld x\n, \n*8(sp) +.endm + .section .text.trampoline + .globl __alltraps + .globl __restore + .align 2 +__alltraps: + csrrw sp, sscratch, sp + # now sp->*TrapContext in user space, sscratch->user stack + # save other general purpose registers + sd x1, 1*8(sp) + # skip sp(x2), we will save it later + sd x3, 3*8(sp) + # skip tp(x4), application does not use it + # save x5~x31 + .set n, 5 + .rept 27 + SAVE_GP %n + .set n, n+1 + .endr + # we can use t0/t1/t2 freely, because they have been saved in TrapContext + csrr t0, sstatus + csrr t1, sepc + sd t0, 32*8(sp) + sd t1, 33*8(sp) + # read user stack from sscratch and save it in TrapContext + csrr t2, sscratch + sd t2, 2*8(sp) + # load kernel_satp into t0 + ld t0, 34*8(sp) + # load trap_handler into t1 + ld t1, 36*8(sp) + # move to kernel_sp + ld sp, 35*8(sp) + # switch to kernel space + csrw satp, t0 + sfence.vma + # jump to trap_handler + jr t1 + +__restore: + # a0: *TrapContext in user space(Constant); a1: user space token + # switch to user space + csrw satp, a1 + sfence.vma + csrw sscratch, a0 + mv sp, a0 + # now sp points to TrapContext in user space, start restoring based on it + # restore sstatus/sepc + ld t0, 32*8(sp) + ld t1, 33*8(sp) + csrw sstatus, t0 + csrw sepc, t1 + # restore general purpose registers except x0/sp/tp + ld x1, 1*8(sp) + ld x3, 3*8(sp) + .set n, 5 + .rept 27 + LOAD_GP %n + .set n, n+1 + .endr + # back to user stack + ld sp, 2*8(sp) + sret From f8bcac68ecbf4b7f6a7e19d4a5585dfad2e1a647 Mon Sep 17 00:00:00 2001 From: kxxt Date: Thu, 8 Dec 2022 14:00:32 +0800 Subject: [PATCH 3/4] split mode --- .github/workflows/chatgpt.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/chatgpt.yml b/.github/workflows/chatgpt.yml index 57e1c96..87600e4 100644 --- a/.github/workflows/chatgpt.yml +++ b/.github/workflows/chatgpt.yml @@ -13,6 +13,6 @@ jobs: with: number: ${{ github.event.pull_request.number }} sessionToken: ${{ secrets.CHATGPT_SESSION_TOKEN }} - split: 'yolo' # Use true to enable the unstable split feature. + split: 'true' # Use true to enable the unstable split feature. env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 9a442e8f2c122a162510eae181c4b06d6d224bfc Mon Sep 17 00:00:00 2001 From: kxxt Date: Thu, 8 Dec 2022 14:26:02 +0800 Subject: [PATCH 4/4] chatgpt code review: use latest commit --- .github/workflows/chatgpt.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/chatgpt.yml b/.github/workflows/chatgpt.yml index 87600e4..32b73bd 100644 --- a/.github/workflows/chatgpt.yml +++ b/.github/workflows/chatgpt.yml @@ -8,7 +8,7 @@ jobs: name: Let chatgpt comment on your PR. steps: - name: ChatGPT comment - uses: kxxt/chatgpt-action@v0.2 + uses: kxxt/chatgpt-action@HEAD id: chatgpt with: number: ${{ github.event.pull_request.number }}