Commit 25ac97ad authored by Nifou

Better memory usage

parent 25d3e488
Pipeline #191980347 passed with stages in 8 minutes and 32 seconds
......@@ -185,7 +185,7 @@ impl<'a> ElfLoader<'a> {
area.mappings(mappings.clone());
// Add the mappings in the process' page table
self.process.as_mut().unwrap().memory().map(area);
self.process.as_mut().unwrap().memory().map(area).unwrap();
// Unmap the binary in the kernel's address space
memory::unmap(mappings);
......@@ -215,6 +215,7 @@ impl<'a> ElfLoader<'a> {
memory.image = Some((start_image, end_image));
memory.data_segment = Some((start_data_segment, end_data_segment));
memory.stack = Some((start_stack, end_stack));
memory.signal_stack = Some((start_signal_stack, end_signal_stack));
if let Some(interpreter) = self.binary.interpreter {
self.load_interpreter(String::from(interpreter), end_signal_stack);
......@@ -240,6 +241,7 @@ impl<'a> ElfLoader<'a> {
let start = load_addr + ph.p_vaddr as u64;
// Map the binary in the kernel's address space
// TODO: Use a fixed address for temporary mapping
let mappings =
memory::map(start, start + ph.p_memsz, Flags::PRESENT | Flags::WRITABLE);
......@@ -285,7 +287,7 @@ impl<'a> ElfLoader<'a> {
area.mappings(mappings.clone());
// Add the mappings in the process' page table
self.process.as_mut().unwrap().memory().map(area);
self.process.as_mut().unwrap().memory().map(area).unwrap();
// Unmap the binary in the kernel's address space
memory::unmap(mappings);
......@@ -295,7 +297,7 @@ impl<'a> ElfLoader<'a> {
// Change the entry point
self.process.as_mut().unwrap().thread.get_kstack().rip = (load_addr + binary.entry) as usize;
println!("Interpreter {} loaded successfully!", path);
println!("Interpreter {} successfully loaded at {:#x}!", path, load_addr);
},
Err(e) => {
println!("ELF Error: {}", e);
......
......@@ -53,7 +53,6 @@ pub fn stop() -> ! {
/// A helper which signals that the current interrupt ended
pub fn end(id: IrqId) {
println!("{:?} terminated successfully", id);
unsafe {
PICS.lock().notify_end_of_interrupt(id as u8);
}
......@@ -299,6 +298,8 @@ pub extern "x86-interrupt" fn page_fault_handler(
let page = Page::<Size4KiB>::containing_address(addr);
// This is a critical function, so the end justifies the means
unsafe { PROCESSES.force_write_unlock(); }
let mut processes = PROCESSES.write();
if let Some(area) = processes
......@@ -399,7 +400,7 @@ pub extern "x86-interrupt" fn page_fault_handler(
.get_mut(&current_thread.process)
.expect("Could not find the current process")
.memory
.map(area);
.map(area).unwrap();
handled = true;
......
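The first page-fault hunk above forcibly releases the PROCESSES write lock before re-taking it. A minimal sketch of that pattern, assuming the spin crate's RwLock (which is what `force_write_unlock` suggests); `DEMO` and `demo_fault_path` are illustrative names, and the pattern is only sound when the interrupted code on this same CPU is the one holding the lock:

```rust
use spin::RwLock;

static DEMO: RwLock<u32> = RwLock::new(0);

/// Illustrative only: the force-unlock-then-relock pattern used in the
/// page-fault handler above.
unsafe fn demo_fault_path() {
    // If the interrupted code already held the write lock, calling
    // `write()` here would deadlock, so the handler releases it first.
    DEMO.force_write_unlock();
    let mut guard = DEMO.write();
    *guard += 1;
}
```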
......@@ -84,7 +84,7 @@ macro_rules! fault_without_code {
}
#[macro_export]
/// Defines a handler for faults wich does not return
/// Defines a handler for faults which does not return
macro_rules! fault_non_return {
($fn: ident, $stack: ident, $signal: ident, $kernel_panic_code: block) => {
/// A handler for `$fn` faults which does not return
......
......@@ -19,7 +19,7 @@ use x86_64::{
registers::control::{Cr3, Cr3Flags},
structures::paging::{
frame::PhysFrame,
mapper::{Mapper, RecursivePageTable},
mapper::{Mapper, MapToError, RecursivePageTable},
page_table::PageTableFlags as Flags,
FrameAllocator, Page, PageTable, PageTableIndex,
},
......@@ -99,26 +99,29 @@ impl ProcessPageTable {
}
/// Map a range of pages to a range of frames in the process' page table
pub unsafe fn mappings(&self, mappings: Mappings, mut flags: Flags, mut table_flags: Flags) {
pub unsafe fn mappings(&self, mappings: Mappings, mut flags: Flags, mut table_flags: Flags) -> Result<(), MapToError<Size4KiB>> {
// Add the USER_ACCESSIBLE flag to make sure that the content can be accessed in
// userspace
table_flags.set(Flags::USER_ACCESSIBLE, true);
flags.set(Flags::USER_ACCESSIBLE, true);
let mut ret = Ok(());
self.with(|mapper| {
for mapping in mappings {
mapper
if let Err(e) = mapper
.map_to_with_table_flags(
mapping.1,
mapping.0,
flags,
table_flags,
&mut *FRAME_ALLOCATOR.r#try().unwrap().lock(),
)
.expect("Mapping error")
.ignore(); // Ignore mapping because that is not the active page table
) {
ret = Err(e);
}
}
});
ret
}
/// Map all pages between two addresses in the process' page table
......@@ -128,7 +131,7 @@ impl ProcessPageTable {
end: u64,
mut flags: Flags,
mut table_flags: Flags,
) -> Mappings {
) -> Result<Mappings, MapToError<Size4KiB>> {
let mut mappings = Vec::new();
// Add the USER_ACCESSIBLE flag to make sure that the content can be accessed in
......@@ -136,6 +139,7 @@ impl ProcessPageTable {
table_flags.set(Flags::USER_ACCESSIBLE, true);
flags.set(Flags::USER_ACCESSIBLE, true);
let mut ret = Ok(());
self.with(|mapper| {
let page_range = {
let start_addr = VirtAddr::new(start);
......@@ -155,18 +159,22 @@ impl ProcessPageTable {
mappings.push((frame, page));
mapper
if let Err(e) = mapper
.map_to(
page,
frame,
flags,
&mut *FRAME_ALLOCATOR.r#try().unwrap().lock(),
)
.expect("Mapping error")
.ignore(); // Ignore mapping because we will flush all mappings after
) {
ret = Err(e);
}
}
});
mappings
match ret {
Ok(()) => Ok(mappings),
Err(e) => Err(e),
}
}
/// Unmap a page
......
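The ProcessPageTable hunks above replace the `.expect("Mapping error")` panics with a `Result<(), MapToError<Size4KiB>>` return value that call sites now handle. A minimal sketch of the same propagation idea against the x86_64 crate's `Mapper` trait; `demo_map_range` and its parameters are illustrative rather than kernel API, and the crate version is assumed to be one where `map_to` is an unsafe method returning a `MapperFlush`:

```rust
use x86_64::{
    structures::paging::{
        mapper::{MapToError, Mapper},
        FrameAllocator, Page, PageTableFlags as Flags, PhysFrame, Size4KiB,
    },
    PhysAddr, VirtAddr,
};

/// Illustrative only: map `count` consecutive 4 KiB pages and propagate the
/// first mapping error to the caller instead of panicking.
unsafe fn demo_map_range<M, A>(
    mapper: &mut M,
    allocator: &mut A,
    virt: VirtAddr,
    phys: PhysAddr,
    count: u64,
    flags: Flags,
) -> Result<(), MapToError<Size4KiB>>
where
    M: Mapper<Size4KiB>,
    A: FrameAllocator<Size4KiB>,
{
    for i in 0..count {
        let page = Page::containing_address(virt + i * 4096);
        let frame = PhysFrame::containing_address(phys + i * 4096);
        // `?` forwards the MapToError; the returned flush is ignored because
        // the target page table is not the active one.
        mapper.map_to(page, frame, flags, allocator)?.ignore();
    }
    Ok(())
}
```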
......@@ -17,6 +17,7 @@
//! This module defines the errors which can be returned by syscalls
/// An error that can occur during a syscall
#[derive(Debug, PartialOrd, PartialEq, Clone, Copy)]
pub enum Error {
/// No such file or directory
ENOENT = 2,
......
......@@ -112,7 +112,7 @@ impl Syscall {
/// Read from a file descriptor
pub fn read(&self, fd: usize, buf: usize, count: usize) -> Result {
println!("read({}, {:#x}, {})", fd, buf, count);
print!("read({}, {:#x}, {}) = ", fd, buf, count);
let mut processes = PROCESSES.write();
let current = SCHEDULER.get().current().unwrap().process;
let process = processes.get_mut(&current).unwrap();
......@@ -126,12 +126,19 @@ impl Syscall {
unsafe { src.copy_to(dest, count); }
file.offset += count;
println!("{}", count);
Ok(count)
},
Err(_) => Err(Error::EINVAL),
Err(_) => {
println!("EINVAL");
Err(Error::EINVAL)
},
}
},
None => Err(Error::EBADFD),
None => {
println!("EBADF");
Err(Error::EBADFD)
},
}
}
......@@ -245,7 +252,10 @@ impl Syscall {
match process.get_file(Fd(fd)) {
Some(file) => {
let size = INITFS.size(file.path.clone()).unwrap();
stat.st_dev = 0;
stat.st_ino = 10 + buf as u64; // HACK: Random number ;)
stat.st_size = size as i64;
stat.st_mode = 0o100000 + 0o777;
println!("{}", 0);
Ok(0)
......
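For context on the fstat hunk above: `0o100000` is the regular-file type bit of `st_mode` and `0o777` the permission bits, so the value reported describes a world-readable, writable and executable regular file. A tiny sketch of that composition; the constant names are the usual POSIX ones, not identifiers defined in this kernel:

```rust
// Illustrative constants only; they are not defined anywhere in this kernel.
const S_IFREG: u32 = 0o100000; // "regular file" type bit
const PERM_RWX_ALL: u32 = 0o777; // rwx for user, group and other

fn demo_mode() -> u32 {
    // Equivalent to the `0o100000 + 0o777` above, since the two masks
    // do not share any bits.
    S_IFREG | PERM_RWX_ALL // == 0o100777
}
```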
......@@ -16,7 +16,14 @@
*/
//! Syscalls used to manage the memory
use bitflags::bitflags;
use x86_64::structures::paging::page_table::PageTableFlags as Flags;
use core::cmp;
use x86_64::{
VirtAddr,
structures::paging::{
page_table::PageTableFlags as Flags,
Page, Size4KiB,
},
};
use super::{
error::{Error, Result},
......@@ -26,6 +33,10 @@ use crate::tasking::{
memory::MemoryArea,
scheduler::{PROCESSES, SCHEDULER},
};
use crate::{
initfs::INITFS,
fs::Fd,
};
bitflags! {
struct MmapFlags: u32 {
......@@ -78,14 +89,15 @@ impl Syscall {
fd: usize,
offset: usize,
) -> Result {
println!(
"mmap({:#x}, {}, {}, {}, {:#x}, {})",
addr, len, prot, flags, fd, offset
);
let prot = MmapProt::from_bits_truncate(prot as u32);
let flags = MmapFlags::from_bits_truncate(flags as u32);
print!(
"mmap({:#x}, {}, {:?}, {:?}, {:#x}, {}) = ",
addr, len, prot, flags, fd, offset
);
// TODO: Return EEXIST if the region is already mapped
if flags.contains(MmapFlags::ANONYMOUS) {
if addr == 0 {
let mut processes = PROCESSES.write();
......@@ -111,13 +123,14 @@ impl Syscall {
.get_mut(&current_process)
.unwrap()
.memory
.map(free_area);
.map(free_area).unwrap();
processes
.get_mut(&current_process)
.unwrap()
.memory
.switch_page_table();
println!("{:#x}", free_area_start);
Ok(free_area_start as usize)
} else {
let mut processes = PROCESSES.write();
......@@ -138,20 +151,146 @@ impl Syscall {
.get_mut(&current_process)
.unwrap()
.memory
.map(area);
.map(area).unwrap();
processes
.get_mut(&current_process)
.unwrap()
.memory
.switch_page_table();
println!("{:#x}", addr);
Ok(addr)
}
} else if flags.contains(MmapFlags::PRIVATE) {
if addr == 0 {
let mut processes = PROCESSES.write();
// Use the kernel's page table
crate::memory::switch_to_kernel_page_table();
// Get the current thread's process
let current_process = SCHEDULER.get().current().unwrap().process;
let free_area_start = processes
.get_mut(&current_process)
.unwrap()
.memory
.find_free_area(len);
let free_area = MemoryArea::new(
free_area_start,
free_area_start + len as u64,
Flags::from(prot) | Flags::WRITABLE, // TODO: Remove it later
);
processes
.get_mut(&current_process)
.unwrap()
.memory
.map(free_area).unwrap();
processes
.get_mut(&current_process)
.unwrap()
.memory
.switch_page_table();
match processes.get(&current_process).unwrap().get_file(Fd(fd)) {
Some(file) => {
let data = INITFS.get_binary_file(file.path.clone(), offset).unwrap();
let src = data.as_ptr();
let dest = free_area_start as *mut u8;
unsafe { src.copy_to(dest, len); }
},
None => {
println!("ENOENT");
return Err(Error::ENOENT);
},
}
println!("{:#x}", free_area_start as usize);
Ok(free_area_start as usize)
} else {
let mut processes = PROCESSES.write();
// Use the kernel's page table
crate::memory::switch_to_kernel_page_table();
// Get the current thread's process
let current_process = SCHEDULER.get().current().unwrap().process;
let area = MemoryArea::new(
addr as u64,
addr as u64 + len as u64,
Flags::from(prot) | Flags::WRITABLE, // TODO: Remove it later
);
// Check whether the address overlaps an existing area
if let Some(area) = processes
.get_mut(&current_process)
.unwrap()
.memory
.get_area_mut(addr as u64) {
let area_end = area.end;
// Set the area's end to the new mapping's end
area.end = (addr + len) as u64;
// Unmap the pages between the old and the new end of the area
let range = Page::<Size4KiB>::range_inclusive(
Page::containing_address(VirtAddr::new(cmp::min((addr + len) as u64, area_end))),
Page::containing_address(VirtAddr::new(cmp::max((addr + len) as u64, area_end))),
);
for page in range {
processes
.get_mut(&current_process)
.unwrap()
.memory
.table
.unmap(page);
}
} else {
processes
.get_mut(&current_process)
.unwrap()
.memory
.map(area).unwrap();
}
processes
.get_mut(&current_process)
.unwrap()
.memory
.switch_page_table();
match processes.get(&current_process).unwrap().get_file(Fd(fd)) {
Some(file) => {
let data = INITFS.get_binary_file(file.path.clone(), offset).unwrap();
let src = data.as_ptr();
let dest = addr as *mut u8;
unsafe { src.copy_to(dest, len); }
},
None => {
println!("ENOENT");
return Err(Error::ENOENT);
},
}
println!("{:#x}", addr);
Ok(addr)
}
} else {
println!("EINVAL");
Err(Error::EINVAL)
}
}
/// Set protection on a region of memory
pub fn mprotect(&self, addr: usize, len: usize, prot: usize) -> Result {
println!("mprotect({:#x}, {}, {}) = 0", addr, len, prot);
Ok(0)
}
/// Change data segment size
pub fn brk(&self, addr: usize) -> Result {
print!("brk({:#x}) = ", addr);
......
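The mmap hunks above add a file-backed MAP_PRIVATE path next to the existing MAP_ANONYMOUS one. A condensed sketch of the flag dispatch; the kernel's own MmapFlags definition is cut off in this diff, so the flag values below are assumptions based on the usual Linux x86_64 ABI:

```rust
use bitflags::bitflags;

bitflags! {
    // Assumed values: MAP_PRIVATE = 0x02 and MAP_ANONYMOUS = 0x20, as on
    // Linux x86_64; the real definition is not visible in the hunk above.
    struct DemoMmapFlags: u32 {
        const PRIVATE = 0x02;
        const ANONYMOUS = 0x20;
    }
}

/// Illustrative only: the three branches taken by the mmap syscall above.
fn demo_dispatch(raw_flags: u32) -> &'static str {
    let flags = DemoMmapFlags::from_bits_truncate(raw_flags);
    if flags.contains(DemoMmapFlags::ANONYMOUS) {
        "anonymous: pick a free area (or honour addr) and map zero-filled pages"
    } else if flags.contains(DemoMmapFlags::PRIVATE) {
        "file-backed private: map pages, then copy the file contents from INITFS"
    } else {
        "anything else: EINVAL"
    }
}
```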
......@@ -60,4 +60,7 @@ impl Syscall {
Ok(0)
}
/// Fast userspace locking
pub fn futex(&self, _: usize, _: usize, _: usize, _: usize) -> Result { Ok(0) }
}
......@@ -129,6 +129,7 @@ const CLOSE: usize = 3;
const STAT: usize = 4;
const FSTAT: usize = 5;
const MMAP: usize = 9;
const MPROTECT: usize = 10;
const BRK: usize = 12;
const SIGACTION: usize = 13;
const SIGPROCMASK: usize = 14;
......@@ -147,6 +148,7 @@ const ARCH_PRCTL: usize = 158;
const GETTID: usize = 186;
const TKILL: usize = 200;
const SET_TID_ADDR: usize = 218;
const FUTEX: usize = 202;
const EXIT_GROUP: usize = 231;
const OPENAT: usize = 257;
const PIPE2: usize = 293;
......@@ -169,7 +171,7 @@ impl Syscall {
self.stack.rdx,
self.stack.r10,
self.stack.r8,
0,
self.stack.r9,
];
let result = match num {
......@@ -180,6 +182,7 @@ impl Syscall {
STAT => self.stat(args[0], args[1]),
FSTAT => self.fstat(args[0], args[1]),
MMAP => self.mmap(args[0], args[1], args[2], args[3], args[4], args[5]),
MPROTECT => self.mprotect(args[0], args[1], args[2]),
BRK => self.brk(args[0]),
SIGACTION => self.sigaction(args[0], args[1], args[2]),
SIGPROCMASK => self.sigprocmask(args[0], args[1], args[2], args[3]),
......@@ -198,6 +201,7 @@ impl Syscall {
GETTID => self.gettid(),
TKILL => self.tkill(args[0], args[1]),
SET_TID_ADDR => self.set_tid_addr(args[0]),
FUTEX => self.futex(args[0], args[1], args[2], args[3]),
EXIT_GROUP => self.exit_group(args[0]),
OPENAT => self.openat(args[0], args[1], args[2], args[3]),
PIPE2 => self.pipe2(args[0], args[1]),
......
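One small but important fix in the dispatch hunk above: the sixth syscall argument is now taken from r9 instead of a hard-coded 0, which is what lets mmap receive its offset parameter. A minimal sketch of the register-to-argument mapping, assuming the usual x86_64 syscall convention; the first registers sit outside the visible hunk, and `DemoStack` is an illustrative stand-in for the kernel's saved-register struct:

```rust
/// Illustrative stand-in for the saved userspace registers.
struct DemoStack {
    rdi: usize,
    rsi: usize,
    rdx: usize,
    r10: usize,
    r8: usize,
    r9: usize,
}

/// Illustrative only: collect the six syscall arguments in ABI order.
fn demo_args(stack: &DemoStack) -> [usize; 6] {
    // Before this commit the last slot was a literal 0, so six-argument
    // syscalls such as mmap always saw their final argument as zero.
    [stack.rdi, stack.rsi, stack.rdx, stack.r10, stack.r8, stack.r9]
}
```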
......@@ -16,7 +16,7 @@
*/
//! A trait which makes the addition of other scheduler algorithms easier
use alloc::{
prelude::v1::Box,
prelude::v1::{String, Box},
alloc::{GlobalAlloc, Layout},
};
use core::mem::{transmute, size_of};
......@@ -80,7 +80,14 @@ pub trait SchedulerAlgo {
let signal = process.signals.pop_front().unwrap();
let action = process.actions.get(&signal.0).expect("cannot find the requested signal");
println!("[ {} ] {:#x} {:?} {}", process.pid.0, signal.1.si_addr, signal.0, process.name);
if process.name == String::from("/bin/test-std-dynamic") {
// The interpreter is loaded at some base address in memory (right after the signal stack),
// so subtract that base for simpler debugging
println!("[ {} ] {:#x} {:?} {}", process.pid.0, signal.1.si_addr as u64 - process.memory.signal_stack.unwrap().1, signal.0, process.name);
}
else {
println!("[ {} ] {:#x} {:?} {}", process.pid.0, signal.1.si_addr, signal.0, process.name);
}
if action.sa_handler == SIGIGN {
// Ignore this signal, so nothing to do!
......@@ -247,4 +254,9 @@ pub trait SchedulerAlgo {
}
unreachable!();
}
/// Idle function when there is no process
fn idle(&self) {
loop {}
}
}
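The new idle() default above busy-spins. A common alternative, sketched here under the assumption that the x86_64 crate is available (as it is elsewhere in this kernel), is to halt the CPU until the next interrupt; this is not the author's implementation, only an illustration:

```rust
/// Illustrative only: an idle loop that sleeps the CPU between interrupts
/// instead of spinning.
fn demo_idle() -> ! {
    loop {
        // `hlt` stops the CPU until the next interrupt arrives.
        x86_64::instructions::hlt();
    }
}
```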
......@@ -19,8 +19,9 @@ use alloc::prelude::v1::Vec;
use core::{cmp::Ordering, fmt};
use x86_64::{
structures::paging::{
mapper::Mapper, page_table::PageTableFlags as Flags, FrameAllocator, Page, PhysFrame,
Size4KiB,
mapper::{Mapper, MapToError},
page_table::PageTableFlags as Flags,
FrameAllocator, Page, PhysFrame, Size4KiB,
},
instructions::tlb,
registers::control::Cr3,
......@@ -51,6 +52,9 @@ pub struct ProcessMemory {
/// The start and end addresses of the stack
pub stack: Option<(u64, u64)>,
/// The start and end addresses of the signal stack
pub signal_stack: Option<(u64, u64)>,
}
impl ProcessMemory {
......@@ -123,21 +127,29 @@ impl ProcessMemory {
image: None,
data_segment: None,
stack: None,
signal_stack: None,
}
}
/// Map all pages between two addresses in the process' page table
pub fn map(&mut self, mut area: MemoryArea) {
pub fn map(&mut self, mut area: MemoryArea) -> Result<(), MapToError<Size4KiB>> {
// Use existing mappings if provided
unsafe {
if !area.mappings.is_empty() {
self.table
.mappings(area.clone().mappings, area.flags, Flags::PRESENT);
match self.table
.mappings(area.clone().mappings, area.flags, Flags::PRESENT) {
Ok(()) => (),
Err(e) => return Err(e),
}
} else {
let mappings = self
.table
.map(area.start, area.end, area.flags, Flags::PRESENT);
area.mappings = mappings;
match mappings {
Ok(mappings) => area.mappings = mappings,
Err(e) => return Err(e),
}
}
}
......@@ -148,6 +160,8 @@ impl ProcessMemory {
// Add the area in the list of existing areas
self.areas.push(area);
Ok(())
}
/// Find a free memory area
......@@ -186,7 +200,7 @@ impl ProcessMemory {
pub fn get_area_mut(&mut self, addr: u64) -> Option<&mut MemoryArea> {
self.areas
.iter_mut()
.find(|area| area.start <= addr && area.end >= addr)
.find(|area| area.start <= addr && area.end > addr)
}
/// Switch to the process' page table
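The get_area_mut hunk above tightens the containment check from `end >= addr` to `end > addr`, making areas half-open. A small sketch of why that matters; `DemoArea` and the addresses are illustrative:

```rust
/// Illustrative only: half-open memory areas `[start, end)`.
struct DemoArea {
    start: u64,
    end: u64,
}

fn contains(area: &DemoArea, addr: u64) -> bool {
    // Matches the corrected check above: the end address is excluded.
    area.start <= addr && area.end > addr
}

fn demo() {
    let low = DemoArea { start: 0x1000, end: 0x2000 };
    let high = DemoArea { start: 0x2000, end: 0x3000 };
    // With the old `>=` check both areas would have matched 0x2000;
    // now the boundary address belongs only to the higher area.
    assert!(!contains(&low, 0x2000));
    assert!(contains(&high, 0x2000));
}
```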
......@@ -242,7 +256,7 @@ impl ProcessMemory {
new.areas.push(area.clone());
// Change page table
new.map(area.clone());
new.map(area.clone()).unwrap();
}
(old, new)
......
......@@ -179,7 +179,7 @@ impl Process {
Flags::PRESENT | Flags::WRITABLE | Flags::NO_EXECUTE,
);
area.mappings(mappings.clone());
self.memory.map(area);
self.memory.map(area).unwrap();
// Unmap the stack in the kernel's address space
memory::unmap(mappings);
......@@ -198,7 +198,7 @@ impl Process {
addr + DATA_SEGMENT_SIZE as u64,
Flags::PRESENT | Flags::WRITABLE | Flags::NO_EXECUTE,
);
self.memory.map(area);
self.memory.map(area).unwrap();