// kernel_thread/task/mem.rs
use alloc::{collections::btree_map::BTreeMap, vec::Vec};
use common::{config::PAGE_SIZE, page::PhysPage};
use core::cmp;
use crate::consts::task::{DEF_HEAP_ADDR, DEF_STACK_BOTTOM, DEF_STACK_TOP};
use super::Sel4Task;
/// Per-task virtual-memory bookkeeping: mapped page tables, mapped pages
/// and the current program break.
pub struct TaskMemInfo {
/// Page-table capabilities mapped for this task.
pub mapped_pt: Vec<sel4::cap::PT>,
/// Mapped physical pages, keyed by page-aligned virtual address
/// (lookups elsewhere use `vaddr / PAGE_SIZE * PAGE_SIZE`).
pub mapped_page: BTreeMap<usize, PhysPage>,
/// Current program break (end of the heap), adjusted by `Sel4Task::brk`.
pub heap: usize,
}
impl Default for TaskMemInfo {
fn default() -> Self {
Self {
mapped_pt: Default::default(),
mapped_page: Default::default(),
heap: DEF_HEAP_ADDR,
}
}
}
impl Sel4Task {
    /// Adjust the task's program break (heap end), Linux-`brk` style.
    ///
    /// `value == 0` queries the current break without changing anything.
    /// Otherwise the break is moved to `value`, blank pages are mapped for
    /// the newly covered region `[old_break, value)` and `value` is returned.
    ///
    /// NOTE(review): shrinking the heap (`value < heap`) only records the
    /// smaller break — previously mapped pages are not unmapped.
    pub fn brk(&self, value: usize) -> usize {
        let mut mem_info = self.mem.lock();
        if value == 0 {
            return mem_info.heap;
        }
        let origin = mem_info.heap;
        mem_info.heap = value;
        // Release the lock before mapping; map_blank_page needs it again.
        drop(mem_info);
        // NOTE(review): `origin` is not necessarily page-aligned here —
        // assumes map_blank_page aligns its argument. TODO confirm.
        for vaddr in (origin..value).step_by(PAGE_SIZE) {
            self.map_blank_page(vaddr);
        }
        value
    }

    /// Read a 32-bit little-endian instruction at `vaddr`.
    ///
    /// Copies byte ranges per page so an instruction that straddles a page
    /// boundary is read correctly instead of panicking on an out-of-range
    /// slice (the previous `[offset..offset + 4]` overran the page when
    /// `offset > PAGE_SIZE - 4`). Returns `None` if any covered page is
    /// unmapped.
    pub fn read_ins(&self, vaddr: usize) -> Option<u32> {
        let mem_info = self.mem.lock();
        let mut ins = [0u8; 4];
        let mut filled = 0;
        while filled < ins.len() {
            let addr = vaddr + filled;
            let page = mem_info.mapped_page.get(&(addr / PAGE_SIZE * PAGE_SIZE))?;
            let offset = addr % PAGE_SIZE;
            let rsize = cmp::min(PAGE_SIZE - offset, ins.len() - filled);
            ins[filled..filled + rsize].copy_from_slice(&page.lock()[offset..offset + rsize]);
            filled += rsize;
        }
        Some(u32::from_le_bytes(ins))
    }

    /// Read `len` bytes of task memory starting at `vaddr`.
    ///
    /// Pages inside the default stack range are mapped on demand via
    /// `check_addr` first. Returns `None` if the range touches a page that
    /// is still unmapped afterwards.
    pub fn read_bytes(&self, mut vaddr: usize, len: usize) -> Option<Vec<u8>> {
        self.check_addr(vaddr, len);
        let mut data = Vec::with_capacity(len);
        let mem_info = self.mem.lock();
        let vaddr_end = vaddr + len;
        while vaddr < vaddr_end {
            let page = mem_info.mapped_page.get(&(vaddr / PAGE_SIZE * PAGE_SIZE))?;
            let offset = vaddr % PAGE_SIZE;
            // Copy up to the end of the current page or of the request,
            // whichever comes first.
            let rsize = cmp::min(PAGE_SIZE - offset, vaddr_end - vaddr);
            data.extend_from_slice(&page.lock()[offset..offset + rsize]);
            vaddr += rsize;
        }
        Some(data)
    }

    /// Read a NUL-terminated C string starting at `vaddr`.
    ///
    /// The terminating NUL is not included in the result. Returns `None`
    /// when an unmapped page is reached before a NUL is found.
    pub fn read_cstr(&self, mut vaddr: usize) -> Option<Vec<u8>> {
        let mut data = Vec::new();
        let mem_info = self.mem.lock();
        loop {
            let page = mem_info.mapped_page.get(&(vaddr / PAGE_SIZE * PAGE_SIZE))?;
            let offset = vaddr % PAGE_SIZE;
            // Lock the page once per iteration instead of re-locking for
            // every slice operation.
            let page_data = page.lock();
            match page_data[offset..].iter().position(|x| *x == 0) {
                Some(position) => {
                    data.extend_from_slice(&page_data[offset..offset + position]);
                    return Some(data);
                }
                // No NUL on this page: take the rest and continue on the
                // next page.
                None => data.extend_from_slice(&page_data[offset..]),
            }
            drop(page_data);
            vaddr += PAGE_SIZE - offset;
        }
    }

    /// Read a NUL-terminated array of `usize` values (e.g. an argv/envp
    /// pointer vector) starting at `vaddr`.
    ///
    /// Returns `None` when an unmapped page is reached before the
    /// terminating zero.
    pub fn read_vec(&self, mut vaddr: usize) -> Option<Vec<usize>> {
        let mut data = Vec::new();
        let mem_info = self.mem.lock();
        loop {
            let page = mem_info.mapped_page.get(&(vaddr / PAGE_SIZE * PAGE_SIZE))?;
            let mut offset = vaddr % PAGE_SIZE;
            while offset < PAGE_SIZE {
                let value = page.lock().read_usize(offset);
                if value == 0 {
                    return Some(data);
                }
                data.push(value);
                offset += size_of::<usize>();
            }
            // Advance to the start of the next page. The old code did
            // `vaddr += PAGE_SIZE - offset` with the *mutated* offset
            // (== PAGE_SIZE here), adding 0 and looping forever whenever
            // the vector crossed a page boundary.
            vaddr = (vaddr / PAGE_SIZE + 1) * PAGE_SIZE;
        }
    }

    /// Write `data` into task memory starting at `vaddr`.
    ///
    /// Returns `None` (with a partial write possibly performed) if the
    /// range touches an unmapped page.
    pub fn write_bytes(&self, mut vaddr: usize, data: &[u8]) -> Option<()> {
        let mem_info = self.mem.lock();
        let vaddr_end = vaddr + data.len();
        // Running offset into `data`. The old code copied `&data[..rsize]`
        // on every iteration, so a multi-page write repeated the first
        // chunk instead of advancing through the source buffer.
        let mut written = 0;
        while vaddr < vaddr_end {
            let page = mem_info.mapped_page.get(&(vaddr / PAGE_SIZE * PAGE_SIZE))?;
            let offset = vaddr % PAGE_SIZE;
            let rsize = cmp::min(PAGE_SIZE - offset, vaddr_end - vaddr);
            page.lock()[offset..offset + rsize].copy_from_slice(&data[written..written + rsize]);
            vaddr += rsize;
            written += rsize;
        }
        Some(())
    }

    /// Ensure the pages covering `[vaddr, vaddr + size)` are usable.
    ///
    /// Already-mapped pages are skipped; unmapped pages that fall inside
    /// the default stack region are mapped on demand. Other unmapped
    /// addresses are left alone — accessors will return `None` for them.
    pub fn check_addr(&self, vaddr: usize, size: usize) {
        let bottom = vaddr / PAGE_SIZE * PAGE_SIZE;
        let top = (vaddr + size).div_ceil(PAGE_SIZE) * PAGE_SIZE;
        for vaddr in (bottom..top).step_by(PAGE_SIZE) {
            // Take the lock per page so it is not held across
            // map_blank_page (which locks `self.mem` itself).
            if self.mem.lock().mapped_page.contains_key(&vaddr) {
                continue;
            }
            if (DEF_STACK_BOTTOM..DEF_STACK_TOP).contains(&vaddr) {
                self.map_blank_page(vaddr);
            }
        }
    }

    /// Unmap every mapped page and recycle its capability slot.
    ///
    /// The memory lock is held for the whole operation (the old code
    /// released it between unmapping and `clear()`, leaving a window where
    /// the map still listed pages whose frames were already unmapped).
    pub fn clear_maped(&self) {
        let mut mem_info = self.mem.lock();
        for page in mem_info.mapped_page.values() {
            page.cap().frame_unmap().unwrap();
            let slot = sel4_kit::slot_manager::LeafSlot::from_cap(page.cap());
            slot.revoke().unwrap();
            slot.delete().unwrap();
            common::slot::recycle_slot(slot);
        }
        mem_info.mapped_page.clear();
    }
}