mod file;
mod info;
mod init;
mod mem;
mod pcb;
pub mod shm;
mod signal;
use alloc::{sync::Arc, vec::Vec};
use common::{
config::{DEFAULT_PARENT_EP, DEFAULT_SERVE_EP, LINUX_APP_CNODE_RADIX_BITS, PAGE_SIZE},
mem::CapMemSet,
page::PhysPage,
slot::{alloc_slot, recycle_slot},
};
use core::{
cmp,
sync::atomic::{AtomicU64, Ordering},
task::Waker,
};
use file::TaskFileInfo;
use info::TaskInfo;
use libc_core::signal::SignalNum;
use mem::TaskMemInfo;
use object::{File, Object, ObjectSection, ObjectSegment};
use sel4::{
CapRights, Error, VmAttributes,
init_thread::{self, slot},
};
use sel4_kit::slot_manager::LeafSlot;
use signal::TaskSignal;
use spin::mutex::Mutex;
use zerocopy::IntoBytes;
use crate::{
child_test::{FutexTable, TASK_MAP, futex_wake, wake_hangs},
consts::task::VDSO_REGION_APP_ADDR,
task::{pcb::ProcessControlBlock, shm::MapedSharedMemory},
utils::obj::{alloc_untyped_unit, recycle_untyped_unit},
vdso::get_vdso_caps,
};
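/// A Linux-like task (process or thread) backed by seL4 kernel objects.
///
/// Threads created with [`Sel4Task::create_thread`] share the `vspace`, `capset`,
/// `mem`, `shm`, `futex_table`, `file` and `pcb` of the task they were spawned from.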
pub struct Sel4Task {
pub pid: usize,
pub ppid: usize,
pub pgid: usize,
pub tid: usize,
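    /// Capability and untyped-memory allocator shared by every thread in the process.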
pub capset: Arc<Mutex<CapMemSet>>,
pub tcb: sel4::cap::Tcb,
pub cnode: sel4::cap::CNode,
pub vspace: sel4::cap::VSpace,
pub mem: Arc<Mutex<TaskMemInfo>>,
pub shm: Arc<Mutex<Vec<Arc<MapedSharedMemory>>>>,
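    /// Exit code, set once the task has terminated via [`Sel4Task::exit_with`].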
pub exit: Mutex<Option<u32>>,
pub futex_table: Arc<Mutex<FutexTable>>,
pub signal: Mutex<TaskSignal>,
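    /// Address of the `clear_child_tid` word; zeroed and futex-woken on exit (see [`Sel4Task::exit_with`]).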
pub clear_child_tid: Mutex<usize>,
pub file: TaskFileInfo,
pub info: Mutex<TaskInfo>,
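    /// Arc whose strong count tracks the live threads of this process; the last
    /// exiting thread tears down the shared address space (see [`Sel4Task::exit_with`]).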
pub thread_counter: Mutex<Option<Arc<()>>>,
pub pcb: Arc<ProcessControlBlock>,
pub waker: Mutex<Option<(PollWakeEvent, Waker)>>,
}
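/// Kind of event a blocked task is waiting on, paired with its stored [`Waker`].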
pub enum PollWakeEvent {
Signal(SignalNum),
Timer,
Blocking,
}
impl Drop for Sel4Task {
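    /// Releases all file descriptors when the last task sharing the file table is dropped.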
fn drop(&mut self) {
        if Arc::strong_count(&self.file.file_ds) == 1 {
            let mut file_ds = self.file.file_ds.lock();
            for fd in 0..=512 {
                file_ds.remove(fd);
            }
        }
}
}
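/// Monotonic allocator for task and thread IDs; IDs start at 1.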
static ID_COUNTER: AtomicU64 = AtomicU64::new(1);
impl Sel4Task {
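    /// Creates a new process: allocates its own VSpace, TCB and CNode, assigns the
    /// ASID pool to the VSpace, and installs the TCB cap plus a badged endpoint
    /// (badge = `tid`) into the new CNode.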
pub fn new() -> Result<Self, sel4::Error> {
let tid = ID_COUNTER.fetch_add(1, Ordering::SeqCst) as usize;
let mut capset = CapMemSet::new(Some(alloc_untyped_unit));
let vspace = capset.alloc_vspace();
let tcb = capset.alloc_tcb();
let cnode = capset.alloc_cnode(LINUX_APP_CNODE_RADIX_BITS);
        slot::ASID_POOL.cap().asid_pool_assign(vspace)?;
        cnode
            .absolute_cptr_from_bits_with_depth(1, LINUX_APP_CNODE_RADIX_BITS)
            .copy(&LeafSlot::from_cap(tcb).abs_cptr(), CapRights::all())?;
cnode
.absolute_cptr_from_bits_with_depth(
DEFAULT_PARENT_EP.bits(),
LINUX_APP_CNODE_RADIX_BITS,
)
.mint(
&LeafSlot::from(DEFAULT_SERVE_EP).abs_cptr(),
CapRights::all(),
tid as u64,
)?;
Ok(Sel4Task {
tid,
pid: tid,
pgid: 0,
ppid: 1,
tcb,
cnode,
vspace,
shm: Arc::new(Mutex::new(Vec::new())),
capset: Arc::new(Mutex::new(capset)),
futex_table: Arc::new(Mutex::new(Vec::new())),
mem: Arc::new(Mutex::new(TaskMemInfo::default())),
signal: Mutex::new(TaskSignal::default()),
exit: Mutex::new(None),
clear_child_tid: Mutex::new(0),
file: TaskFileInfo::default(),
info: Mutex::new(TaskInfo::default()),
thread_counter: Mutex::new(Some(Arc::new(()))),
pcb: Arc::new(ProcessControlBlock::new()),
waker: Mutex::new(None),
})
}
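    /// Creates a thread in this process: it gets its own TCB, CNode and tid but
    /// shares the VSpace, capability allocator, memory map, shared memory,
    /// futex table, file table and PCB.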
pub fn create_thread(&self) -> Result<Self, sel4::Error> {
let capset = self.capset.clone();
let tid = ID_COUNTER.fetch_add(1, Ordering::SeqCst) as usize;
let tcb = capset.lock().alloc_tcb();
let cnode = capset.lock().alloc_cnode(LINUX_APP_CNODE_RADIX_BITS);
        cnode
            .absolute_cptr_from_bits_with_depth(1, LINUX_APP_CNODE_RADIX_BITS)
            .copy(&LeafSlot::from_cap(tcb).abs_cptr(), CapRights::all())?;
cnode
.absolute_cptr_from_bits_with_depth(
DEFAULT_PARENT_EP.bits(),
LINUX_APP_CNODE_RADIX_BITS,
)
.mint(
&LeafSlot::from(DEFAULT_SERVE_EP).abs_cptr(),
CapRights::all(),
tid as u64,
)?;
Ok(Sel4Task {
pid: self.pid,
ppid: self.ppid,
pgid: self.pgid,
tid,
tcb,
cnode,
shm: self.shm.clone(),
vspace: self.vspace,
capset: self.capset.clone(),
mem: self.mem.clone(),
exit: Mutex::new(None),
futex_table: self.futex_table.clone(),
signal: Mutex::new(TaskSignal::default()),
clear_child_tid: Mutex::new(0),
file: self.file.clone(),
info: Mutex::new(self.info.lock().clone()),
thread_counter: Mutex::new(self.thread_counter.lock().clone()),
pcb: self.pcb.clone(),
waker: Mutex::new(None),
})
}
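    /// Returns the lowest free virtual address at or above `start` (and above the
    /// current end of the task image) where `size` bytes can be mapped.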
pub fn find_free_area(&self, start: usize, size: usize) -> usize {
let mut last_addr = self.info.lock().task_vm_end.max(start);
for vaddr in self.mem.lock().mapped_page.keys() {
if last_addr + size <= *vaddr {
return last_addr;
}
            last_addr = last_addr.max(*vaddr + PAGE_SIZE);
}
last_addr
}
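    /// Maps `page` at the page-aligned `vaddr`, allocating intermediate page
    /// tables on demand, and records the mapping in the task's memory info.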
pub fn map_page(&self, vaddr: usize, page: PhysPage) {
assert_eq!(vaddr % PAGE_SIZE, 0);
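        // Retry the mapping, allocating one missing intermediate page table per
        // `FailedLookup` error, until every translation level exists.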
for _ in 0..sel4::vspace_levels::NUM_LEVELS {
let res: core::result::Result<(), sel4::Error> = page.cap().frame_map(
self.vspace,
vaddr as _,
CapRights::all(),
VmAttributes::DEFAULT,
);
match res {
Ok(_) => {
self.mem.lock().mapped_page.insert(vaddr, page);
return;
}
Err(Error::FailedLookup) => {
let pt_cap = self.capset.lock().alloc_pt();
pt_cap
.pt_map(self.vspace, vaddr, VmAttributes::DEFAULT)
.unwrap();
self.mem.lock().mapped_pt.push(pt_cap);
}
                _ => {
                    log::error!("failed to map page at {:#x}: {:?}", vaddr, res);
                    res.unwrap()
                }
}
}
}
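    /// Unmaps and recycles the page mapped at the page-aligned `vaddr`, if any.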
pub fn unmap_page(&mut self, vaddr: usize) {
assert_eq!(vaddr % PAGE_SIZE, 0);
if let Some(page) = self.mem.lock().mapped_page.remove(&vaddr) {
page.cap().frame_unmap().unwrap();
self.capset.lock().recycle_page(page.cap());
}
}
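    /// Allocates a fresh page, maps it at the page-aligned `vaddr` and returns it.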
pub fn map_blank_page(&self, mut vaddr: usize) -> PhysPage {
vaddr = vaddr / PAGE_SIZE * PAGE_SIZE;
let page_cap = PhysPage::new(self.capset.lock().alloc_page());
self.map_page(vaddr, page_cap.clone());
page_cap
}
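    /// Maps blank pages over the page-aligned range `start..end`.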
pub fn map_region(&self, start: usize, end: usize) {
        assert_eq!(start % PAGE_SIZE, 0);
        assert_eq!(end % PAGE_SIZE, 0);
for vaddr in (start..end).step_by(PAGE_SIZE) {
self.map_blank_page(vaddr);
}
}
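    /// Loads an ELF image into the task: patches syscall instructions in `.text`
    /// (aarch64 only), copies each segment page by page, records the end of the
    /// task image and maps the vDSO pages at [`VDSO_REGION_APP_ADDR`].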
pub fn load_elf(&self, file: &File<'_>) {
file.sections()
.filter(|x| x.name() == Ok(".text"))
.for_each(|sec| {
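                // Replace every AArch64 `svc #0` (0xd4000001) in .text with an
                // undefined instruction so user syscalls trap as faults that can
                // be intercepted, rather than as real seL4 syscalls.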
#[cfg(target_arch = "aarch64")]
{
const SVC_INST: u32 = 0xd4000001;
const ERR_INST: u32 = 0xdeadbeef;
let data = sec.data().unwrap();
let ptr = data.as_ptr() as *mut u32;
for i in 0..sec.size() as usize / size_of::<u32>() {
unsafe {
if ptr.add(i).read() == SVC_INST {
ptr.add(i).write_volatile(ERR_INST);
}
}
}
}
#[cfg(not(target_arch = "aarch64"))]
log::warn!("Modify Syscall Instruction Not Supported For This Arch.");
});
file.segments().for_each(|seg| {
let mut data = seg.data().unwrap();
let mut vaddr = seg.address() as usize;
let vaddr_end = vaddr + seg.size() as usize;
while vaddr < vaddr_end {
let voffset = vaddr % PAGE_SIZE;
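                // Reuse the page already mapped at this address (unmapping it first
                // so it can be remapped after the copy), or map a blank page.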
                let existing = self
                    .mem
                    .lock()
                    .mapped_page
                    .remove(&(vaddr / PAGE_SIZE * PAGE_SIZE));
                let page_cap = match existing {
Some(page_cap) => {
page_cap.cap().frame_unmap().unwrap();
page_cap
}
None => self.map_blank_page(vaddr),
};
if !data.is_empty() {
let rsize = cmp::min(PAGE_SIZE - vaddr % PAGE_SIZE, data.len());
page_cap.lock()[voffset..voffset + rsize].copy_from_slice(&data[..rsize]);
data = &data[rsize..];
}
self.map_page(vaddr / PAGE_SIZE * PAGE_SIZE, page_cap);
vaddr += PAGE_SIZE - vaddr % PAGE_SIZE;
}
});
self.info.lock().task_vm_end = file
.sections()
.fold(0, |acc, x| cmp::max(acc, x.address() + x.size()))
.div_ceil(PAGE_SIZE as _) as usize
* PAGE_SIZE;
get_vdso_caps().iter().enumerate().for_each(|(i, page)| {
let new_slot = alloc_slot();
new_slot
.copy_from(&LeafSlot::from_cap(*page), CapRights::all())
.unwrap();
self.map_page(
VDSO_REGION_APP_ADDR + i * PAGE_SIZE,
PhysPage::new(new_slot.cap()),
);
});
}
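    /// Terminates the task with `code`: wakes blocked waiters, handles
    /// `clear_child_tid`, signals the parent, and releases the task's kernel
    /// objects; the last thread of a process also reclaims the shared VSpace,
    /// page tables, pages and untyped memory.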
pub fn exit_with(&self, code: u32) {
*self.exit.lock() = Some(code);
wake_hangs(self);
let uaddr = *self.clear_child_tid.lock();
if uaddr != 0 {
self.write_bytes(uaddr, 0u32.as_bytes());
futex_wake(self.futex_table.clone(), uaddr, 1);
}
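        // Deliver the configured exit signal to the parent unless the task is its
        // own parent.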
if self.ppid != self.pid {
            if let Some(signal) = self.signal.lock().exit_sig {
                if let Some((_, parent)) =
                    TASK_MAP.lock().iter().find(|x| *x.0 == self.ppid as _)
                {
                    parent.add_signal(signal, self.tid);
                }
            }
}
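        // Revoke and delete this thread's own TCB and CNode and return their slots.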
let root_cnode = init_thread::slot::CNODE.cap();
root_cnode.absolute_cptr(self.tcb).revoke().unwrap();
root_cnode.absolute_cptr(self.tcb).delete().unwrap();
root_cnode.absolute_cptr(self.cnode).revoke().unwrap();
root_cnode.absolute_cptr(self.cnode).delete().unwrap();
recycle_slot(self.tcb.into());
recycle_slot(self.cnode.into());
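        // The last live thread also reclaims the shared VSpace, page tables, mapped
        // pages and the untyped memory backing the capability allocator.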
if Arc::strong_count(self.thread_counter.lock().as_ref().unwrap()) == 1 {
root_cnode.absolute_cptr(self.vspace).revoke().unwrap();
root_cnode.absolute_cptr(self.vspace).delete().unwrap();
recycle_slot(self.vspace.into());
self.mem.lock().mapped_pt.iter().for_each(|cap| {
root_cnode.absolute_cptr(*cap).revoke().unwrap();
root_cnode.absolute_cptr(*cap).delete().unwrap();
recycle_slot((*cap).into());
});
self.mem
.lock()
.mapped_page
.iter()
.for_each(|(_, phys_page)| {
root_cnode.absolute_cptr(phys_page.cap()).revoke().unwrap();
root_cnode.absolute_cptr(phys_page.cap()).delete().unwrap();
recycle_slot(phys_page.cap().into());
});
let mut capset = self.capset.lock();
capset.release();
capset.untyped_list().iter().for_each(|(untyped, _)| {
root_cnode.absolute_cptr(*untyped).revoke().unwrap();
recycle_untyped_unit(*untyped);
});
}
*self.thread_counter.lock() = None;
}
}