kernel_thread/task/shm.rs

use alloc::{collections::btree_map::BTreeMap, sync::Arc, vec::Vec};
use common::{mem::CapMemSet, slot::recycle_slot};
use sel4::cap::Granule;
use sel4_kit::slot_manager::LeafSlot;
use spin::Mutex;
use crate::utils::obj::recycle_untyped_unit;

/// Global registry of shared memory segments, indexed by key.
pub static SHARED_MEMORY: Mutex<BTreeMap<usize, Arc<SharedMemory>>> = Mutex::new(BTreeMap::new());

/// A shared memory segment and the capabilities that back it.
pub struct SharedMemory {
    /// Capability set that owns the untyped memory backing this segment.
    pub capset: Mutex<CapMemSet>,
    /// Frame (granule) capabilities that make up the segment.
    pub trackers: Vec<Granule>,
    /// Set to `true` once the segment has been marked for deletion.
    pub deleted: Mutex<bool>,
}

impl SharedMemory {
    /// Creates a new segment from its capability set and frame capabilities.
    pub const fn new(capset: Mutex<CapMemSet>, trackers: Vec<Granule>) -> Self {
        Self {
            capset,
            trackers,
            deleted: Mutex::new(false),
        }
    }
}

impl Drop for SharedMemory {
    fn drop(&mut self) {
        // Revoke and delete every frame capability, then return its slot.
        self.trackers.iter().for_each(|cap| {
            let slot = LeafSlot::from_cap(*cap);
            slot.revoke().unwrap();
            slot.delete().unwrap();
            recycle_slot(slot);
        });
        // Hand the backing untyped objects back to the allocator.
        self.capset
            .lock()
            .untyped_list()
            .iter()
            .for_each(|(ut, _)| {
                recycle_untyped_unit(*ut);
            });
    }
}

/// A shared memory segment mapped into a task's address space.
#[derive(Clone)]
pub struct MapedSharedMemory {
    /// Key of the segment in [`SHARED_MEMORY`].
    pub key: usize,
    /// The underlying shared memory segment.
    pub mem: Arc<SharedMemory>,
    /// Virtual address at which the segment is mapped.
    pub start: usize,
    /// Size of the mapping in bytes.
    pub size: usize,
}

impl MapedSharedMemory {
    /// Returns `true` if `vaddr` lies within the mapped range
    /// `[start, start + size)`.
    pub fn contains(&self, vaddr: usize) -> bool {
        vaddr >= self.start && vaddr < self.start + self.size
    }
}

impl Drop for MapedSharedMemory {
    fn drop(&mut self) {
        // A strong count of 2 means only the global registry and this mapping
        // still reference the segment. If it has also been marked deleted,
        // remove it from the registry so the segment (and its capabilities)
        // is freed once this last mapping goes away.
        if Arc::strong_count(&self.mem) == 2 && *self.mem.deleted.lock() {
            SHARED_MEMORY.lock().remove(&self.key);
        }
    }
}