diff --git a/modules/axmm/Cargo.toml b/modules/axmm/Cargo.toml
index 46a7d5d828..d07cccd266 100644
--- a/modules/axmm/Cargo.toml
+++ b/modules/axmm/Cargo.toml
@@ -4,14 +4,12 @@ version.workspace = true
 edition.workspace = true
 authors = ["Yuekai Jia "]
 description = "ArceOS virtual memory management module"
-license.workspace = true
 homepage.workspace = true
 repository = "https://github.com/arceos-org/arceos/tree/main/modules/axmm"
 documentation = "https://arceos-org.github.io/arceos/axmm/index.html"
 
 [dependencies]
 axhal = { workspace = true, features = ["paging"] }
-axalloc = { workspace = true }
 axconfig = { workspace = true }
 
 log = "=0.4.21"
@@ -21,3 +19,7 @@ memory_addr = "0.3"
 kspin = "0.1"
 memory_set = "0.3"
 page_table_multiarch = "0.5.3"
+page_table_entry = "0.5.3"
+cfg-if = "1.0"
+
+aspace_generic = { git = "https://github.com/arceos-org/aspace_generic.git", features = ["uspace"] }
diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs
deleted file mode 100644
index ef86486171..0000000000
--- a/modules/axmm/src/aspace.rs
+++ /dev/null
@@ -1,448 +0,0 @@
-use core::fmt;
-
-use axerrno::{AxError, AxResult, ax_err};
-use axhal::mem::phys_to_virt;
-use axhal::paging::{MappingFlags, PageTable, PagingError};
-use memory_addr::{
-    MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
-};
-use memory_set::{MemoryArea, MemorySet};
-
-use crate::backend::Backend;
-use crate::mapping_err_to_ax_err;
-
-/// The virtual memory address space.
-pub struct AddrSpace {
-    va_range: VirtAddrRange,
-    areas: MemorySet<Backend>,
-    pt: PageTable,
-}
-
-impl AddrSpace {
-    /// Returns the address space base.
-    pub const fn base(&self) -> VirtAddr {
-        self.va_range.start
-    }
-
-    /// Returns the address space end.
-    pub const fn end(&self) -> VirtAddr {
-        self.va_range.end
-    }
-
-    /// Returns the address space size.
-    pub fn size(&self) -> usize {
-        self.va_range.size()
-    }
-
-    /// Returns the reference to the inner page table.
-    pub const fn page_table(&self) -> &PageTable {
-        &self.pt
-    }
-
-    /// Returns the root physical address of the inner page table.
-    pub const fn page_table_root(&self) -> PhysAddr {
-        self.pt.root_paddr()
-    }
-
-    /// Checks if the address space contains the given address range.
-    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
-        self.va_range
-            .contains_range(VirtAddrRange::from_start_size(start, size))
-    }
-
-    /// Creates a new empty address space.
-    pub fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
-        Ok(Self {
-            va_range: VirtAddrRange::from_start_size(base, size),
-            areas: MemorySet::new(),
-            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
-        })
-    }
-
-    /// Copies page table mappings from another address space.
-    ///
-    /// It copies the page table entries only rather than the memory regions,
-    /// usually used to copy a portion of the kernel space mapping to the
-    /// user space.
-    ///
-    /// Note that on dropping, the copied PTEs will also be cleared, which could
-    /// taint the original page table. For workaround, you can use
-    /// [`AddrSpace::clear_mappings`].
-    ///
-    /// Returns an error if the two address spaces overlap.
-    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
-        if self.va_range.overlaps(other.va_range) {
-            return ax_err!(InvalidInput, "address space overlap");
-        }
-        self.pt.copy_from(&other.pt, other.base(), other.size());
-        Ok(())
-    }
-
-    /// Clears the page table mappings in the given address range.
-    ///
-    /// This should be used in pair with [`AddrSpace::copy_mappings_from`].
-    pub fn clear_mappings(&mut self, range: VirtAddrRange) {
-        self.pt.clear_copy_range(range.start, range.size());
-    }
-
-    fn validate_region(&self, start: VirtAddr, size: usize) -> AxResult {
-        if !self.contains_range(start, size) {
-            return ax_err!(InvalidInput, "address out of range");
-        }
-        if !start.is_aligned_4k() || !is_aligned_4k(size) {
-            return ax_err!(InvalidInput, "address not aligned");
-        }
-        Ok(())
-    }
-
-    /// Finds a free area that can accommodate the given size.
-    ///
-    /// The search starts from the given hint address, and the area should be within the given limit range.
-    ///
-    /// Returns the start address of the free area. Returns None if no such area is found.
-    pub fn find_free_area(
-        &self,
-        hint: VirtAddr,
-        size: usize,
-        limit: VirtAddrRange,
-    ) -> Option<VirtAddr> {
-        self.areas.find_free_area(hint, size, limit)
-    }
-
-    /// Add a new linear mapping.
-    ///
-    /// See [`Backend`] for more details about the mapping backends.
-    ///
-    /// The `flags` parameter indicates the mapping permissions and attributes.
-    ///
-    /// Returns an error if the address range is out of the address space or not
-    /// aligned.
-    pub fn map_linear(
-        &mut self,
-        start_vaddr: VirtAddr,
-        start_paddr: PhysAddr,
-        size: usize,
-        flags: MappingFlags,
-    ) -> AxResult {
-        self.validate_region(start_vaddr, size)?;
-        if !start_paddr.is_aligned_4k() {
-            return ax_err!(InvalidInput, "address not aligned");
-        }
-
-        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
-        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
-        self.areas
-            .map(area, &mut self.pt, false)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// Add a new allocation mapping.
-    ///
-    /// See [`Backend`] for more details about the mapping backends.
-    ///
-    /// The `flags` parameter indicates the mapping permissions and attributes.
-    ///
-    /// Returns an error if the address range is out of the address space or not
-    /// aligned.
-    pub fn map_alloc(
-        &mut self,
-        start: VirtAddr,
-        size: usize,
-        flags: MappingFlags,
-        populate: bool,
-    ) -> AxResult {
-        self.validate_region(start, size)?;
-
-        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
-        self.areas
-            .map(area, &mut self.pt, false)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// Populates the area with physical frames, returning false if the area
-    /// contains unmapped area.
-    pub fn populate_area(&mut self, mut start: VirtAddr, size: usize) -> AxResult {
-        self.validate_region(start, size)?;
-        let end = start + size;
-
-        while let Some(area) = self.areas.find(start) {
-            let backend = area.backend();
-            if let Backend::Alloc { populate } = backend {
-                if !*populate {
-                    for addr in PageIter4K::new(start, area.end().min(end)).unwrap() {
-                        match self.pt.query(addr) {
-                            Ok(_) => {}
-                            // If the page is not mapped, try map it.
-                            Err(PagingError::NotMapped) => {
-                                if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) {
-                                    return Err(AxError::NoMemory);
-                                }
-                            }
-                            Err(_) => return Err(AxError::BadAddress),
-                        };
-                    }
-                }
-            }
-            start = area.end();
-            assert!(start.is_aligned_4k());
-            if start >= end {
-                break;
-            }
-        }
-
-        if start < end {
-            // If the area is not fully mapped, we return ENOMEM.
-            return ax_err!(NoMemory);
-        }
-
-        Ok(())
-    }
-
-    /// Removes mappings within the specified virtual address range.
-    ///
-    /// Returns an error if the address range is out of the address space or not
-    /// aligned.
-    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
-        self.validate_region(start, size)?;
-
-        self.areas
-            .unmap(start, size, &mut self.pt)
-            .map_err(mapping_err_to_ax_err)?;
-        Ok(())
-    }
-
-    /// To remove user area mappings from address space.
-    pub fn unmap_user_areas(&mut self) -> AxResult {
-        for area in self.areas.iter() {
-            assert!(area.start().is_aligned_4k());
-            assert!(area.size() % PAGE_SIZE_4K == 0);
-            assert!(area.flags().contains(MappingFlags::USER));
-            assert!(
-                self.va_range
-                    .contains_range(VirtAddrRange::from_start_size(area.start(), area.size())),
-                "MemorySet contains out-of-va-range area"
-            );
-        }
-        self.areas.clear(&mut self.pt).unwrap();
-        Ok(())
-    }
-
-    /// To process data in this area with the given function.
-    ///
-    /// Now it supports reading and writing data in the given interval.
-    ///
-    /// # Arguments
-    /// - `start`: The start virtual address to process.
-    /// - `size`: The size of the data to process.
-    /// - `f`: The function to process the data, whose arguments are the start virtual address,
-    ///   the offset and the size of the data.
-    ///
-    /// # Notes
-    /// The caller must ensure that the permission of the operation is allowed.
-    fn process_area_data<F>(&self, start: VirtAddr, size: usize, f: F) -> AxResult
-    where
-        F: FnMut(VirtAddr, usize, usize),
-    {
-        Self::process_area_data_with_page_table(&self.pt, &self.va_range, start, size, f)
-    }
-
-    fn process_area_data_with_page_table<F>(
-        pt: &PageTable,
-        va_range: &VirtAddrRange,
-        start: VirtAddr,
-        size: usize,
-        mut f: F,
-    ) -> AxResult
-    where
-        F: FnMut(VirtAddr, usize, usize),
-    {
-        if !va_range.contains_range(VirtAddrRange::from_start_size(start, size)) {
-            return ax_err!(InvalidInput, "address out of range");
-        }
-        let mut cnt = 0;
-        // If start is aligned to 4K, start_align_down will be equal to start_align_up.
-        let end_align_up = (start + size).align_up_4k();
-        for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
-            .expect("Failed to create page iterator")
-        {
-            let (mut paddr, _, _) = pt.query(vaddr).map_err(|_| AxError::BadAddress)?;
-
-            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);
-
-            if copy_size == 0 {
-                break;
-            }
-            if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
-                let align_offset = start.align_offset_4k();
-                copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
-                paddr += align_offset;
-            }
-            f(phys_to_virt(paddr), cnt, copy_size);
-            cnt += copy_size;
-        }
-        Ok(())
-    }
-
-    /// To read data from the address space.
-    ///
-    /// # Arguments
-    ///
-    /// * `start` - The start virtual address to read.
-    /// * `buf` - The buffer to store the data.
-    pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
-        self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
-            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
-        })
-    }
-
-    /// To write data to the address space.
-    ///
-    /// # Arguments
-    ///
-    /// * `start_vaddr` - The start virtual address to write.
-    /// * `buf` - The buffer to write to the address space.
-    pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
-        self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
-            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
-        })
-    }
-
-    /// Updates mapping within the specified virtual address range.
-    ///
-    /// Returns an error if the address range is out of the address space or not
-    /// aligned.
-    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
-        // Populate the area first, which also checks the address range for us.
-        self.populate_area(start, size)?;
-
-        self.areas
-            .protect(start, size, |_| Some(flags), &mut self.pt)
-            .map_err(mapping_err_to_ax_err)?;
-
-        Ok(())
-    }
-
-    /// Removes all mappings in the address space.
-    pub fn clear(&mut self) {
-        self.areas.clear(&mut self.pt).unwrap();
-    }
-
-    /// Checks whether an access to the specified memory region is valid.
-    ///
-    /// Returns `true` if the memory region given by `range` is all mapped and
-    /// has proper permission flags (i.e. containing `access_flags`).
-    pub fn check_region_access(
-        &self,
-        mut range: VirtAddrRange,
-        access_flags: MappingFlags,
-    ) -> bool {
-        for area in self.areas.iter() {
-            if area.end() <= range.start {
-                continue;
-            }
-            if area.start() > range.start {
-                return false;
-            }
-
-            // This area overlaps with the memory region
-            if !area.flags().contains(access_flags) {
-                return false;
-            }
-
-            range.start = area.end();
-            if range.is_empty() {
-                return true;
-            }
-        }
-
-        false
-    }
-
-    /// Handles a page fault at the given address.
-    ///
-    /// `access_flags` indicates the access type that caused the page fault.
-    ///
-    /// Returns `true` if the page fault is handled successfully (not a real
-    /// fault).
-    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
-        if !self.va_range.contains(vaddr) {
-            return false;
-        }
-        if let Some(area) = self.areas.find(vaddr) {
-            let orig_flags = area.flags();
-            if orig_flags.contains(access_flags) {
-                return area
-                    .backend()
-                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
-            }
-        }
-        false
-    }
-
-    /// Clone a [`AddrSpace`] by re-mapping all [`MemoryArea`]s in a new page table and copying data in user space.
-    pub fn clone_or_err(&mut self) -> AxResult<Self> {
-        let mut new_aspace = Self::new_empty(self.base(), self.size())?;
-
-        for area in self.areas.iter() {
-            let backend = area.backend();
-            // Remap the memory area in the new address space.
-            let new_area =
-                MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
-            new_aspace
-                .areas
-                .map(new_area, &mut new_aspace.pt, false)
-                .map_err(mapping_err_to_ax_err)?;
-            // Copy data from old memory area to new memory area.
-            for vaddr in
-                PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator")
-            {
-                let addr = match self.pt.query(vaddr) {
-                    Ok((paddr, _, _)) => paddr,
-                    // If the page is not mapped, skip it.
-                    Err(PagingError::NotMapped) => continue,
-                    Err(_) => return Err(AxError::BadAddress),
-                };
-                let new_addr = match new_aspace.pt.query(vaddr) {
-                    Ok((paddr, _, _)) => paddr,
-                    // If the page is not mapped, try map it.
-                    Err(PagingError::NotMapped) => {
-                        if !backend.handle_page_fault(vaddr, area.flags(), &mut new_aspace.pt) {
-                            return Err(AxError::NoMemory);
-                        }
-                        match new_aspace.pt.query(vaddr) {
-                            Ok((paddr, _, _)) => paddr,
-                            Err(_) => return Err(AxError::BadAddress),
-                        }
-                    }
-                    Err(_) => return Err(AxError::BadAddress),
-                };
-                unsafe {
-                    core::ptr::copy_nonoverlapping(
-                        phys_to_virt(addr).as_ptr(),
-                        phys_to_virt(new_addr).as_mut_ptr(),
-                        PAGE_SIZE_4K,
-                    )
-                };
-            }
-        }
-        Ok(new_aspace)
-    }
-}
-
-impl fmt::Debug for AddrSpace {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("AddrSpace")
-            .field("va_range", &self.va_range)
-            .field("page_table_root", &self.pt.root_paddr())
-            .field("areas", &self.areas)
-            .finish()
-    }
-}
-
-impl Drop for AddrSpace {
-    fn drop(&mut self) {
-        self.clear();
-    }
-}
diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs
deleted file mode 100644
index d5ec5787e6..0000000000
--- a/modules/axmm/src/backend/alloc.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-use axalloc::global_allocator;
-use axhal::mem::{phys_to_virt, virt_to_phys};
-use axhal::paging::{MappingFlags, PageSize, PageTable};
-use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr};
-
-use super::Backend;
-
-fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
-    let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?);
-    if zeroed {
-        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) };
-    }
-    let paddr = virt_to_phys(vaddr);
-    Some(paddr)
-}
-
-fn dealloc_frame(frame: PhysAddr) {
-    let vaddr = phys_to_virt(frame);
-    global_allocator().dealloc_pages(vaddr.as_usize(), 1);
-}
-
-impl Backend {
-    /// Creates a new allocation mapping backend.
-    pub const fn new_alloc(populate: bool) -> Self {
-        Self::Alloc { populate }
-    }
-
-    pub(crate) fn map_alloc(
-        start: VirtAddr,
-        size: usize,
-        flags: MappingFlags,
-        pt: &mut PageTable,
-        populate: bool,
-    ) -> bool {
-        debug!(
-            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
-            start,
-            start + size,
-            flags,
-            populate
-        );
-        if populate {
-            // allocate all possible physical frames for populated mapping.
-            for addr in PageIter4K::new(start, start + size).unwrap() {
-                if let Some(frame) = alloc_frame(true) {
-                    if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) {
-                        tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
-                    } else {
-                        return false;
-                    }
-                }
-            }
-        } else {
-            // create mapping entries on demand later in `handle_page_fault_alloc`.
-        }
-        true
-    }
-
-    pub(crate) fn unmap_alloc(
-        start: VirtAddr,
-        size: usize,
-        pt: &mut PageTable,
-        _populate: bool,
-    ) -> bool {
-        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
-        for addr in PageIter4K::new(start, start + size).unwrap() {
-            if let Ok((frame, page_size, tlb)) = pt.unmap(addr) {
-                // Deallocate the physical frame if there is a mapping in the
-                // page table.
-                if page_size.is_huge() {
-                    return false;
-                }
-                tlb.flush();
-                dealloc_frame(frame);
-            } else {
-                // Deallocation is needn't if the page is not mapped.
-            }
-        }
-        true
-    }
-
-    pub(crate) fn handle_page_fault_alloc(
-        vaddr: VirtAddr,
-        orig_flags: MappingFlags,
-        pt: &mut PageTable,
-        populate: bool,
-    ) -> bool {
-        if populate {
-            false // Populated mappings should not trigger page faults.
-        } else if let Some(frame) = alloc_frame(true) {
-            // Allocate a physical frame lazily and map it to the fault address.
-            // `vaddr` does not need to be aligned. It will be automatically
-            // aligned during `pt.map` regardless of the page size.
-            pt.map(vaddr, frame, PageSize::Size4K, orig_flags)
-                .map(|tlb| tlb.flush())
-                .is_ok()
-        } else {
-            false
-        }
-    }
-}
diff --git a/modules/axmm/src/backend/linear.rs b/modules/axmm/src/backend/linear.rs
deleted file mode 100644
index e324a1e11e..0000000000
--- a/modules/axmm/src/backend/linear.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-use axhal::paging::{MappingFlags, PageTable};
-use memory_addr::{PhysAddr, VirtAddr};
-
-use super::Backend;
-
-impl Backend {
-    /// Creates a new linear mapping backend.
-    pub const fn new_linear(pa_va_offset: usize) -> Self {
-        Self::Linear { pa_va_offset }
-    }
-
-    pub(crate) fn map_linear(
-        start: VirtAddr,
-        size: usize,
-        flags: MappingFlags,
-        pt: &mut PageTable,
-        pa_va_offset: usize,
-    ) -> bool {
-        let va_to_pa = |va: VirtAddr| PhysAddr::from(va.as_usize() - pa_va_offset);
-        debug!(
-            "map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}",
-            start,
-            start + size,
-            va_to_pa(start),
-            va_to_pa(start + size),
-            flags
-        );
-        pt.map_region(start, va_to_pa, size, flags, false, false)
-            .map(|tlb| tlb.ignore()) // TLB flush on map is unnecessary, as there are no outdated mappings.
-            .is_ok()
-    }
-
-    pub(crate) fn unmap_linear(
-        start: VirtAddr,
-        size: usize,
-        pt: &mut PageTable,
-        _pa_va_offset: usize,
-    ) -> bool {
-        debug!("unmap_linear: [{:#x}, {:#x})", start, start + size);
-        pt.unmap_region(start, size, true)
-            .map(|tlb| tlb.ignore()) // flush each page on unmap, do not flush the entire TLB.
-            .is_ok()
-    }
-}
diff --git a/modules/axmm/src/backend/mod.rs b/modules/axmm/src/backend/mod.rs
deleted file mode 100644
index be58b3e59d..0000000000
--- a/modules/axmm/src/backend/mod.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-//! Memory mapping backends.
-
-use axhal::paging::{MappingFlags, PageTable};
-use memory_addr::VirtAddr;
-use memory_set::MappingBackend;
-
-mod alloc;
-mod linear;
-
-/// A unified enum type for different memory mapping backends.
-///
-/// Currently, two backends are implemented:
-///
-/// - **Linear**: used for linear mappings. The target physical frames are
-///   contiguous and their addresses should be known when creating the mapping.
-/// - **Allocation**: used in general, or for lazy mappings. The target physical
-///   frames are obtained from the global allocator.
-#[derive(Clone)]
-pub enum Backend {
-    /// Linear mapping backend.
-    ///
-    /// The offset between the virtual address and the physical address is
-    /// constant, which is specified by `pa_va_offset`. For example, the virtual
-    /// address `vaddr` is mapped to the physical address `vaddr - pa_va_offset`.
-    Linear {
-        /// `vaddr - paddr`.
-        pa_va_offset: usize,
-    },
-    /// Allocation mapping backend.
-    ///
-    /// If `populate` is `true`, all physical frames are allocated when the
-    /// mapping is created, and no page faults are triggered during the memory
-    /// access. Otherwise, the physical frames are allocated on demand (by
-    /// handling page faults).
-    Alloc {
-        /// Whether to populate the physical frames when creating the mapping.
-        populate: bool,
-    },
-}
-
-impl MappingBackend for Backend {
-    type Addr = VirtAddr;
-    type Flags = MappingFlags;
-    type PageTable = PageTable;
-    fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool {
-        match *self {
-            Self::Linear { pa_va_offset } => Self::map_linear(start, size, flags, pt, pa_va_offset),
-            Self::Alloc { populate } => Self::map_alloc(start, size, flags, pt, populate),
-        }
-    }
-
-    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool {
-        match *self {
-            Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, pa_va_offset),
-            Self::Alloc { populate } => Self::unmap_alloc(start, size, pt, populate),
-        }
-    }
-
-    fn protect(
-        &self,
-        start: Self::Addr,
-        size: usize,
-        new_flags: Self::Flags,
-        page_table: &mut Self::PageTable,
-    ) -> bool {
-        page_table
-            .protect_region(start, size, new_flags, true)
-            .map(|tlb| tlb.ignore())
-            .is_ok()
-    }
-}
-
-impl Backend {
-    pub(crate) fn handle_page_fault(
-        &self,
-        vaddr: VirtAddr,
-        orig_flags: MappingFlags,
-        page_table: &mut PageTable,
-    ) -> bool {
-        match *self {
-            Self::Linear { .. } => false, // Linear mappings should not trigger page faults.
-            Self::Alloc { populate } => {
-                Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate)
-            }
-        }
-    }
-}
diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs
index 1be9043498..e2d36c9cc4 100644
--- a/modules/axmm/src/lib.rs
+++ b/modules/axmm/src/lib.rs
@@ -6,30 +6,42 @@
 extern crate log;
 extern crate alloc;
 
-mod aspace;
-mod backend;
-
-pub use self::aspace::AddrSpace;
-pub use self::backend::Backend;
-
-use axerrno::{AxError, AxResult};
+use axerrno::AxResult;
 use axhal::mem::phys_to_virt;
+use axhal::paging::PagingHandlerImpl;
 use kspin::SpinNoIrq;
 use lazyinit::LazyInit;
 use memory_addr::{PhysAddr, va};
-use memory_set::MappingError;
-
-static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
 
-fn mapping_err_to_ax_err(err: MappingError) -> AxError {
-    warn!("Mapping error: {:?}", err);
-    match err {
-        MappingError::InvalidParam => AxError::InvalidInput,
-        MappingError::AlreadyExists => AxError::AlreadyExists,
-        MappingError::BadState => AxError::BadState,
+cfg_if::cfg_if! {
+    if #[cfg(target_arch = "x86_64")] {
+        /// The paging metadata for specific arch.
+        pub type ArchPagingMetatData = page_table_multiarch::x86_64::X64PagingMetaData;
+        /// The PTE for specific arch.
+        pub type ArchPTE = page_table_entry::x86_64::X64PTE;
+    } else if #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] {
+        /// The paging metadata for specific arch.
+        pub type ArchPagingMetatData = page_table_multiarch::riscv::Sv39MetaData;
+        /// The PTE for specific arch.
+        pub type ArchPTE = page_table_entry::riscv::Rv64PTE;
+    } else if #[cfg(target_arch = "aarch64")]{
+        /// The paging metadata for specific arch.
+        pub type ArchPagingMetatData = page_table_multiarch::aarch64::A64PagingMetaData;
+        /// The PTE for specific arch.
+        pub type ArchPTE = page_table_entry::aarch64::A64PTE;
+    } else if #[cfg(target_arch = "loongarch64")] {
+        /// The paging metadata for specific arch.
+        pub type ArchPagingMetatData = page_table_multiarch::loongarch64::LA64MetaData;
+        /// The PTE for specific arch.
+        pub type ArchPTE = page_table_entry::loongarch64::LA64PTE;
     }
 }
 
+/// The virtual memory address space.
+pub type AddrSpace = aspace_generic::AddrSpace;
+
+static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
+
 /// Creates a new address space for kernel itself.
 pub fn new_kernel_aspace() -> AxResult<AddrSpace> {
     let mut aspace = AddrSpace::new_empty(