hal_core/mem/page.rs
use crate::{Address, PAddr, VAddr};
use core::{cmp, ops, slice};
use mycelium_util::fmt;

/// The size of a memory page.
pub trait Size: Copy + Eq + PartialEq + fmt::Display {
    /// Returns the size (in bytes) of this page.
    fn as_usize(&self) -> usize;
}

/// A statically known page size.
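///
/// Implementors of this trait are typically zero-sized marker types, as in
/// the minimal sketch below (`Size4KiB` is a hypothetical example, not a type
/// defined in this file); the blanket `impl<S: StaticSize> Size for S` at the
/// bottom of this file then provides [`Size`] for free:
///
/// ```ignore
/// use core::fmt;
///
/// #[derive(Copy, Clone, Debug, Eq, PartialEq)]
/// pub struct Size4KiB;
///
/// impl StaticSize for Size4KiB {
///     const SIZE: usize = 4 * 1024;
///     const PRETTY_NAME: &'static str = "4 KiB";
///     const INSTANCE: Self = Self;
/// }
///
/// impl fmt::Display for Size4KiB {
///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
///         f.write_str(Self::PRETTY_NAME)
///     }
/// }
/// ```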
pub trait StaticSize: Copy + Eq + PartialEq + fmt::Display {
    /// The size (in bytes) of this page.
    const SIZE: usize;
    /// A human-readable name for this page size.
    const PRETTY_NAME: &'static str;
    /// An instance of this page size type.
    const INSTANCE: Self;
}

/// The result of a page translation operation.
pub type TranslateResult<A, S> = Result<Page<A, S>, TranslateError<S>>;

/// An allocator for physical pages of a given size.
///
/// # Safety
///
/// This trait is unsafe to implement, as implementations are responsible for
/// guaranteeing that allocated pages are unique and are not also handed out
/// by another page allocator.
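///
/// # Examples
///
/// A sketch of typical usage, assuming some `PAGE_ALLOC` implementing this
/// trait and a hypothetical `Size4KiB` page size type:
///
/// ```ignore
/// // Allocate a single 4 KiB physical page...
/// let page = PAGE_ALLOC.alloc(Size4KiB::INSTANCE)?;
///
/// // ...and a contiguous range of 16 pages.
/// let range = PAGE_ALLOC.alloc_range(Size4KiB::INSTANCE, 16)?;
/// assert_eq!(range.len(), 16);
///
/// // Pages must be deallocated by the allocator that allocated them.
/// PAGE_ALLOC.dealloc(page)?;
/// PAGE_ALLOC.dealloc_range(range)?;
/// ```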
pub unsafe trait Alloc<S: Size> {
    /// Allocate a single page.
    ///
    /// Note that an implementation of this method is provided as long as an
    /// implementor of this trait provides `alloc_range`.
    ///
    /// # Returns
    /// - `Ok(Page)` if a page was successfully allocated.
    /// - `Err` if no more pages can be allocated by this allocator.
    fn alloc(&self, size: S) -> Result<Page<PAddr, S>, AllocError> {
        self.alloc_range(size, 1).map(|r| r.start())
    }

    /// Allocate a range of `len` pages.
    ///
    /// # Returns
    /// - `Ok(PageRange)` if a range of pages was successfully allocated.
    /// - `Err` if the requested range could not be satisfied by this allocator.
    fn alloc_range(&self, size: S, len: usize) -> Result<PageRange<PAddr, S>, AllocError>;

    /// Deallocate a single page.
    ///
    /// Note that an implementation of this method is provided as long as an
    /// implementor of this trait provides `dealloc_range`.
    ///
    /// # Returns
    /// - `Ok(())` if the page was successfully deallocated.
    /// - `Err` if the page could not be deallocated.
    fn dealloc(&self, page: Page<PAddr, S>) -> Result<(), AllocError> {
        self.dealloc_range(page.range_inclusive(page))
    }

    /// Deallocate a range of pages.
    ///
    /// # Returns
    /// - `Ok(())` if the range of pages was successfully deallocated.
    /// - `Err` if the requested range could not be deallocated.
    fn dealloc_range(&self, range: PageRange<PAddr, S>) -> Result<(), AllocError>;
}

/// An interface for mapping virtual memory pages to physical page frames.
pub trait Map<S, A>
where
    S: Size,
    A: Alloc<S>,
{
    /// The type of the page table entries managed by this mapper.
    type Entry: PageFlags<S>;

    /// Map the virtual memory page represented by `virt` to the physical page
    /// represented by `phys`.
    ///
    /// # Panics
    ///
    /// - If the physical address is invalid.
    /// - If the page is already mapped.
    ///
    /// # Safety
    ///
    /// Manual control of page mappings may be used to violate Rust invariants
    /// in a variety of exciting ways. For example, aliasing a physical page by
    /// mapping multiple virtual pages to it and setting one or more of those
    /// virtual pages as writable may result in undefined behavior.
    ///
    /// Some rules of thumb:
    ///
    /// - Ensure that the writable XOR executable rule is not violated (by
    ///   making a page both writable and executable).
    /// - Don't alias stack pages onto the heap or vice versa.
    /// - If loading arbitrary code into executable pages, ensure that this
    ///   code is trusted and will not violate the kernel's invariants.
    ///
    /// Good luck and have fun!
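    ///
    /// # Examples
    ///
    /// A sketch of mapping a single page, assuming a mapper `mapper`, a frame
    /// allocator `frame_alloc`, and a hypothetical `Size4KiB` page size type:
    ///
    /// ```ignore
    /// let phys = frame_alloc.alloc(Size4KiB::INSTANCE)?;
    /// let virt = Page::starting_at(VAddr::from_usize(0xd000_0000), Size4KiB::INSTANCE)?;
    /// // Safety: neither page is aliased by an existing mapping.
    /// unsafe {
    ///     let mut handle = mapper.map_page(virt, phys, &frame_alloc);
    ///     handle.set_writable(true);
    ///     // The new mapping may not take effect until it is committed.
    ///     handle.commit();
    /// }
    /// ```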
    unsafe fn map_page(
        &mut self,
        virt: Page<VAddr, S>,
        phys: Page<PAddr, S>,
        frame_alloc: &A,
    ) -> Handle<'_, S, Self::Entry>;

    /// Returns a [`Handle`] that can be used to modify the flags of the page
    /// table entry for the virtual page `virt`.
    fn flags_mut(&mut self, virt: Page<VAddr, S>) -> Handle<'_, S, Self::Entry>;

    /// Unmap the provided virtual page, returning the physical page it was
    /// previously mapped to.
    ///
    /// This does not deallocate any page frames.
    ///
    /// # Panics
    ///
    /// - If the virtual page was not mapped.
    ///
    /// # Safety
    ///
    /// Manual control of page mappings may be used to violate Rust invariants
    /// in a variety of exciting ways.
    unsafe fn unmap(&mut self, virt: Page<VAddr, S>) -> Page<PAddr, S>;

    /// Identity map the provided physical page to the virtual page with the
    /// same address.
    fn identity_map(
        &mut self,
        phys: Page<PAddr, S>,
        frame_alloc: &A,
    ) -> Handle<'_, S, Self::Entry> {
        let base_paddr = phys.base_addr().as_usize();
        let virt = Page::containing(VAddr::from_usize(base_paddr), phys.size());
        unsafe { self.map_page(virt, phys, frame_alloc) }
    }

    /// Map the range of virtual memory pages represented by `virt` to the
    /// range of physical pages represented by `phys`.
    ///
    /// # Arguments
    ///
    /// - `virt`: the range of virtual pages to map.
    /// - `phys`: the range of physical pages to map `virt` to.
    /// - `set_flags`: a closure invoked with a `Handle` to each page in the
    ///   range as it is mapped. This closure may modify the flags for that
    ///   page before the changes to the page mapping are committed.
    ///
    ///   **Note**: The [`Handle::virt_page`] method may be used to determine
    ///   which page's flags would be modified by each invocation of the
    ///   closure.
    /// - `frame_alloc`: a page-frame allocator.
    ///
    /// # Panics
    ///
    /// - If the two ranges have different lengths.
    /// - If the size is dynamic and the two ranges are of different sized pages.
    /// - If the physical address is invalid.
    /// - If any page is already mapped.
    ///
    /// # Safety
    ///
    /// Manual control of page mappings may be used to violate Rust invariants
    /// in a variety of exciting ways. For example, aliasing a physical page by
    /// mapping multiple virtual pages to it and setting one or more of those
    /// virtual pages as writable may result in undefined behavior.
    ///
    /// Some rules of thumb:
    ///
    /// - Ensure that the writable XOR executable rule is not violated (by
    ///   making a page both writable and executable).
    /// - Don't alias stack pages onto the heap or vice versa.
    /// - If loading arbitrary code into executable pages, ensure that this
    ///   code is trusted and will not violate the kernel's invariants.
    ///
    /// Good luck and have fun!
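    ///
    /// # Examples
    ///
    /// A sketch of mapping a freshly allocated range of 16 physical pages,
    /// assuming a mapper `mapper`, a frame allocator `frame_alloc`, and a
    /// hypothetical `Size4KiB` page size type:
    ///
    /// ```ignore
    /// let phys = frame_alloc.alloc_range(Size4KiB::INSTANCE, 16)?;
    /// let start = Page::starting_at(VAddr::from_usize(0xd000_0000), Size4KiB::INSTANCE)?;
    /// let virt = start.range_inclusive(start + 15);
    /// unsafe {
    ///     mapper.map_range(
    ///         virt,
    ///         phys,
    ///         // Safety: none of these pages are aliased by other mappings.
    ///         |handle| unsafe { handle.set_writable(true); },
    ///         &frame_alloc,
    ///     );
    /// }
    /// ```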
    unsafe fn map_range<F>(
        &mut self,
        virt: PageRange<VAddr, S>,
        phys: PageRange<PAddr, S>,
        mut set_flags: F,
        frame_alloc: &A,
    ) -> PageRange<VAddr, S>
    where
        F: FnMut(&mut Handle<'_, S, Self::Entry>),
    {
        let _span = tracing::trace_span!("map_range", ?virt, ?phys).entered();
        assert_eq!(
            virt.len(),
            phys.len(),
            "virtual and physical page ranges must have the same number of pages"
        );
        assert_eq!(
            virt.page_size().as_usize(),
            phys.page_size().as_usize(),
            "virtual and physical pages must be the same size"
        );
        for (virt, phys) in virt.into_iter().zip(&phys) {
            tracing::trace!(virt.page = ?virt, phys.page = ?phys, "mapping...");
            let mut flags = self.map_page(virt, phys, frame_alloc);
            set_flags(&mut flags);
            flags.commit();
            tracing::trace!(virt.page = ?virt, phys.page = ?phys, "mapped");
        }
        virt
    }

    /// Unmap the provided range of virtual pages.
    ///
    /// This does not deallocate any page frames.
    ///
    /// # Notes
    ///
    /// The default implementation of this method does *not* assume that the
    /// virtual pages are mapped to a contiguous range of physical page frames.
    /// Overridden implementations *may* behave differently when the pages are
    /// mapped contiguously in the physical address space, but they *must not*
    /// assume that they are. If an implementation performs additional work for
    /// contiguously-mapped virtual page ranges, it must check that the page
    /// range is, in fact, contiguously mapped.
    ///
    /// Additionally, unlike [`unmap`], this method does not return a physical
    /// [`PageRange`], since it is not guaranteed that the unmapped pages were
    /// mapped to a contiguous physical page range.
    ///
    /// # Panics
    ///
    /// - If any virtual page in the range was not mapped.
    ///
    /// # Safety
    ///
    /// Manual control of page mappings may be used to violate Rust invariants
    /// in a variety of exciting ways.
    ///
    /// [`unmap`]: Self::unmap
    unsafe fn unmap_range(&mut self, virt: PageRange<VAddr, S>) {
        let _span = tracing::trace_span!("unmap_range", ?virt).entered();

        for virt in &virt {
            self.unmap(virt);
        }
    }

    /// Identity map the provided range of physical pages to the range of
    /// virtual pages with the same addresses.
    ///
    /// # Arguments
    ///
    /// - `phys`: the range of physical pages to identity map.
    /// - `set_flags`: a closure invoked with a `Handle` to each page in the
    ///   range as it is mapped. This closure may modify the flags for that
    ///   page before the changes to the page mapping are committed.
    ///
    ///   **Note**: The [`Handle::virt_page`] method may be used to determine
    ///   which page's flags would be modified by each invocation of the
    ///   closure.
    /// - `frame_alloc`: a page-frame allocator.
    ///
    /// # Panics
    ///
    /// - If any page's physical address is invalid.
    /// - If any page is already mapped.
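    ///
    /// # Examples
    ///
    /// A sketch of identity mapping a memory-mapped device region, assuming a
    /// mapper `mapper`, a frame allocator `frame_alloc`, and a hypothetical
    /// `Size4KiB` page size type:
    ///
    /// ```ignore
    /// let start = Page::starting_at(PAddr::from_usize(0xfee0_0000), Size4KiB::INSTANCE)?;
    /// let mmio = start.range_inclusive(start + 3);
    /// let virt = mapper.identity_map_range(
    ///     mmio,
    ///     // Safety: the device region is not aliased by any other mapping.
    ///     |handle| unsafe { handle.set_writable(true); },
    ///     &frame_alloc,
    /// );
    /// // The returned virtual range has the same base address as `mmio`.
    /// assert_eq!(virt.base_addr().as_usize(), mmio.base_addr().as_usize());
    /// ```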
    fn identity_map_range<F>(
        &mut self,
        phys: PageRange<PAddr, S>,
        set_flags: F,
        frame_alloc: &A,
    ) -> PageRange<VAddr, S>
    where
        F: FnMut(&mut Handle<'_, S, Self::Entry>),
    {
        let base_paddr = phys.base_addr().as_usize();
        let page_size = phys.start().size();
        let virt_base = Page::containing(VAddr::from_usize(base_paddr), page_size);
        // `end_addr` returns the last address in the range (inclusive), so the
        // page containing it is the last page in the range, and the virtual
        // range must include it.
        let end_paddr = phys.end_addr().as_usize();
        let virt_end = Page::containing(VAddr::from_usize(end_paddr), page_size);
        let virt = virt_base.range_inclusive(virt_end);
        unsafe { self.map_range(virt, phys, set_flags, frame_alloc) }
    }
}

impl<M, A, S> Map<S, A> for &mut M
where
    M: Map<S, A>,
    S: Size,
    A: Alloc<S>,
{
    type Entry = M::Entry;

    #[inline]
    unsafe fn map_page(
        &mut self,
        virt: Page<VAddr, S>,
        phys: Page<PAddr, S>,
        frame_alloc: &A,
    ) -> Handle<'_, S, Self::Entry> {
        (*self).map_page(virt, phys, frame_alloc)
    }

    #[inline]
    fn flags_mut(&mut self, virt: Page<VAddr, S>) -> Handle<'_, S, Self::Entry> {
        (*self).flags_mut(virt)
    }

    #[inline]
    unsafe fn unmap(&mut self, virt: Page<VAddr, S>) -> Page<PAddr, S> {
        (*self).unmap(virt)
    }

    #[inline]
    fn identity_map(
        &mut self,
        phys: Page<PAddr, S>,
        frame_alloc: &A,
    ) -> Handle<'_, S, Self::Entry> {
        (*self).identity_map(phys, frame_alloc)
    }
}

pub trait TranslatePage<S: Size> {
    /// Translates the virtual page `virt` to the physical page it is mapped
    /// to, returning an error if the page cannot be translated.
    fn translate_page(&self, virt: Page<VAddr, S>) -> TranslateResult<PAddr, S>;
}

pub trait TranslateAddr {
    /// Translates the virtual address `addr` to the physical address it is
    /// mapped to, or `None` if it is not mapped.
    fn translate_addr(&self, addr: VAddr) -> Option<PAddr>;
}

pub trait PageFlags<S: Size>: fmt::Debug {
    /// Set whether or not this page is writable.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    /// Using `set_writable` to make memory writable when the Rust compiler
    /// expects it to be read-only may cause undefined behavior. Making a page
    /// writable while it is aliased in the page tables (i.e. multiple page
    /// table entries point to it) may also cause undefined behavior.
    unsafe fn set_writable(&mut self, writable: bool);

    /// Set whether or not this page is executable.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    /// Using `set_executable` to make writable memory executable may cause
    /// undefined behavior. Also, this can be used to execute the contents of
    /// arbitrary memory, which (of course) is wildly unsafe.
    unsafe fn set_executable(&mut self, executable: bool);

    /// Set whether or not this page is present.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    unsafe fn set_present(&mut self, present: bool);

    /// Returns `true` if this page is writable.
    fn is_writable(&self) -> bool;
    /// Returns `true` if this page is executable.
    fn is_executable(&self) -> bool;
    /// Returns `true` if this page is present.
    fn is_present(&self) -> bool;

    /// Commit the changes to the page table.
    ///
    /// Depending on the CPU architecture, this may be a no-op. In other cases,
    /// it may invoke special instructions (such as `invlpg` on x86) or write
    /// data to the page table. If page table changes are reflected as soon as
    /// flags are modified, the implementation may do nothing.
    fn commit(&mut self, page: Page<VAddr, S>);
}

/// A page in the process of being remapped.
///
/// This handle allows updating page table flags prior to committing changes.
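///
/// # Examples
///
/// A sketch of updating the flags of an already-mapped page, assuming a
/// mapper `mapper` implementing [`Map`] and a mapped virtual page `virt`
/// (both hypothetical):
///
/// ```ignore
/// let mut handle = mapper.flags_mut(virt);
/// // Safety: this page is not aliased, and nothing relies on it remaining
/// // read-only.
/// unsafe {
///     handle.set_writable(true);
/// }
/// // The update may not be reflected until it is committed.
/// handle.commit();
/// ```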
#[derive(Debug)]
#[must_use = "page table updates may not be reflected until changes are committed (using `Handle::commit`)"]
pub struct Handle<'mapper, S: Size, E: PageFlags<S>> {
    entry: &'mapper mut E,
    page: Page<VAddr, S>,
}

/// A memory page.
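///
/// # Examples
///
/// A sketch of constructing pages, assuming a hypothetical 4 KiB
/// [`StaticSize`] type:
///
/// ```ignore
/// // `containing` rounds the address down to a page boundary...
/// let page = Page::<VAddr, _>::containing(VAddr::from_usize(0x1234), Size4KiB::INSTANCE);
/// assert_eq!(page.base_addr().as_usize(), 0x1000);
/// assert_eq!(page.end_addr().as_usize(), 0x1fff);
/// assert!(page.contains(VAddr::from_usize(0x1fff)));
///
/// // ...while `starting_at` requires the address to already be aligned.
/// assert!(Page::<VAddr, _>::starting_at(VAddr::from_usize(0x1234), Size4KiB::INSTANCE).is_err());
/// ```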
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct Page<A, S: Size> {
    base: A,
    size: S,
}

/// A range of memory pages of the same size.
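///
/// Both `start` and `end` are *inclusive*: iterating over a `PageRange`
/// yields every page from `start` through `end`.
///
/// # Examples
///
/// A sketch, assuming a hypothetical 4 KiB [`StaticSize`] type:
///
/// ```ignore
/// let start = Page::<VAddr, _>::starting_at(VAddr::from_usize(0x1000), Size4KiB::INSTANCE)?;
/// // `range_inclusive` includes `end`, while `range_to` excludes it.
/// let range = start.range_inclusive(start + 3);
/// assert_eq!(range.len(), 4);
/// assert_eq!(range.size(), 4 * 4096);
/// assert_eq!(start.range_to(start + 3).len(), 3);
/// ```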
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd)]
pub struct PageRange<A: Address, S: Size> {
    start: Page<A, S>,
    end: Page<A, S>,
}

/// An [`Alloc`] implementation that always fails to allocate.
#[derive(Debug, Default)]
pub struct EmptyAlloc {
    _p: (),
}

/// The error returned when an address is not aligned on a page boundary.
#[derive(thiserror::Error)]
#[error("not aligned on a {size} boundary")]
pub struct NotAligned<S: Size> {
    size: S,
}

/// The error returned by an [`Alloc`] when an allocation fails.
#[derive(Debug, thiserror::Error)]
#[error("allocator error")]
pub struct AllocError {
    // TODO: eliza
    _p: (),
}

/// Errors that may occur when translating a page or address.
#[derive(Clone, Eq, PartialEq, thiserror::Error)]
#[non_exhaustive]
pub enum TranslateError<S: Size> {
    #[error("cannot translate an unmapped page/address")]
    NotMapped,
    #[error("mapped page is a different size ({0})")]
    WrongSize(S),
    #[error("error translating page/address: {0}")]
    Other(&'static str),
}

// === impl Page ===

impl<A: Address, S: StaticSize> Page<A, S> {
    /// Returns a page starting at the given address.
    pub fn starting_at_fixed(addr: impl Into<A>) -> Result<Self, NotAligned<S>> {
        Self::starting_at(addr, S::INSTANCE)
    }

    /// Returns the page that contains the given address.
    pub fn containing_fixed(addr: impl Into<A>) -> Self {
        Self::containing(addr, S::INSTANCE)
    }
}

impl<A: Address, S: Size> Page<A, S> {
    /// Returns a page starting at the given address, or an error if `addr` is
    /// not aligned on a `size` boundary.
    pub fn starting_at(addr: impl Into<A>, size: S) -> Result<Self, NotAligned<S>> {
        let addr = addr.into();
        if !addr.is_aligned(size.as_usize()) {
            return Err(NotAligned { size });
        }
        Ok(Self::containing(addr, size))
    }

    /// Returns the page that contains the given address.
    pub fn containing(addr: impl Into<A>, size: S) -> Self {
        let base = addr.into().align_down(size.as_usize());
        Self { base, size }
    }

    /// Returns the base address of the page.
    pub fn base_addr(&self) -> A {
        self.base
    }

    /// Returns the last address in the page, inclusive.
    ///
    /// The address immediately following the returned address is the base
    /// address of the next page.
    pub fn end_addr(&self) -> A {
        self.base + (self.size.as_usize() - 1)
    }

    /// Returns the size of the page.
    pub fn size(&self) -> S {
        self.size
    }

    /// Returns `true` if this page contains the given address.
    pub fn contains(&self, addr: impl Into<A>) -> bool {
        let addr = addr.into();
        // `end_addr` is inclusive, so the page's last address is contained in
        // the page.
        addr >= self.base && addr <= self.end_addr()
    }

    /// Returns a range of pages from `self` up to and including `end`.
    pub fn range_inclusive(self, end: Page<A, S>) -> PageRange<A, S> {
        PageRange { start: self, end }
    }

    /// Returns a range of pages from `self` up to (but not including) `end`.
    pub fn range_to(self, end: Page<A, S>) -> PageRange<A, S> {
        PageRange {
            start: self,
            end: end - 1,
        }
    }

    /// Returns the entire contents of the page as a slice.
    ///
    /// # Safety
    ///
    /// When calling this method, ensure that the page will not be mutated
    /// concurrently, including by user code.
    pub unsafe fn as_slice(&self) -> &[u8] {
        let start = self.base.as_mut_ptr::<u8>() as *const u8;
        slice::from_raw_parts::<u8>(start, self.size.as_usize())
    }

    /// Returns the entire contents of the page as a mutable slice.
    ///
    /// # Safety
    ///
    /// When calling this method, ensure that the page will not be read or
    /// mutated concurrently, including by user code.
    pub unsafe fn as_slice_mut(&mut self) -> &mut [u8] {
        let start = self.base.as_mut_ptr::<u8>();
        slice::from_raw_parts_mut::<u8>(start, self.size.as_usize())
    }
}

impl<A: Address, S: Size> ops::Add<usize> for Page<A, S> {
    type Output = Self;
    /// Offsets the page forward by `rhs` pages.
    fn add(self, rhs: usize) -> Self {
        Page {
            base: self.base + (self.size.as_usize() * rhs),
            ..self
        }
    }
}

impl<A: Address, S: Size> ops::Sub<usize> for Page<A, S> {
    type Output = Self;
    /// Offsets the page backward by `rhs` pages.
    fn sub(self, rhs: usize) -> Self {
        Page {
            base: self.base - (self.size.as_usize() * rhs),
            ..self
        }
    }
}

impl<A: Address, S: Size> cmp::PartialOrd for Page<A, S> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        if self.size == other.size {
            self.base.partial_cmp(&other.base)
        } else {
            // XXX(eliza): does it make sense to compare pages of different sizes?
            None
        }
    }
}

impl<A: Address, S: StaticSize> cmp::Ord for Page<A, S> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.base.cmp(&other.base)
    }
}

impl<A: fmt::Debug, S: Size + fmt::Display> fmt::Debug for Page<A, S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { base, size } = self;
        f.debug_struct("Page")
            .field("base", base)
            .field("size", &format_args!("{size}"))
            .finish()
    }
}

// === impl PageRange ===

// A `PageRange` has a minimum length of one page, so it is never empty.
#[allow(clippy::len_without_is_empty)]
impl<A: Address, S: Size> PageRange<A, S> {
    /// Returns the first page in the range.
    pub fn start(&self) -> Page<A, S> {
        self.start
    }

    /// Returns the last page in the range, inclusive.
    pub fn end(&self) -> Page<A, S> {
        self.end
    }

    /// Returns the base address of the first page in the range.
    pub fn base_addr(&self) -> A {
        self.start.base_addr()
    }

    /// Returns the last address in the last page of the range, inclusive.
    pub fn end_addr(&self) -> A {
        self.end.end_addr()
    }

    /// Returns the size of the pages in the range. All pages in a page range
    /// have the same size.
    #[track_caller]
    pub fn page_size(&self) -> S {
        debug_assert_eq!(self.start.size().as_usize(), self.end.size().as_usize());
        self.start.size()
    }

    /// Returns the number of pages in the range.
    pub fn len(&self) -> usize {
        self.size() / self.page_size().as_usize()
    }

    /// Returns the size in bytes of the page range.
    #[track_caller]
    pub fn size(&self) -> usize {
        let diff = self.start.base_addr().difference(self.end.end_addr());
        debug_assert!(
            diff >= 0,
            "assertion failed: page range base address must be lower than end \
             address!\n\
             \x20 base addr = {:?}\n\
             \x20 end addr = {:?}\n\
             ",
            self.base_addr(),
            self.end_addr(),
        );
        // Add 1 to compensate for the base address not being included in
        // `difference`.
        let diff = diff as usize + 1;
        debug_assert!(
            diff >= self.page_size().as_usize(),
            "assertion failed: page range must be at least one page!\n\
             \x20 difference = {}\n\
             \x20 size = {}\n\
             \x20 base addr = {:?}\n\
             \x20 end addr = {:?}\n\
             ",
            diff,
            self.page_size().as_usize(),
            self.base_addr(),
            self.end_addr(),
        );
        diff
    }
}

impl<A: Address, S: Size> IntoIterator for &'_ PageRange<A, S> {
    type IntoIter = PageRange<A, S>;
    type Item = Page<A, S>;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        *self
    }
}

impl<A: Address, S: Size> Iterator for PageRange<A, S> {
    type Item = Page<A, S>;
    fn next(&mut self) -> Option<Self::Item> {
        // `end` is inclusive, so the range is exhausted once `start` has
        // advanced past it.
        if self.start > self.end {
            return None;
        }
        let next = self.start;
        self.start = self.start + 1;
        Some(next)
    }
}

impl<A, S> fmt::Debug for PageRange<A, S>
where
    A: Address + fmt::Debug,
    S: Size + fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { start, end } = self;
        f.debug_struct("PageRange")
            .field("start", start)
            .field("end", end)
            .finish()
    }
}

unsafe impl<S: Size> Alloc<S> for EmptyAlloc {
    fn alloc_range(&self, _size: S, _len: usize) -> Result<PageRange<PAddr, S>, AllocError> {
        Err(AllocError { _p: () })
    }

    fn dealloc_range(&self, _range: PageRange<PAddr, S>) -> Result<(), AllocError> {
        Err(AllocError { _p: () })
    }
}

// === impl NotAligned ===

impl<S: Size + fmt::Display> fmt::Debug for NotAligned<S> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { size } = self;
        f.debug_struct("NotAligned")
            .field("size", &fmt::display(size))
            .finish()
    }
}

// === impl TranslateError ===

impl<S: Size> From<&'static str> for TranslateError<S> {
    fn from(msg: &'static str) -> Self {
        TranslateError::Other(msg)
    }
}

683
684impl<S: Size + fmt::Display> fmt::Debug for TranslateError<S> {
685 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
686 match *self {
687 TranslateError::Other(msg) => {
688 f.debug_tuple("TranslateError::Other").field(&msg).finish()
689 }
690 TranslateError::NotMapped => f.debug_tuple("TranslateError::NotMapped").finish(),
691 TranslateError::WrongSize(s) => f
692 .debug_tuple("TranslateError::WrongSize")
693 .field(&format_args!("{s}"))
694 .finish(),
695 }
696 }
697}
698
699// === impl AllocErr ===
700
701impl AllocError {
702 pub fn oom() -> Self {
703 Self { _p: () }
704 }
705}
706
707impl<S: Size> From<NotAligned<S>> for AllocError {
708 fn from(_na: NotAligned<S>) -> Self {
709 Self { _p: () } // TODO(eliza)
710 }
711}
712
713impl<S> Size for S
714where
715 S: StaticSize,
716{
717 fn as_usize(&self) -> usize {
718 Self::SIZE
719 }
720}
721
// === impl Handle ===

impl<'mapper, S, E> Handle<'mapper, S, E>
where
    S: Size,
    E: PageFlags<S>,
{
    /// Returns a new `Handle` for the entry `entry`, which maps the virtual
    /// page `page`.
    pub fn new(page: Page<VAddr, S>, entry: &'mapper mut E) -> Self {
        Self { entry, page }
    }

    /// Returns the virtual page this entry is currently mapped to.
    pub fn virt_page(&self) -> &Page<VAddr, S> {
        &self.page
    }

    /// Set whether or not this page is writable.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    /// Using `set_writable` to make memory writable when the Rust compiler
    /// expects it to be read-only may cause undefined behavior. Making a page
    /// writable while it is aliased in the page tables (i.e. multiple page
    /// table entries point to it) may also cause undefined behavior.
    #[inline]
    pub unsafe fn set_writable(&mut self, writable: bool) -> &mut Self {
        self.entry.set_writable(writable);
        self
    }

    /// Set whether or not this page is executable.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    /// Using `set_executable` to make writable memory executable may cause
    /// undefined behavior. Also, this can be used to execute the contents of
    /// arbitrary memory, which (of course) is wildly unsafe.
    #[inline]
    pub unsafe fn set_executable(&mut self, executable: bool) -> &mut Self {
        self.entry.set_executable(executable);
        self
    }

    /// Set whether or not this page is present.
    ///
    /// # Safety
    ///
    /// Manual control of page flags can be used to violate Rust invariants.
    #[inline]
    pub unsafe fn set_present(&mut self, present: bool) -> &mut Self {
        self.entry.set_present(present);
        self
    }

    /// Returns `true` if this page is writable.
    #[inline]
    pub fn is_writable(&self) -> bool {
        self.entry.is_writable()
    }

    /// Returns `true` if this page is executable.
    #[inline]
    pub fn is_executable(&self) -> bool {
        self.entry.is_executable()
    }

    /// Returns `true` if this page is present.
    #[inline]
    pub fn is_present(&self) -> bool {
        self.entry.is_present()
    }

    /// Commit the flag changes to the page table, returning the virtual page.
    #[inline]
    pub fn commit(self) -> Page<VAddr, S> {
        tracing::debug!(
            page = ?self.page,
            entry = ?self.entry,
            "committing page table update"
        );
        self.entry.commit(self.page);
        self.page
    }
}