use self::size::*;
use crate::{PAddr, VAddr};
use core::{
    marker::PhantomData,
    ops,
    ptr::NonNull,
    sync::atomic::{AtomicUsize, Ordering},
};
pub use hal_core::mem::page;
use hal_core::{
    mem::page::{
        Map, Page, Size, StaticSize, TranslateAddr, TranslateError, TranslatePage, TranslateResult,
    },
    Address,
};
use mycelium_util::fmt;

/// The smallest page size supported on x86_64: 4 KiB.
pub const MIN_PAGE_SIZE: usize = Size4Kb::SIZE;
/// Every page table level holds 512 eight-byte entries (4096 bytes total).
const ENTRIES: usize = 512;

pub type VirtPage<S> = Page<VAddr, S>;
pub type PhysPage<S> = Page<PAddr, S>;

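/// Controls the machine's currently active page tables, rooted at the PML4.
///
/// A minimal usage sketch, mirroring the `basic_map` test below (the pages
/// `virt_page` and `phys_page` and the `page::Alloc<Size4Kb>` implementation
/// `alloc` are assumed, not defined in this module):
///
/// ```rust,ignore
/// let mut ctrl = PageCtrl::current();
/// let page = unsafe {
///     ctrl.map_page(virt_page, phys_page, &alloc).set_writable(true).commit()
/// };
/// ```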
pub struct PageCtrl {
    pml4: NonNull<PageTable<level::Pml4>>,
}

/// An x86_64 page table: 512 entries, aligned to a 4096-byte boundary.
#[repr(align(4096))]
#[repr(C)]
pub struct PageTable<L> {
    entries: [Entry<L>; ENTRIES],
}

/// A single page table entry at level `L`.
#[derive(Clone)]
#[repr(transparent)]
pub struct Entry<L> {
    entry: u64,
    _level: PhantomData<L>,
}

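/// Initializes the kernel's paging environment. Must be called once during
/// early boot, with the offset at which the bootloader mapped physical memory
/// into the virtual address space.
///
/// This records `vm_offset` for later address conversions, installs a
/// recursive entry in PML4 slot 511 (see `RECURSIVE_INDEX`) so that the page
/// tables themselves are reachable through virtual addresses, and reloads CR3
/// so the change takes effect.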
#[tracing::instrument(level = "info")]
pub fn init_paging(vm_offset: VAddr) {
    VM_OFFSET.store(vm_offset.as_usize(), Ordering::Release);

    tracing::info!("initializing paging...");

    let (pml4_page, flags) = crate::control_regs::cr3::read();
    tracing::debug!(?pml4_page, ?flags);
    tracing::trace!("old PML4:");
    let pml4 = PageTable::<level::Pml4>::current(vm_offset);

    let mut present_entries = 0;
    for (idx, entry) in pml4.entries[..].iter().enumerate() {
        if entry.is_present() {
            tracing::trace!(idx, ?entry);
            present_entries += 1;
        }
    }
    tracing::trace!(present_entries);

    assert!(
        !pml4.entries[RECURSIVE_INDEX].is_present(),
        "bootloader must not have used entry 511"
    );
    pml4.entries[RECURSIVE_INDEX]
        .set_present(true)
        .set_writable(true)
        .set_phys_addr(pml4_page.base_addr());
    tracing::info!("recursive entry created");

    // Reloading CR3 with the same PML4 flushes the TLB, so the new recursive
    // entry takes effect immediately.
    unsafe {
        crate::control_regs::cr3::write(pml4_page, flags);
    }
    tracing::info!("new PML4 set");

    let mut present_entries = 0;
    for (idx, entry) in pml4.entries[..].iter().enumerate() {
        if entry.is_present() {
            tracing::trace!(idx, ?entry);
            present_entries += 1;
        }
    }
    tracing::trace!(present_entries);
}

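/// Converts a kernel-space virtual address (or anything convertible to a
/// [`VAddr`]) to the physical address it maps to, by subtracting the virtual
/// memory offset recorded by `init_paging`.
///
/// A round-trip sketch with [`kernel_vaddr_of`] (illustrative only; the
/// physical address is made up):
///
/// ```rust,ignore
/// let paddr = PAddr::from_u64(0x1000);
/// let vaddr = kernel_vaddr_of(paddr);
/// assert_eq!(kernel_paddr_of(vaddr), paddr);
/// ```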
#[inline(always)]
pub fn kernel_paddr_of<A>(it: A) -> PAddr
where
    VAddr: From<A>,
{
    let vaddr = VAddr::from(it);
    let paddr = vaddr.as_usize() - vm_offset().as_usize();
    PAddr::from_u64(paddr as u64)
}

/// Converts a physical address (or anything convertible to a [`PAddr`]) to
/// the kernel virtual address that maps it, by adding the virtual memory
/// offset recorded by `init_paging`.
#[inline(always)]
pub fn kernel_vaddr_of<A>(it: A) -> VAddr
where
    PAddr: From<A>,
{
    let paddr = PAddr::from(it);
    let vaddr = paddr.as_usize() + vm_offset().as_usize();
    VAddr::from_usize(vaddr)
}

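/// The offset at which the bootloader mapped physical memory into the virtual
/// address space. `usize::MAX` is a sentinel meaning "not yet initialized";
/// `init_paging` stores the real value.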
static VM_OFFSET: AtomicUsize = AtomicUsize::new(usize::MAX);

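/// Returns the global virtual memory offset.
///
/// # Panics
///
/// If `init_paging` has not yet been called.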
pub fn vm_offset() -> VAddr {
    let off = VM_OFFSET.load(Ordering::Acquire);
    assert_ne!(
        off,
        usize::MAX,
        "`init_paging` must be called before calling `vm_offset`!"
    );
    VAddr::from_usize(off)
}

impl PageTable<level::Pml4> {
    fn current(vm_offset: VAddr) -> &'static mut Self {
        let (phys, _) = crate::control_regs::cr3::read();
        unsafe { Self::from_pml4_page(vm_offset, phys) }
    }
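
    /// # Safety
    ///
    /// `vm_offset` must be the actual offset at which physical memory is
    /// mapped; otherwise, the returned reference will alias arbitrary memory.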
    unsafe fn from_pml4_page(vm_offset: VAddr, page: Page<PAddr, Size4Kb>) -> &'static mut Self {
        let pml4_paddr = page.base_addr();
        tracing::trace!(?pml4_paddr, ?vm_offset);

        let virt = vm_offset + VAddr::from_usize(pml4_paddr.as_usize());
        tracing::debug!(current_pml4_vaddr = ?virt);
        &mut *(virt.as_ptr::<Self>() as *mut _)
    }
}

impl<A> Map<Size4Kb, A> for PageCtrl
where
    A: page::Alloc<Size4Kb>,
{
    type Entry = Entry<level::Pt>;

    unsafe fn map_page(
        &mut self,
        virt: Page<VAddr, Size4Kb>,
        phys: Page<PAddr, Size4Kb>,
        frame_alloc: &A,
    ) -> page::Handle<'_, Size4Kb, Self::Entry> {
        let span = tracing::debug_span!("map_page", ?virt, ?phys);
        let _e = span.enter();
        let pml4 = self.pml4.as_mut();

        let vaddr = virt.base_addr();
        tracing::trace!(?vaddr);
        let page_table = pml4
            .create_next_table(virt, frame_alloc)
            .create_next_table(virt, frame_alloc)
            .create_next_table(virt, frame_alloc);
        tracing::debug!(?page_table);

        let entry = &mut page_table[virt];
        tracing::trace!(?entry);
        assert!(
            !entry.is_present(),
            "mapped page table entry already in use"
        );
        assert!(!entry.is_huge(), "huge bit should not be set for 4KB entry");
        let entry = entry.set_phys_page(phys).set_present(true);
        tracing::trace!(?entry, "flags set");
        page::Handle::new(virt, entry)
    }

    fn flags_mut(&mut self, _virt: Page<VAddr, Size4Kb>) -> page::Handle<'_, Size4Kb, Self::Entry> {
        unimplemented!()
    }

    unsafe fn unmap(&mut self, _virt: Page<VAddr, Size4Kb>) -> Page<PAddr, Size4Kb> {
        unimplemented!()
    }
}

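// Translation walks the same radix tree as mapping. Levels that can point
// directly at a (huge) page terminate the walk by returning the entry's
// physical page; otherwise, the lookup recurses into the next table down.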
impl<S> TranslatePage<S> for PageCtrl
where
    S: Size,
    PageTable<level::Pml4>: TranslatePage<S>,
{
    fn translate_page(&self, virt: Page<VAddr, S>) -> TranslateResult<PAddr, S> {
        unsafe { self.pml4.as_ref() }.translate_page(virt)
    }
}

impl PageCtrl {
    pub fn current() -> Self {
        let vm_offset = VM_OFFSET.load(Ordering::Acquire);
        assert_ne!(
            vm_offset,
            usize::MAX,
            "`init_paging` must be called before calling `PageTable::current`!"
        );
        let vm_offset = VAddr::from_usize(vm_offset);
        let pml4 = PageTable::current(vm_offset);
        Self {
            pml4: NonNull::from(pml4),
        }
    }
}

impl<S> TranslatePage<S> for PageTable<level::Pml4>
where
    S: Size,
    PageTable<level::Pdpt>: TranslatePage<S>,
{
    fn translate_page(&self, virt: Page<VAddr, S>) -> TranslateResult<PAddr, S> {
        self.next_table(virt)
            .ok_or(TranslateError::NotMapped)?
            .translate_page(virt)
    }
}

impl TranslatePage<Size1Gb> for PageTable<level::Pdpt> {
    fn translate_page(&self, virt: Page<VAddr, Size1Gb>) -> TranslateResult<PAddr, Size1Gb> {
        self[&virt].phys_page()
    }
}

impl TranslatePage<Size2Mb> for PageTable<level::Pdpt> {
    fn translate_page(&self, virt: Page<VAddr, Size2Mb>) -> TranslateResult<PAddr, Size2Mb> {
        self.next_table(virt)
            .ok_or(TranslateError::NotMapped)?
            .translate_page(virt)
    }
}

impl TranslatePage<Size4Kb> for PageTable<level::Pdpt> {
    fn translate_page(&self, virt: Page<VAddr, Size4Kb>) -> TranslateResult<PAddr, Size4Kb> {
        self.next_table(virt)
            .ok_or(TranslateError::NotMapped)?
            .translate_page(virt)
    }
}

impl TranslatePage<Size2Mb> for PageTable<level::Pd> {
    fn translate_page(&self, virt: Page<VAddr, Size2Mb>) -> TranslateResult<PAddr, Size2Mb> {
        self[&virt].phys_page()
    }
}

impl TranslatePage<Size4Kb> for PageTable<level::Pd> {
    fn translate_page(&self, virt: Page<VAddr, Size4Kb>) -> TranslateResult<PAddr, Size4Kb> {
        self.next_table(virt)
            .ok_or(TranslateError::NotMapped)?
            .translate_page(virt)
    }
}

impl TranslatePage<Size4Kb> for PageTable<level::Pt> {
    fn translate_page(&self, virt: Page<VAddr, Size4Kb>) -> TranslateResult<PAddr, Size4Kb> {
        self[&virt].phys_page()
    }
}

impl TranslateAddr for PageTable<level::Pml4> {
    fn translate_addr(&self, _virt: VAddr) -> Option<PAddr> {
        unimplemented!()
    }
}

impl TranslateAddr for PageTable<level::Pdpt> {
    fn translate_addr(&self, _virt: VAddr) -> Option<PAddr> {
        unimplemented!()
    }
}

impl<R: level::Recursive> PageTable<R> {
    #[inline]
    fn next_table<S: Size>(&self, idx: VirtPage<S>) -> Option<&PageTable<R::Next>> {
        let span = tracing::debug_span!(
            "next_table",
            ?idx,
            self.level = %R::NAME,
            next.level = %<R::Next>::NAME,
            self.addr = ?&self as *const _,
        );
        let _e = span.enter();
        let entry = &self[idx];
        tracing::trace!(?entry);
        if !entry.is_present() {
            tracing::debug!("entry not present!");
            return None;
        }

        let vaddr = R::Next::table_addr(idx.base_addr());
        tracing::trace!(next.addr = ?vaddr, "found next table virtual address");
        Some(unsafe { vaddr.as_non_null()?.as_ref() })
    }

    #[inline]
    fn next_table_mut<S: Size>(&mut self, idx: VirtPage<S>) -> Option<&mut PageTable<R::Next>> {
        let _span = tracing::debug_span!(
            "next_table_mut",
            ?idx,
            self.level = %R::NAME,
            next.level = %<R::Next>::NAME,
            self.addr = ?&self as *const _,
        )
        .entered();
        let entry = &mut self[idx];
        tracing::trace!(?entry);
        if !entry.is_present() {
            tracing::debug!("entry not present!");
            return None;
        }

        // The next-level table is about to be mutated through the recursive
        // mapping, so ensure its entry is writable first.
        if !entry.is_writable() {
            tracing::debug!("making page writable");
            entry.set_writable(true);
            unsafe {
                tlb::flush_page(idx.base_addr());
            }
        }

        let vaddr = R::Next::table_addr(idx.base_addr());
        tracing::trace!(next.addr = ?vaddr, "found next table virtual address");
        Some(unsafe { vaddr.as_non_null()?.as_mut() })
    }

    fn create_next_table<S: Size>(
        &mut self,
        idx: VirtPage<S>,
        alloc: &impl page::Alloc<Size4Kb>,
    ) -> &mut PageTable<R::Next> {
        let span = tracing::trace_span!(
            "create_next_table",
            ?idx,
            self.level = %R::NAME,
            next.level = %<R::Next>::NAME,
        );
        let _e = span.enter();

        if self.next_table(idx).is_some() {
            tracing::trace!("next table already exists");
            return self
                .next_table_mut(idx)
                .expect("if next_table().is_some(), the next table exists!");
        }

        tracing::trace!("no next table exists");
        let entry = &mut self[idx];
        if entry.is_huge() {
            panic!(
                "cannot create {} table for {:?}: the corresponding entry is huge!\n{:#?}",
                <R::Next>::NAME,
                idx,
                entry
            );
        }

        tracing::trace!("trying to allocate page table frame...");
        let frame = match alloc.alloc(Size4Kb) {
            Ok(frame) => frame,
            Err(_) => panic!(
                "cannot create {} table for {:?}: allocation failed!",
                <R::Next>::NAME,
                idx,
            ),
        };

        tracing::trace!(?frame, "allocated page table frame");
        entry
            .set_present(true)
            .set_writable(true)
            .set_phys_addr(frame.base_addr());
        tracing::trace!(?entry, ?frame, "set page table entry to point to frame");

        self.next_table_mut(idx)
            .expect("we should have just created this table!")
            .zero()
    }
}

impl<L: Level> PageTable<L> {
    pub fn zero(&mut self) -> &mut Self {
        for e in &mut self.entries[..] {
            *e = Entry::none();
        }
        self
    }
}

impl<L, S> ops::Index<VirtPage<S>> for PageTable<L>
where
    L: Level,
    S: Size,
{
    type Output = Entry<L>;
    fn index(&self, page: VirtPage<S>) -> &Self::Output {
        &self.entries[L::index_of(page.base_addr())]
    }
}

impl<L, S> ops::IndexMut<VirtPage<S>> for PageTable<L>
where
    L: Level,
    S: Size,
{
    fn index_mut(&mut self, page: VirtPage<S>) -> &mut Self::Output {
        &mut self.entries[L::index_of(page.base_addr())]
    }
}

impl<'a, L, S> ops::Index<&'a VirtPage<S>> for PageTable<L>
where
    L: Level,
    S: Size,
{
    type Output = Entry<L>;
    fn index(&self, pg: &'a VirtPage<S>) -> &Self::Output {
        &self[*pg]
    }
}

impl<'a, L, S> ops::IndexMut<&'a VirtPage<S>> for PageTable<L>
where
    L: Level,
    L: level::HoldsSize<S>,
    S: Size,
{
    fn index_mut(&mut self, pg: &'a VirtPage<S>) -> &mut Self::Output {
        &mut self[*pg]
    }
}

impl<L> Entry<L> {
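    // Flag bit positions as defined by the x86_64 page table entry format.
    // No constant is defined here for bit 2 (user-accessible), which the
    // kernel does not currently set.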
    const PRESENT: u64 = 1 << 0;
    const WRITABLE: u64 = 1 << 1;
    const WRITE_THROUGH: u64 = 1 << 3;
    const CACHE_DISABLE: u64 = 1 << 4;
    const ACCESSED: u64 = 1 << 5;
    const DIRTY: u64 = 1 << 6;
    const HUGE: u64 = 1 << 7;
    const GLOBAL: u64 = 1 << 8;
    const NOEXEC: u64 = 1 << 63;

    /// Mask of the bits that hold the entry's physical address (bits 12..52).
    const ADDR_MASK: u64 = 0x000f_ffff_ffff_f000;

    const fn none() -> Self {
        Self {
            entry: 0,
            _level: PhantomData,
        }
    }

    fn set_present(&mut self, present: bool) -> &mut Self {
        if present {
            self.entry |= Self::PRESENT;
        } else {
            self.entry &= !Self::PRESENT;
        }
        self
    }

    fn set_writable(&mut self, writable: bool) -> &mut Self {
        if writable {
            self.entry |= Self::WRITABLE;
        } else {
            self.entry &= !Self::WRITABLE;
        }
        self
    }

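    /// Sets whether the mapped frame is executable by clearing (or setting)
    /// the no-execute bit. Note that the NX bit is only honored by the CPU
    /// when `EFER.NXE` is enabled.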
    fn set_executable(&mut self, executable: bool) -> &mut Self {
        if executable {
            self.entry &= !Self::NOEXEC;
        } else {
            self.entry |= Self::NOEXEC;
        }
        self
    }

    #[allow(dead_code)]
    fn set_huge(&mut self, huge: bool) -> &mut Self {
        if huge {
            self.entry |= Self::HUGE;
        } else {
            self.entry &= !Self::HUGE;
        }
        self
    }

    fn set_phys_addr(&mut self, paddr: PAddr) -> &mut Self {
        let paddr = paddr.as_usize() as u64;
        assert_eq!(paddr & !Self::ADDR_MASK, 0);
        self.entry = (self.entry & !Self::ADDR_MASK) | paddr;
        self
    }

    fn is_present(&self) -> bool {
        self.entry & Self::PRESENT != 0
    }

    fn is_huge(&self) -> bool {
        self.entry & Self::HUGE != 0
    }

    fn is_executable(&self) -> bool {
        self.entry & Self::NOEXEC == 0
    }

    fn is_writable(&self) -> bool {
        self.entry & Self::WRITABLE != 0
    }

    fn phys_addr(&self) -> PAddr {
        PAddr::from_u64(self.entry & Self::ADDR_MASK)
    }
}

impl<L: level::PointsToPage> Entry<L> {
    fn phys_page(&self) -> TranslateResult<PAddr, L::Size> {
        if !self.is_present() {
            return Err(TranslateError::NotMapped);
        }

        if self.is_huge() != L::IS_HUGE {
            return Err(TranslateError::WrongSize(L::Size::INSTANCE));
        }

        Ok(Page::starting_at_fixed(self.phys_addr()).expect("page addr must be aligned"))
    }

    fn set_phys_page(&mut self, page: Page<PAddr, L::Size>) -> &mut Self {
        self.set_phys_addr(page.base_addr());
        self
    }
}

impl<L: level::PointsToPage> page::PageFlags<L::Size> for Entry<L> {
    #[inline]
    fn is_writable(&self) -> bool {
        self.is_writable()
    }

    #[inline]
    unsafe fn set_writable(&mut self, writable: bool) {
        self.set_writable(writable);
    }

    #[inline]
    fn is_executable(&self) -> bool {
        self.is_executable()
    }

    #[inline]
    unsafe fn set_executable(&mut self, executable: bool) {
        self.set_executable(executable);
    }

    #[inline]
    fn is_present(&self) -> bool {
        self.is_present()
    }

    #[inline]
    unsafe fn set_present(&mut self, present: bool) {
        self.set_present(present);
    }

    fn commit(&mut self, page: Page<VAddr, L::Size>) {
        unsafe {
            tlb::flush_page(page.base_addr());
        }
    }
}

impl<L: Level> fmt::Debug for Entry<L> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct FmtFlags<'a, L>(&'a Entry<L>);
        impl<L> fmt::Debug for FmtFlags<'_, L> {
            #[inline(always)]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                macro_rules! write_flags {
                    ($f:expr, $e:expr, $($bit:ident),+) => {
                        let mut wrote_any = false;
                        $(
                            if $e & Entry::<L>::$bit != 0 {
                                write!($f, "{}{}", if wrote_any { " | " } else { "" }, stringify!($bit))?;
                                #[allow(unused_assignments)]
                                {
                                    wrote_any = true;
                                }
                            }
                        )+
                    }
                }

                write_flags! {
                    f, self.0.entry,
                    PRESENT,
                    WRITABLE,
                    WRITE_THROUGH,
                    CACHE_DISABLE,
                    ACCESSED,
                    DIRTY,
                    HUGE,
                    GLOBAL
                }
                Ok(())
            }
        }
        f.debug_struct("Entry")
            .field("level", &format_args!("{}", L::NAME))
            .field("addr", &self.phys_addr())
            .field("flags", &FmtFlags(self))
            .finish()
    }
}

pub mod size {
    use core::fmt;
    use hal_core::mem::page::{Size, StaticSize};

    #[derive(Copy, Clone, Eq, PartialEq)]
    pub struct Size4Kb;

    impl StaticSize for Size4Kb {
        const SIZE: usize = 4 * 1024;
        const PRETTY_NAME: &'static str = "4KB";
        const INSTANCE: Self = Size4Kb;
    }

    impl fmt::Display for Size4Kb {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad(Self::PRETTY_NAME)
        }
    }

    #[derive(Copy, Clone, Eq, PartialEq)]
    pub struct Size2Mb;

    impl StaticSize for Size2Mb {
        const SIZE: usize = Size4Kb::SIZE * 512;
        const PRETTY_NAME: &'static str = "2MB";
        const INSTANCE: Self = Size2Mb;
    }

    impl fmt::Display for Size2Mb {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad(Self::PRETTY_NAME)
        }
    }

    #[derive(Copy, Clone, Eq, PartialEq)]
    pub struct Size1Gb;

    impl StaticSize for Size1Gb {
        const SIZE: usize = Size2Mb::SIZE * 512;
        const PRETTY_NAME: &'static str = "1GB";
        const INSTANCE: Self = Size1Gb;
    }

    impl fmt::Display for Size1Gb {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.pad(Self::PRETTY_NAME)
        }
    }

    #[derive(Copy, Clone, Debug, Eq)]
    #[repr(usize)]
    pub enum AnySize {
        Size4Kb = Size4Kb::SIZE,
        Size2Mb = Size2Mb::SIZE,
        Size1Gb = Size1Gb::SIZE,
    }

    impl<S: Size> PartialEq<S> for AnySize {
        fn eq(&self, other: &S) -> bool {
            *self as usize == other.as_usize()
        }
    }

    impl fmt::Display for AnySize {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                AnySize::Size4Kb => f.pad(Size4Kb::PRETTY_NAME),
                AnySize::Size2Mb => f.pad(Size2Mb::PRETTY_NAME),
                AnySize::Size1Gb => f.pad(Size1Gb::PRETTY_NAME),
            }
        }
    }

    impl Size for AnySize {
        fn as_usize(&self) -> usize {
            *self as usize
        }
    }
}

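// The page tables are accessed through a recursive (self-referential)
// mapping: PML4 entry 511 (0o777) points back at the PML4 itself, so a
// lookup that routes through index 511 one or more times lands in a page
// table rather than an ordinary page. For example, the PT covering a
// virtual address with PML4/PDPT/PD indices (a, b, c) is itself reachable
// at the virtual address
//
//     SIGN | 0o777 << 39 | a << 30 | b << 21 | c << 12
//
// which is what `level::Pt::table_addr` computes below. `SIGN` holds the
// required sign extension of bit 47 into the upper 16 bits.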
const RECURSIVE_INDEX: usize = 0o777;
const SIGN: usize = 0o177777 << 48;

pub trait Level {
    const NAME: &'static str;
    /// The number of page table levels below this one.
    const SUBLEVELS: usize;

    const INDEX_SHIFT: usize = 12 + (9 * Self::SUBLEVELS);

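    /// Returns this level's 9-bit index within the virtual address `v`.
    ///
    /// For example, the PML4 has `SUBLEVELS = 3`, so `INDEX_SHIFT` is
    /// `12 + 9 * 3 = 39` and its index occupies bits 39..48 of the address.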
    fn index_of(v: VAddr) -> usize {
        (v.as_usize() >> Self::INDEX_SHIFT) & RECURSIVE_INDEX
    }

    fn table_addr(v: VAddr) -> VAddr;
}

impl<L: Level> fmt::Debug for PageTable<L> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PageTable")
            .field("level", &fmt::display(L::NAME))
            .field("addr", &fmt::ptr(self))
            .finish()
    }
}

pub mod level {
    use super::{size::*, Level, RECURSIVE_INDEX, SIGN};
    use crate::VAddr;
    use core::fmt;
    use hal_core::mem::page::{Size, StaticSize};
    use hal_core::Address;

    pub trait PointsToPage: Level {
        type Size: StaticSize + fmt::Display;
        const IS_HUGE: bool;
    }

    pub trait Recursive: Level {
        type Next: Level;
    }

    pub trait HoldsSize<S: Size>: Level {}

    pub enum Pml4 {}
    pub enum Pdpt {}

    impl Level for Pml4 {
        const SUBLEVELS: usize = 3;
        const NAME: &'static str = "PML4";
        fn table_addr(v: VAddr) -> VAddr {
            // Following the recursive entry at every level lands back in
            // the PML4 itself.
            let addr = SIGN
                | (RECURSIVE_INDEX << 39)
                | (RECURSIVE_INDEX << 30)
                | (RECURSIVE_INDEX << 21)
                | (RECURSIVE_INDEX << 12);
            tracing::trace!(?v);
            VAddr::from_usize(addr)
        }
    }

    impl HoldsSize<Size1Gb> for Pml4 {}
    impl HoldsSize<Size2Mb> for Pml4 {}
    impl HoldsSize<Size4Kb> for Pml4 {}

    impl Level for Pdpt {
        const SUBLEVELS: usize = 2;
        const NAME: &'static str = "PDPT";
        fn table_addr(v: VAddr) -> VAddr {
            let pml4_idx = Pml4::index_of(v);
            let addr = SIGN
                | (RECURSIVE_INDEX << 39)
                | (RECURSIVE_INDEX << 30)
                | (RECURSIVE_INDEX << 21)
                | (pml4_idx << 12);
            tracing::trace!(?v, ?pml4_idx);
            VAddr::from_usize(addr)
        }
    }
    impl HoldsSize<Size1Gb> for Pdpt {}
    impl HoldsSize<Size2Mb> for Pdpt {}
    impl HoldsSize<Size4Kb> for Pdpt {}

    impl Recursive for Pml4 {
        type Next = Pdpt;
    }

    impl PointsToPage for Pdpt {
        type Size = Size1Gb;
        const IS_HUGE: bool = true;
    }

    impl Recursive for Pdpt {
        type Next = Pd;
    }

    pub enum Pd {}

    impl Level for Pd {
        const NAME: &'static str = "PD";
        const SUBLEVELS: usize = 1;
        fn table_addr(v: VAddr) -> VAddr {
            let pml4_idx = Pml4::index_of(v);
            let pdpt_idx = Pdpt::index_of(v);
            let addr = SIGN
                | (RECURSIVE_INDEX << 39)
                | (RECURSIVE_INDEX << 30)
                | (pml4_idx << 21)
                | (pdpt_idx << 12);
            tracing::trace!(?v, ?pml4_idx, ?pdpt_idx);
            VAddr::from_usize(addr)
        }
    }

    impl Recursive for Pd {
        type Next = Pt;
    }

    impl PointsToPage for Pd {
        type Size = Size2Mb;
        const IS_HUGE: bool = true;
    }
    impl HoldsSize<Size2Mb> for Pd {}
    impl HoldsSize<Size4Kb> for Pd {}

    pub enum Pt {}

    impl Level for Pt {
        const NAME: &'static str = "PT";
        const SUBLEVELS: usize = 0;

        fn table_addr(v: VAddr) -> VAddr {
            let pml4_idx = Pml4::index_of(v);
            let pdpt_idx = Pdpt::index_of(v);
            let pd_idx = Pd::index_of(v);
            let addr = SIGN
                | (RECURSIVE_INDEX << 39)
                | (pml4_idx << 30)
                | (pdpt_idx << 21)
                | (pd_idx << 12);
            tracing::trace!(?v, ?pml4_idx, ?pdpt_idx, ?pd_idx);
            VAddr::from_usize(addr)
        }
    }

    impl PointsToPage for Pt {
        type Size = Size4Kb;
        const IS_HUGE: bool = false;
    }
    impl HoldsSize<Size4Kb> for Pt {}
}

pub(crate) mod tlb {
    use crate::control_regs::cr3;
    use crate::VAddr;
    use core::arch::asm;
    use hal_core::Address;

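    /// Flushes the entire TLB (other than global pages) by reloading CR3
    /// with its current value.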
    #[allow(dead_code)]
    pub(crate) unsafe fn flush_all() {
        let (pml4_paddr, flags) = cr3::read();
        cr3::write(pml4_paddr, flags);
    }

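    /// Invalidates the TLB entry for the page containing `addr`, using the
    /// `invlpg` instruction.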
    pub(crate) unsafe fn flush_page(addr: VAddr) {
        tracing::trace!(?addr, "flush_page");
        asm!("invlpg [{0}]", in(reg) addr.as_usize() as u64);
    }
}

mycotest::decl_test! {
    fn basic_map() -> mycotest::TestResult {
        let mut ctrl = PageCtrl::current();
        // This test should not need to allocate any new page table frames,
        // so use an allocator that always fails.
        let frame_alloc = page::EmptyAlloc::default();

        let frame = Page::containing_fixed(PAddr::from_usize(0xb8000));
        let page = Page::containing_fixed(VAddr::from_usize(0));

        let page = unsafe {
            ctrl.map_page(page, frame, &frame_alloc).set_writable(true).commit()
        };
        tracing::info!(?page, "page mapped!");

        let page_ptr = page.base_addr().as_mut_ptr::<u64>();
        unsafe { page_ptr.offset(400).write_volatile(0x_f021_f077_f065_f04e) };

        tracing::info!("wow, it didn't fault");

        Ok(())
    }
}

mycotest::decl_test! {
    fn identity_mapped_pages_are_reasonable() -> mycotest::TestResult {
        let mut ctrl = PageCtrl::current();

        let frame_alloc = page::EmptyAlloc::default();
        let actual_frame = Page::containing_fixed(PAddr::from_usize(0xb8000));
        unsafe {
            ctrl.identity_map(actual_frame, &frame_alloc).set_writable(true).commit()
        };

        let page = VirtPage::<Size4Kb>::containing_fixed(VAddr::from_usize(0xb8000));
        let frame = ctrl.translate_page(page).expect("translate");
        tracing::info!(?page, ?frame, "translated");
        mycotest::assert_eq!(frame, actual_frame, "identity mapped address should translate to itself");
        Ok(())
    }
}