hal_x86_64/segment.rs

//! x86 memory segmentation structures.
use crate::{cpu, task};
use core::{arch::asm, mem};
use mycelium_util::{
    bits::{self, Pack64, Packing64, Pair64},
    fmt,
};

bits::bitfield! {
    /// A segment selector.
    ///
    /// These values are stored in a segmentation register (`ss`, `cs`, `ds`, `es`,
    /// `gs`, or `fs`) to select the current segment in that register. A selector
    /// consists of two bits indicating the privilege ring of that segment, a bit
    /// indicating whether it selects a [GDT] or LDT, and a 13-bit index into the
    /// [GDT] or LDT indicating which segment is selected.
    ///
    /// Refer to sections 3.4.3 and 3.4.4 in [Vol. 3A of the _Intel® 64 and IA-32
    /// Architectures Software Developer's Manual_][manual] for details.
    ///
    /// [GDT]: Gdt
    /// [manual]: https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-3a-part-1-manual.html
    #[derive(Eq, PartialEq)]
    pub struct Selector<u16> {
        /// The first 2 least-significant bits are the selector's privilege ring.
        const RING: cpu::Ring;
        /// The next bit is set if this is an LDT segment selector.
        const IS_LDT: bool;
        /// The remaining bits are the index in the GDT/LDT.
        const INDEX = 13;
    }
}
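
// A small usage sketch (illustrative, not part of the original file): building
// a ring-3 GDT selector for an arbitrary index and reading it back through the
// accessors defined in `impl Selector` below.
//
//     let mut selector = Selector::from_index(3);
//     selector.set_ring(cpu::Ring::Ring3);
//     assert!(selector.is_gdt());
//     assert_eq!(selector.index(), 3);
//     // the ring lives in the two low bits, so the raw value is (3 << 3) | 0b11
//     assert_eq!(selector.0, (3 << 3) | 0b11);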

bits::bitfield! {
    /// A 64-bit mode user segment descriptor.
    ///
    /// A segment descriptor is an entry in a [GDT] or LDT that provides the
    /// processor with the size and location of a segment, as well as access control
    /// and status information.
    ///
    /// Refer to section 3.4.5 in [Vol. 3A of the _Intel® 64 and IA-32 Architectures
    /// Software Developer's Manual_][manual] for details.
    ///
    /// [GDT]: Gdt
    /// [manual]: https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-3a-part-1-manual.html
    #[derive(Eq, PartialEq)]
    pub struct Descriptor<u64> {
        /// First 16 bits of the limit field (ignored in 64-bit mode)
        const LIMIT_LOW = 16;
        /// First 24 bits of the base field (ignored in 64-bit mode)
        const BASE_LOW = 24;

        // Access flags (5 bits).
        // In order, least to most significant:
        // - `ACCESSED`
        // - `READABLE`/`WRITABLE`
        // - `DIRECTION`/`CONFORMING`
        // - `EXECUTABLE` (code/data)
        // - `TYPE` (user/system)

        /// Set by the processor if this segment has been accessed. Only cleared by software.
        /// _Setting_ this bit in software prevents GDT writes on first use.
        const ACCESSED: bool;

        /// Readable bit for code segments/writable bit for data segments.
        const READABLE: bool;
        /// Direction bit for code segments/conforming bit for data segments.
        const CONFORMING: bool;
        /// Executable bit (if 1, this is a code segment)
        const IS_CODE_SEGMENT: bool;
        /// Descriptor type bit (if 1, this is a user segment)
        const IS_USER_SEGMENT: bool;
        /// Privilege ring.
        const RING: cpu::Ring;
        /// Present bit
        const IS_PRESENT: bool;
        /// High 4 bits of the limit (ignored in 64-bit mode)
        const LIMIT_HIGH = 4;

        // High flags (4 bits).
        // In order, least to most significant:
        // - `AVAILABLE`
        // - `LONG_MODE`
        // - `DEFAULT_SIZE`
        // - `GRANULARITY`

        /// Available for use by the Operating System
        const AVAILABLE: bool;

        /// Must be set for 64-bit code segments, unset otherwise.
        const IS_LONG_MODE: bool;

        /// Use 32-bit (as opposed to 16-bit) operands. If [`IS_LONG_MODE`][Self::IS_LONG_MODE]
        /// is set, this must be unset. In 64-bit mode, ignored for data segments.
        const IS_32_BIT: bool;

        /// Limit field is scaled by 4096 bytes. In 64-bit mode, ignored for all segments.
        const GRANULARITY: bool;

        /// Highest 8 bits of the base field
        const BASE_MID = 8;
    }
}
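
// Sketch of how the flags above combine (illustrative, not part of the
// original file): a minimal flat long-mode kernel code segment needs only the
// user/code/present/long-mode bits; `Descriptor::code()` further down
// additionally sets the accessed, readable, and granularity bits plus the
// limit bits. Here the same kind of value is built by hand with the generated
// bitfield accessors:
//
//     let descr = Descriptor(0)
//         .with(Descriptor::IS_USER_SEGMENT, true)
//         .with(Descriptor::IS_CODE_SEGMENT, true)
//         .with(Descriptor::IS_PRESENT, true)
//         .with(Descriptor::IS_LONG_MODE, true)
//         .with(Descriptor::RING, cpu::Ring::Ring0);
//     assert!(descr.get(Descriptor::IS_PRESENT));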

/// A [Global Descriptor Table (GDT)][gdt].
///
/// This can have up to 8192 entries, but in 64-bit mode, you don't need most
/// of those (since you can't do real segmentation), so it defaults to 8.
///
/// Refer to section 3.5.1 in [Vol. 3A of the _Intel® 64 and IA-32 Architectures
/// Software Developer's Manual_][manual] for details.
///
/// [gdt]: https://wiki.osdev.org/Global_Descriptor_Table
/// [manual]: https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-3a-part-1-manual.html
// TODO(eliza): i'd like to make the size a u16 to enforce this limit and cast
//   it to `usize` in the array, but this requires unstable const generics
//   features and i didn't want to mess with it...
#[derive(Clone)]
// rustfmt eats default parameters in const generics for some reason (probably a
// bug...)
#[rustfmt::skip]
pub struct Gdt<const SIZE: usize = 8> {
    entries: [u64; SIZE],
    sys_segments: [bool; SIZE],
    push_at: usize,
}
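
// Sizing sketch (illustrative, not part of the original file): the const
// parameter is the table's capacity in 8-byte entries. Entry 0 is always the
// null descriptor (see `Gdt::new`, which starts pushing at index 1), and
// system descriptors occupy two entries each, so the default capacity of 8
// leaves room for a handful of code/data segments plus a TSS.
//
//     let default_sized: Gdt = Gdt::new(); // capacity 8
//     let larger: Gdt<16> = Gdt::new();    // room for more (e.g. per-CPU TSSs)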

/// A 64-bit mode descriptor for a system segment (such as an LDT or TSS
/// descriptor).
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct SystemDescriptor {
    low: u64,
    high: u64,
}
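
// Usage sketch (illustrative, not part of the original file): a TSS descriptor
// is built from a `&'static task::StateSegment` and then takes up *two* GDT
// slots when added via `Gdt::add_sys_segment`. The `static` TSS and whatever
// const constructor initializes it are assumed here; this file doesn't define
// one, and `gdt` stands for a `Gdt` being built elsewhere.
//
//     static TSS: task::StateSegment = /* some const constructor */;
//     let tss_selector = gdt.add_sys_segment(SystemDescriptor::tss(&TSS));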

// === impl Selector ===

impl Selector {
    pub const fn null() -> Self {
        Self(0)
    }

    pub const fn from_index(u: u16) -> Self {
        Self(Self::INDEX.pack_truncating(u, 0))
    }

    pub const fn from_raw(u: u16) -> Self {
        Self(u)
    }

    pub fn ring(self) -> cpu::Ring {
        self.get(Self::RING)
    }

    /// Returns which descriptor table (GDT or LDT) this selector references.
    ///
    /// # Note
    ///
    /// This will never return [`cpu::DescriptorTable::Idt`], as a segment
    /// selector only references segmentation table descriptors.
    pub fn table(&self) -> cpu::DescriptorTable {
        if self.is_gdt() {
            cpu::DescriptorTable::Gdt
        } else {
            cpu::DescriptorTable::Ldt
        }
    }

    /// Returns true if this is an LDT segment selector.
    pub fn is_ldt(&self) -> bool {
        self.get(Self::IS_LDT)
    }

    /// Returns true if this is a GDT segment selector.
    #[inline]
    pub fn is_gdt(&self) -> bool {
        !self.is_ldt()
    }

    /// Returns the index into the LDT or GDT this selector refers to.
    pub const fn index(&self) -> u16 {
        Self::INDEX.unpack_bits(self.0)
    }

    pub fn set_gdt(&mut self) -> &mut Self {
        self.set(Self::IS_LDT, false)
    }

    pub fn set_ldt(&mut self) -> &mut Self {
        self.set(Self::IS_LDT, true)
    }

    pub fn set_ring(&mut self, ring: cpu::Ring) -> &mut Self {
        tracing::trace!("before set_ring: {:b}", self.0);
        self.set(Self::RING, ring);
        tracing::trace!("after set_ring: {:b}", self.0);
        self
    }

    pub fn set_index(&mut self, index: u16) -> &mut Self {
        self.set(Self::INDEX, index)
    }

    /// Returns the current selector in the `cs` (code segment) register.
    pub fn current_cs() -> Self {
        let sel: u16;
        unsafe {
            asm!("mov {0:x}, cs", out(reg) sel, options(nomem, nostack, preserves_flags));
        }
        Self(sel)
    }

    /// Sets `self` as the current code segment selector in the `cs` register.
    ///
    /// # Notes
    ///
    /// In 64-bit long mode, the code segment selector *must* have a base
    /// address of 0 and limit 2^64.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_cs(self) {
        // because x86 is a very well designed and normal CPU architecture, you
        // can set the value of the `cs` register with a normal `mov`
        // instruction, just like you can with every other segment register.
        //
        // HA HA JUST KIDDING LOL. you can't set the value of `cs` with a `mov`.
        // the only way to set the value of `cs` is by doing a ljmp, a lcall, or
        // a lret with a `cs` selector on the stack (or triggering an interrupt).
        //
        // a thing i think is very cool about the AMD64 CPU Architecture is how
        // we have to do all this cool segmentation bullshit in long mode even
        // though we ... can't ... actually use memory segmentation.
        //
        // see https://wiki.osdev.org/Far_Call_Trick
        tracing::trace!("setting code segment...");
        asm!(
            "push {selector}",
            "lea {retaddr}, [2f + rip]",
            "push {retaddr}",
            "retfq",
            // This is cool: apparently we can't use '0' or '1' as a label in
            // LLVM assembly, due to an LLVM bug:
            // https://github.com/llvm/llvm-project/issues/99547
            //
            // So, this is 2 instead of 1. lmao.
            "2:",
            selector = in(reg) self.0 as u64,
            retaddr = lateout(reg) _,
            options(preserves_flags),
        );
        tracing::trace!(selector = fmt::alt(self), "set code segment");
    }

    /// Sets `self` as the current stack segment selector in the `ss` register.
    ///
    /// # Notes
    ///
    /// In 64-bit long mode, the stack segment selector *must* have a base
    /// address of 0 and limit 2^64.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_ss(self) {
        asm!("mov ss, {:x}", in(reg) self.0, options(nostack, preserves_flags));
        tracing::trace!(selector = fmt::alt(self), "set stack segment");
    }

    /// Sets `self` as the current data segment selector in the `ds` register.
    ///
    /// # Notes
    ///
    /// In 64-bit long mode, the data segment selector *must* have a base
    /// address of 0 and limit 2^64.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_ds(self) {
        asm!("mov ds, {:x}", in(reg) self.0, options(nostack, preserves_flags));
        tracing::trace!(selector = fmt::alt(self), "set data segment");
    }

    /// Sets `self` as the current segment in the general-purpose data segment
    /// register `es` ("Extra Segment").
    ///
    /// # Notes
    ///
    /// In 64-bit long mode, the extra segment selector *must* have a base
    /// address of 0 and limit 2^64.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_es(self) {
        asm!("mov es, {:x}", in(reg) self.0, options(nostack, preserves_flags));
        tracing::trace!(selector = fmt::alt(self), "set extra segment");
    }

    /// Sets `self` as the current segment in the general-purpose data segment
    /// register `fs` ("File Segment").
    ///
    /// # Notes
    ///
    /// Unlike the `cs`, `ss`, `ds`, and `es` registers, the `fs` register need
    /// not be zeroed in long mode, and can be used by the operating system. In
    /// particular, the `gs` and `fs` registers may be useful for storing
    /// thread- or CPU-local data.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_fs(self) {
        asm!("mov fs, {:x}", in(reg) self.0, options(nostack, preserves_flags));
        tracing::trace!(selector = fmt::alt(self), "set fs");
    }

    /// Sets `self` as the current segment in the general-purpose data segment
    /// register `gs` ("G Segment").
    ///
    /// # Notes
    ///
    /// Unlike the `cs`, `ss`, `ds`, and `es` registers, the `gs` register need
    /// not be zeroed in long mode, and can be used by the operating system. In
    /// particular, the `gs` and `fs` registers may be useful for storing
    /// thread- or CPU-local data.
    ///
    /// # Safety
    ///
    /// lol
    pub unsafe fn set_gs(self) {
        asm!("mov gs, {:x}", in(reg) self.0, options(nostack, preserves_flags));
        tracing::trace!(selector = fmt::alt(self), "set gs");
    }
}
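
// Putting the setters together (illustrative sketch, not part of the original
// file): after a new GDT is loaded, its selectors get installed into the
// segment registers. `set_cs` uses the far-return trick documented above; the
// data segment registers can simply be `mov`ed, and in long mode `ds`/`es` are
// typically just left as the null selector. Here `code_selector` and
// `stack_selector` stand for whatever `Gdt::add_segment` returned.
//
//     unsafe {
//         code_selector.set_cs();
//         stack_selector.set_ss();
//         Selector::null().set_ds();
//         Selector::null().set_es();
//     }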

// === impl Gdt ===

impl<const SIZE: usize> Gdt<SIZE> {
    /// Sets `self` as the current GDT.
    ///
    /// This method is safe, because the `'static` bound on `self` ensures that
    /// the GDT it points to doesn't go away while it's active. Therefore, this
    /// method is a safe wrapper around the `lgdt` CPU instruction.
    pub fn load(&'static self) {
        // Create the descriptor table pointer with *just* the actual table, so
        // that the next push index isn't considered a segment descriptor!
        let ptr = cpu::DtablePtr::new(&self.entries);
        tracing::trace!(?ptr, "loading GDT");
        unsafe {
            // Safety: the `'static` bound ensures the GDT isn't going away
            // unless you did something really evil.
            cpu::intrinsics::lgdt(ptr)
        }
        tracing::trace!("loaded GDT!");
    }

    /// Returns a new `Gdt` with all entries zeroed.
    pub const fn new() -> Self {
        Gdt {
            entries: [0; SIZE],
            sys_segments: [false; SIZE],
            push_at: 1,
        }
    }

    #[tracing::instrument(level = "trace", skip(self))]
    pub fn add_segment(&mut self, segment: Descriptor) -> Selector {
        let ring = segment.ring_bits();
        let idx = self.push(segment.0);
        let selector = Selector::from_raw(Selector::from_index(idx).0 | ring as u16);
        tracing::trace!(idx, ?selector, "added segment");
        selector
    }

    #[tracing::instrument(level = "trace", skip(self))]
    pub fn add_sys_segment(&mut self, segment: SystemDescriptor) -> Selector {
        tracing::trace!(?segment, "Gdt::add_sys_segment");
        let idx = self.push(segment.low);
        self.sys_segments[idx as usize] = true;
        self.push(segment.high);
        // sys segments are always ring 0
        let selector = Selector::null()
            .with(Selector::INDEX, idx)
            .with(Selector::RING, cpu::Ring::Ring0);
        tracing::trace!(idx, ?selector, "added system segment");
        selector
    }

    const fn push(&mut self, entry: u64) -> u16 {
        let idx = self.push_at;
        self.entries[idx] = entry;
        self.push_at += 1;
        idx as u16
    }
}
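
// End-to-end sketch (illustrative, not part of the original file): building a
// minimal kernel GDT. Because `load` requires `&'static self`, the finished
// table normally lives in a `static` (behind whatever one-time initialization
// the kernel uses; that part is omitted here, and `GDT` stands for that
// hypothetical `static`).
//
//     let mut gdt: Gdt = Gdt::new();
//     let code_selector = gdt.add_segment(Descriptor::code());
//     let stack_selector = gdt.add_segment(Descriptor::data());
//     // ...move `gdt` into its `'static` home, then:
//     GDT.load();
//     // finally, reload the segment registers as sketched after `impl Selector`.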

impl<const SIZE: usize> Default for Gdt<SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const SIZE: usize> fmt::Debug for Gdt<SIZE> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct GdtEntries<'a, const SIZE: usize>(&'a Gdt<SIZE>);
        impl<const SIZE: usize> fmt::Debug for GdtEntries<'_, SIZE> {
            #[inline]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let mut sys0 = None;
                let mut entries = f.debug_list();
                for (&entry, &is_sys) in self.0.entries[..self.0.push_at]
                    .iter()
                    .zip(self.0.sys_segments.iter())
                {
                    if let Some(low) = sys0.take() {
                        entries.entry(&SystemDescriptor { low, high: entry });
                    } else if is_sys {
                        sys0 = Some(entry);
                    } else {
                        entries.entry(&Descriptor(entry));
                    }
                }

                entries.finish()
            }
        }

        f.debug_struct("Gdt")
            .field("capacity", &SIZE)
            .field("len", &(self.push_at - 1))
            .field("entries", &GdtEntries(self))
            .finish()
    }
}

// === impl Descriptor ===

impl Descriptor {
    const LIMIT_LOW_PAIR: Pair64 = Self::LIMIT_LOW.pair_with(limit::LOW);
    const LIMIT_HIGH_PAIR: Pair64 = Self::LIMIT_HIGH.pair_with(limit::HIGH);

    const BASE_LOW_PAIR: Pair64 = Self::BASE_LOW.pair_with(base::LOW);
    const BASE_MID_PAIR: Pair64 = Self::BASE_MID.pair_with(base::MID);

    // Access flags (5 bits).
    // In order, least to most significant:
    // - `ACCESSED`
    // - `READABLE`/`WRITABLE`
    // - `DIRECTION`/`CONFORMING`
    // - `EXECUTABLE` (code/data)
    // - `TYPE` (user/system)
    // TODO(eliza): add a nicer `mycelium-bitfield` API for combining pack specs...
    const ACCESS_FLAGS: Pack64 = Self::BASE_LOW.typed::<u64, ()>().next(5);

    // hahaha lol no limits
    const DEFAULT_BITS: u64 = Packing64::new(0)
        .set_all(&Self::LIMIT_LOW)
        .set_all(&Self::LIMIT_HIGH)
        .bits();

    const USER_FLAGS: u64 = Packing64::new(0)
        .set_all(&Self::IS_USER_SEGMENT)
        .set_all(&Self::IS_PRESENT)
        .set_all(&Self::READABLE)
        .set_all(&Self::ACCESSED)
        .set_all(&Self::GRANULARITY)
        .bits();

    const CODE_FLAGS: u64 = Packing64::new(Self::USER_FLAGS)
        .set_all(&Self::IS_CODE_SEGMENT)
        .bits();
    const DATA_FLAGS: u64 = Packing64::new(Self::USER_FLAGS)
        .set_all(&Self::IS_32_BIT)
        .bits();

    /// Returns a new segment descriptor for a 64-bit code segment.
    pub const fn code() -> Self {
        Self(Self::DEFAULT_BITS | Self::CODE_FLAGS | Self::IS_LONG_MODE.raw_mask())
    }

    /// Returns a new segment descriptor for a 32-bit code segment.
    pub const fn code_32() -> Self {
        Self(Self::DEFAULT_BITS | Self::CODE_FLAGS | Self::IS_32_BIT.raw_mask())
    }

    /// Returns a new segment descriptor for a data segment.
    pub const fn data() -> Self {
        Self(Self::DEFAULT_BITS | Self::DATA_FLAGS)
    }

    pub fn ring(&self) -> cpu::Ring {
        cpu::Ring::from_u8(self.ring_bits())
    }

    pub const fn limit(&self) -> u64 {
        Pack64::pack_in(0)
            .pack_from_dst(self.0, &Self::LIMIT_LOW_PAIR)
            .pack_from_dst(self.0, &Self::LIMIT_HIGH_PAIR)
            .bits()
    }

    pub const fn base(&self) -> u64 {
        Pack64::pack_in(0)
            .pack_from_dst(self.0, &Self::BASE_LOW_PAIR)
            .pack_from_dst(self.0, &Self::BASE_MID_PAIR)
            .bits()
    }

    /// Separated out from constructing the `cpu::Ring` for use in `const fn`s
    /// (since `Ring::from_u8` panics), but shouldn't be public because it
    /// performs no validation.
    const fn ring_bits(&self) -> u8 {
        Self::RING.unpack_bits(self.0) as u8
    }

    pub const fn with_ring(self, ring: cpu::Ring) -> Self {
        Self(Self::RING.pack_truncating(ring as u8 as u64, self.0))
    }
}
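
// Quick sketch of the constructors above (illustrative, not part of the
// original file): the prebuilt descriptors are flat (base 0) and set every
// limit bit, matching the Linux GDT values checked in the tests at the bottom
// of this file.
//
//     let user_code = Descriptor::code().with_ring(cpu::Ring::Ring3);
//     assert_eq!(user_code.base(), 0);
//     assert_eq!(user_code.limit(), 0xf_ffff);
//     assert!(matches!(user_code.ring(), cpu::Ring::Ring3));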

impl SystemDescriptor {
    const BASE_HIGH: bits::Pack64 = bits::Pack64::least_significant(32);
    const BASE_HIGH_PAIR: Pair64 = Self::BASE_HIGH.pair_with(base::HIGH);

    /// Construct a system segment descriptor for the provided boxed task-state
    /// segment (TSS).
    #[cfg(feature = "alloc")]
    pub fn boxed_tss(tss: alloc::boxed::Box<task::StateSegment>) -> Self {
        Self::tss(alloc::boxed::Box::leak(tss))
    }

    /// Construct a system segment descriptor for the provided task-state
    /// segment (TSS), stored in a `static`.
    pub fn tss(tss: &'static task::StateSegment) -> Self {
        let tss_addr = tss as *const _ as u64;
        unsafe {
            // Safety: we know the address is valid because we got it from a
            // `&'static task::StateSegment` reference, which could only point
            // to a valid TSS if it was constructed in safe code.
            Self::tss_from_exposed_addr(tss_addr)
        }
    }

    /// Construct a system segment descriptor for an alleged task-state segment
    /// (TSS) address.
    ///
    /// # Safety
    ///
    /// `tss_addr` must be a valid TSS address!
    unsafe fn tss_from_exposed_addr(tss_addr: u64) -> Self {
        tracing::trace!(tss_addr = fmt::hex(tss_addr), "making TSS descriptor...");

        // limit (-1 because the bound is inclusive)
        let limit = (mem::size_of::<task::StateSegment>() - 1) as u64;

        let low = Pack64::pack_in(0)
            .pack(true, &Descriptor::IS_PRESENT)
            .pack_from_src(limit, &Descriptor::LIMIT_LOW_PAIR)
            // base addr (low 24 bits)
            .pack_from_src(tss_addr, &Descriptor::BASE_LOW_PAIR)
            .pack_from_src(limit, &Descriptor::LIMIT_HIGH_PAIR)
            .pack_truncating(0b1001, &Descriptor::ACCESS_FLAGS)
            // base addr (mid 8 bits)
            .pack_from_src(tss_addr, &Descriptor::BASE_MID_PAIR)
            .bits();

        let high = Pack64::pack_in(0)
            // base addr (highest 32 bits)
            .pack_from_src(tss_addr, &Self::BASE_HIGH_PAIR)
            .bits();

        Self { high, low }
    }

    pub fn base(&self) -> u64 {
        Pack64::pack_in(0)
            .pack_from_dst(self.low, &Descriptor::BASE_LOW_PAIR)
            .pack_from_dst(self.low, &Descriptor::BASE_MID_PAIR)
            .pack_from_dst(self.high, &Self::BASE_HIGH_PAIR)
            .bits()
    }

    pub const fn limit(&self) -> u64 {
        Pack64::pack_in(0)
            .pack_from_dst(self.low, &Descriptor::LIMIT_LOW_PAIR)
            .pack_from_dst(self.low, &Descriptor::LIMIT_HIGH_PAIR)
            .bits()
    }
}

impl fmt::Debug for SystemDescriptor {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("segment::SystemDescriptor")
            .field(
                "limit_low",
                &fmt::hex(Descriptor::LIMIT_LOW.unpack(self.low)),
            )
            .field("base_low", &fmt::hex(Descriptor::BASE_LOW.unpack(self.low)))
            .field("type", &fmt::bin(Descriptor::ACCESS_FLAGS.unpack(self.low)))
            .field("ring", &Descriptor::RING.unpack(self.low))
            .field("present", &Descriptor::IS_PRESENT.unpack(self.low))
            .field(
                "limit_high",
                &fmt::bin(Descriptor::LIMIT_HIGH.unpack(self.low)),
            )
            .field("base_mid", &fmt::hex(Descriptor::BASE_MID.unpack(self.low)))
            .field("base_high", &fmt::hex(Self::BASE_HIGH.unpack(self.high)))
            .field("low_bits", &fmt::hex(self.low))
            .field("high_bits", &fmt::hex(self.high))
            .field("limit", &self.limit())
            .field("base", &fmt::hex(self.base()))
            .finish()
    }
}

mod limit {
    use mycelium_util::bits::Pack64;
    pub(super) const LOW: Pack64 = Pack64::least_significant(16);
    pub(super) const HIGH: Pack64 = LOW.next(4);
}

mod base {
    use mycelium_util::bits::Pack64;
    pub(super) const LOW: Pack64 = Pack64::least_significant(24);
    pub(super) const MID: Pack64 = LOW.next(8);
    pub(super) const HIGH: Pack64 = MID.next(32);
}

#[cfg(test)]
mod tests {
    use super::*;
    use core::mem::size_of;
    use proptest::prelude::*;

    #[test]
    fn prettyprint() {
        let selector = Selector::null()
            .with(Selector::INDEX, 30)
            .with(Selector::RING, cpu::Ring::Ring0);
        println!("{selector}");
    }

    #[test]
    fn segment_selector_is_correct_size() {
        assert_eq!(size_of::<Selector>(), 2);
    }

    #[test]
    fn selector_pack_specs_valid() {
        Selector::assert_valid()
    }

    #[test]
    fn descriptor_pack_specs_valid() {
        Descriptor::assert_valid();
        assert_eq!(Descriptor::IS_PRESENT.raw_mask(), 1 << 47);
    }

    #[test]
    fn descriptor_pack_pairs_valid() {
        Descriptor::LIMIT_LOW_PAIR.assert_valid();
        Descriptor::LIMIT_HIGH_PAIR.assert_valid();
        Descriptor::BASE_LOW_PAIR.assert_valid();
        Descriptor::BASE_MID_PAIR.assert_valid();
    }

    #[test]
    fn sys_descriptor_pack_pairs_valid() {
        SystemDescriptor::BASE_HIGH_PAIR.assert_valid()
    }

    #[test]
    fn default_descriptor_flags_match_linux() {
        use cpu::Ring::*;
        // are our default flags reasonable? stolen from linux: arch/x86/kernel/cpu/common.c
        assert_eq!(
            dbg!(Descriptor::code().with_ring(Ring0)),
            Descriptor(0x00af9b000000ffff),
        );
        assert_eq!(
            dbg!(Descriptor::code_32().with_ring(Ring0)),
            Descriptor(0x00cf9b000000ffff)
        );
        assert_eq!(
            dbg!(Descriptor::data().with_ring(Ring0)),
            Descriptor(0x00cf93000000ffff)
        );
        assert_eq!(
            dbg!(Descriptor::code().with_ring(Ring3)),
            Descriptor(0x00affb000000ffff)
        );
        assert_eq!(
            dbg!(Descriptor::code_32().with_ring(Ring3)),
            Descriptor(0x00cffb000000ffff)
        );
        assert_eq!(
            dbg!(Descriptor::data().with_ring(Ring3)),
            Descriptor(0x00cff3000000ffff)
        );
    }

    #[test]
    fn debug_base_pack_pairs() {
        dbg!(Descriptor::BASE_LOW_PAIR);
        dbg!(Descriptor::BASE_MID_PAIR);
        dbg!(SystemDescriptor::BASE_HIGH_PAIR);
    }

    fn any_valid_tss_addr() -> impl Strategy<Value = u64> {
        any::<u64>().prop_filter(
            "valid addresses for TSS base must be 4-byte aligned",
            |addr| addr.is_multiple_of(0x4),
        )
    }

    proptest! {
        #[test]
        fn system_segment_tss_base(addr in any_valid_tss_addr()) {
            let tss_descr = unsafe {
                // Safety: this address doesn't point at a valid TSS. But,
                // that's fine, because we're only using it to test the
                // bitfield packing, and it won't ever be set as the real TSS.
                SystemDescriptor::tss_from_exposed_addr(addr)
            };
            let base = tss_descr.base();
            prop_assert_eq!(
                base, addr,
                "\n  left: {:#064b}\n right: {:#064b}\n descr: {:#?}\n  addr: {:#x}\n",
                addr, base, tss_descr, addr
            );
        }

        #[test]
        fn system_segment_tss_limit(addr in any_valid_tss_addr()) {
            let tss_descr = unsafe {
                // Safety: this address doesn't point at a valid TSS. But,
                // that's fine, because we're only using it to test the
                // bitfield packing, and it won't ever be set as the real TSS.
                SystemDescriptor::tss_from_exposed_addr(addr)
            };
            prop_assert_eq!(
                tss_descr.limit(),
                mem::size_of::<task::StateSegment>() as u64 - 1,
                "limit; descr={:#?}", tss_descr
            );
        }
    }
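
    // An extra example-style test (added as an illustration; not part of the
    // original test suite): round-trips an index and ring through `Selector`'s
    // accessors, the same way `Gdt::add_segment` composes a selector.
    #[test]
    fn selector_index_ring_roundtrip() {
        let mut selector = Selector::from_index(6);
        selector.set_ring(cpu::Ring::Ring3);
        assert!(selector.is_gdt());
        assert_eq!(selector.index(), 6);
        assert!(matches!(selector.ring(), cpu::Ring::Ring3));
    }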
}