mycelium_alloc/buddy.rs

use core::{
    alloc::{GlobalAlloc, Layout},
    cmp, mem, ptr,
};
use hal_core::{
    mem::{
        page::{self, AllocError, PageRange, Size},
        Region, RegionKind,
    },
    Address, PAddr, VAddr,
};
use mycelium_util::fmt;
use mycelium_util::intrusive::{list, Linked, List};
use mycelium_util::math::Logarithm;
use mycelium_util::sync::{
    atomic::{AtomicUsize, Ordering::*},
    blocking::Mutex,
};

#[derive(Debug)]
pub struct Alloc<const FREE_LISTS: usize> {
    /// Minimum allocatable page size in bytes.
    ///
    /// Free blocks on `free_lists[0]` are one page of this size each. For each
    /// index higher in the array of free lists, the blocks on that free list
    /// are 2x as large.
    min_size: usize,

    base_vaddr: AtomicUsize,
    vm_offset: AtomicUsize,

    /// Cache this so we don't have to re-evaluate it.
    min_size_log2: usize,

    /// Total size of the heap.
    heap_size: AtomicUsize,

    /// Currently allocated size.
    allocated_size: AtomicUsize,

    /// Array of free lists by "order". The order of a block is the number
    /// of times the minimum page size must be doubled to reach that block's
    /// size.
    free_lists: [Mutex<List<Free>>; FREE_LISTS],
}
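// For example (illustrative value only), with a `min_size` of 64 bytes,
// `free_lists[0]` holds 64-byte blocks, `free_lists[1]` holds 128-byte blocks,
// `free_lists[2]` holds 256-byte blocks, and so on, up to
// `free_lists[FREE_LISTS - 1]`, which holds blocks of 64 << (FREE_LISTS - 1)
// bytes.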

type Result<T> = core::result::Result<T, AllocError>;

pub struct Free {
    magic: usize,
    links: list::Links<Self>,
    meta: Region,
}

// ==== impl Alloc ====

impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
    #[cfg(not(loom))]
    pub const fn new(mut min_size: usize) -> Self {
        // clippy doesn't like interior mutable items in `const`s, because
        // mutating an instance of the `const` value will not mutate the const.
        // that is the *correct* behavior here, as the const is used just as an
        // array initializer; every time it's referenced, it *should* produce a
        // new value. therefore, this warning is incorrect in this case.
        //
        // see https://github.com/rust-lang/rust-clippy/issues/7665
        #[allow(clippy::declare_interior_mutable_const)]
        const ONE_FREE_LIST: Mutex<List<Free>> = Mutex::new(List::new());

        // ensure we don't split memory into regions too small to fit the free
        // block header in them.
        let free_block_size = mem::size_of::<Free>();
        if min_size < free_block_size {
            min_size = free_block_size;
        }
        // round the minimum block size up to the next power of two, if it isn't
        // a power of two (`size_of::<Free>` is *probably* 48 bytes on 64-bit
        // architectures...)
        min_size = min_size.next_power_of_two();
        Self {
            min_size,
            base_vaddr: AtomicUsize::new(usize::MAX),
            vm_offset: AtomicUsize::new(0),
            min_size_log2: mycelium_util::math::usize_const_log2_ceil(min_size),
            heap_size: AtomicUsize::new(0),
            allocated_size: AtomicUsize::new(0),
            free_lists: [ONE_FREE_LIST; FREE_LISTS],
        }
    }
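    // For example (illustrative values only): `Alloc::<32>::new(32)` would
    // round `min_size` up to `size_of::<Free>()` (probably 48 bytes on 64-bit
    // targets, per the comment above) and then up again to the next power of
    // two, yielding a 64-byte minimum block size; `Alloc::<32>::new(256)`
    // would keep 256 as-is, since it's already a power of two that fits a
    // `Free` header.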

    pub fn set_vm_offset(&self, offset: VAddr) {
        self.vm_offset
            .compare_exchange(0, offset.as_usize(), AcqRel, Acquire)
            .expect("dont do this twice lol");
    }

    /// Returns the minimum allocatable size, in bytes.
    pub fn min_size(&self) -> usize {
        self.min_size
    }

    /// Returns the total size of the allocator (allocated and free), in bytes.
    pub fn total_size(&self) -> usize {
        self.heap_size.load(Acquire)
    }

    /// Returns the currently allocated size in bytes.
    pub fn allocated_size(&self) -> usize {
        self.allocated_size.load(Acquire)
    }

    /// Returns the base virtual memory offset.
    // TODO(eliza): nicer way to configure this?
    fn offset(&self) -> usize {
        let addr = self.vm_offset.load(Relaxed);
        debug_assert_ne!(addr, 0, "you didn't initialize the heap yet dipshit");
        addr
    }

    /// Returns the size of the allocation for a given order
    fn size_for_order(&self, order: usize) -> usize {
        1 << (self.min_size_log2 + order)
    }

    /// Returns the actual size of the block that must be allocated to satisfy
    /// an allocation with the requested `layout`.
    fn size_for(&self, layout: Layout) -> Option<usize> {
        let mut size = layout.size();
        let align = layout.align();
        size = cmp::max(size, align);

        // Round up to the heap's minimum allocatable size.
        if size < self.min_size {
            tracing::trace!(
                size,
                min_size = self.min_size,
                layout.size = layout.size(),
                layout.align = layout.align(),
                "size is less than the minimum page size; rounding up"
            );
            size = self.min_size;
        }

        // Is the size a power of two?
        if !size.is_power_of_two() {
            let next_pow2 = size.next_power_of_two();
            tracing::trace!(
                layout.size = size,
                next_pow2,
                "size is not a power of two, rounding up..."
            );
            size = next_pow2;
        }
        // debug_assert!(
        //     size.is_power_of_two(),
        //     "somebody constructed a bad layout! don't do that!"
        // );

        // Is there enough room to meet this allocation request?
        let available = self.heap_size.load(Acquire);
        if size > available {
            tracing::error!(
                size,
                available,
                layout.size = layout.size(),
                layout.align = layout.align(),
                "out of memory!"
            );
            return None;
        }

        Some(size)
    }

    /// Returns the order of the free list from which an allocation with the
    /// requested `layout` would be served.
    fn order_for(&self, layout: Layout) -> Option<usize> {
        self.size_for(layout).map(|size| self.order_for_size(size))
    }

    /// Returns the order of a block of `size` bytes.
    fn order_for_size(&self, size: usize) -> usize {
        size.log2_ceil() - self.min_size_log2
    }
}
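// Worked example (illustrative values): with `min_size` = 256 (so
// `min_size_log2` = 8), a `Layout` of size 300 and align 8 is first clamped to
// max(300, 8) = 300, then rounded up to the next power of two, 512;
// `order_for_size(512)` = log2(512) - 8 = 1, so the allocation is served from
// `free_lists[1]`.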

impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
    pub fn dump_free_lists(&self) {
        for (order, list) in self.free_lists.as_ref().iter().enumerate() {
            let _span =
                tracing::debug_span!("free_list", order, size = self.size_for_order(order),)
                    .entered();
            list.try_with_lock(|list| {
                for entry in list.iter() {
                    tracing::debug!("entry={entry:?}");
                }
            })
            .unwrap_or_else(|| tracing::debug!("<THIS IS THE ONE WHERE THE PANIC HAPPENED LOL>"))
        }
    }

    /// Adds a memory region to the heap from which pages may be allocated.
    #[tracing::instrument(skip(self), level = "debug")]
    pub unsafe fn add_region(&self, region: Region) -> core::result::Result<(), ()> {
        // Is the region in use?
        if region.kind() != RegionKind::FREE {
            tracing::warn!(?region, "cannot add to page allocator, region is not free");
            return Err(());
        }

        let mut next_region = Some(region);
        while let Some(mut region) = next_region.take() {
            let size = region.size();
            let base = region.base_addr();
            let _span = tracing::debug_span!("adding_region", size, ?base).entered();

            // Is the region aligned on the heap's minimum page size? If not, we
            // need to align it.
            if !base.is_aligned(self.min_size) {
                let new_base = base.align_up(self.min_size);
                tracing::trace!(region.new_base = ?new_base, "base address not aligned!");
                region = Region::new(new_base, region.size(), RegionKind::FREE);
            }

            // Is the size of the region a power of two? The buddy block algorithm
            // requires each free block to be a power of two.
            if !size.is_power_of_two() {
                // If the region is not a power of two, split it down to the nearest
                // power of two.
                let prev_power_of_two = prev_power_of_two(size);
                tracing::debug!(prev_power_of_two, "not a power of two!");
                let region2 = region.split_back(prev_power_of_two).unwrap();

                // If the region we split off is larger than the minimum page size,
                // we can try to add it as well.
                if region2.size() >= self.min_size {
                    tracing::debug!("adding split-off region");
                    next_region = Some(region2);
                } else {
                    // Otherwise, we can't use it --- we'll have to leak it.
                    // TODO(eliza):
                    //  figure out a nice way to use stuff that won't fit for "some
                    //  other purpose"?
                    // NOTE:
                    //  in practice these might just be the two "bonus bytes" that
                    //  the free regions in our memory map have for some kind of
                    //  reason (on x86).
                    // TODO(eliza):
                    //  figure out why free regions in the memory map are all
                    //  misaligned by two bytes.
                    tracing::debug!(
                        region = ?region2,
                        min_size = self.min_size,
                        "leaking a region smaller than min page size"
                    );
                }
            }

            // Update the base virtual address of the heap.
            let region_vaddr = region.base_addr().as_usize() + self.offset();
            self.base_vaddr.fetch_min(region_vaddr, AcqRel);

            // ...and actually add the block to a free list.
            let block = Free::new(region, self.offset());
            unsafe { self.push_block(block) };
        }

        Ok(())
    }
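    // Worked example (illustrative sizes): adding a free region of 0x2_6000
    // bytes, which is not a power of two, splits off the front
    // prev_power_of_two(0x2_6000) = 0x2_0000 bytes as one free block; the
    // remaining 0x6000 bytes go back through the loop and are split again into
    // a 0x4000-byte block and a 0x2000-byte block (assuming both are at least
    // `min_size`), so the whole region ends up spread across three free lists.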

    unsafe fn alloc_inner(&self, layout: Layout) -> Option<ptr::NonNull<Free>> {
        // This is the minimum order necessary for the requested allocation ---
        // the first free list we'll check.
        let order = self.order_for(layout)?;
        tracing::trace!(?order);

        // Try each free list, starting at the minimum necessary order.
        for (idx, free_list) in self.free_lists.as_ref()[order..].iter().enumerate() {
            tracing::trace!(curr_order = idx + order);

            // Is there an available block on this free list?
            let allocated = free_list.with_lock(|free_list| {
                if let Some(mut block) = free_list.pop_back() {
                    let block = unsafe { block.as_mut() };
                    tracing::trace!(?block, ?free_list, "found");

                    // Unless this is the free list on which we'd expect to find a
                    // block of the requested size (the first free list we checked),
                    // the block is larger than the requested allocation. In that
                    // case, we'll need to split it down and push the remainder onto
                    // the appropriate free lists.
                    if idx > 0 {
                        let curr_order = idx + order;
                        tracing::trace!(?curr_order, ?order, "split down");
                        self.split_down(block, curr_order, order);
                    }

                    // Change the block's magic to indicate that it is allocated, so
                    // that we can avoid checking the free list if we try to merge
                    // it before the first word is written to.
                    block.make_busy();
                    tracing::trace!(?block, "made busy");
                    self.allocated_size.fetch_add(block.size(), Release);
                    Some(block.into())
                } else {
                    None
                }
            });
            if let Some(block) = allocated {
                return Some(block);
            }
        }
        None
    }
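    // Worked example (illustrative sizes, min_size = 64): allocating 64 bytes
    // when only a single 256-byte block (order 2) is free pops that block from
    // `free_lists[2]`, splits it down to order 0 (pushing a 128-byte block on
    // `free_lists[1]` and a 64-byte block on `free_lists[0]`), marks the
    // remaining 64-byte block busy, and returns it.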

    unsafe fn dealloc_inner(&self, paddr: PAddr, layout: Layout) -> Result<()> {
        // Find the order of the free list on which the freed range belongs.
        let min_order = self.order_for(layout);
        tracing::trace!(?min_order);
        let min_order = min_order.ok_or_else(AllocError::oom)?;

        let Some(size) = self.size_for(layout) else {
            // XXX(eliza): is it better to just leak it?
            panic!(
                "couldn't determine the correct layout for an allocation \
                we previously allocated successfully, what the actual fuck!\n \
                addr={:?}; layout={:?}; min_order={}",
                paddr, layout, min_order,
            )
        };

        // Construct a new free block.
        let mut block =
            unsafe { Free::new(Region::new(paddr, size, RegionKind::FREE), self.offset()) };

        // Starting at the minimum order on which the freed range will fit
        for (idx, free_list) in self.free_lists.as_ref()[min_order..].iter().enumerate() {
            let curr_order = idx + min_order;
            let done = free_list.with_lock(|free_list| {
                // Is there a free buddy block at this order?
                if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, free_list) } {
                    // Okay, merge the blocks, and try the next order!
                    if buddy < block {
                        mem::swap(&mut block, &mut buddy);
                    }
                    unsafe {
                        block.as_mut().merge(buddy.as_mut());
                    }
                    tracing::trace!(?buddy, ?block, "merged with buddy");
                    // Keep merging!
                    false
                } else {
                    // Okay, we can't keep merging, so push the block on the current
                    // free list.
                    free_list.push_front(block);
                    tracing::trace!("deallocated block");
                    self.allocated_size.fetch_sub(size, Release);
                    true
                }
            });
            if done {
                return Ok(());
            }
        }

        unreachable!("we will always iterate over at least one free list");
    }
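    // Worked example (illustrative sizes, min_size = 64): freeing a 64-byte
    // block whose order-0 buddy is already free merges the pair into a
    // 128-byte block; if that block's order-1 buddy is also free, they merge
    // again into a 256-byte block, which is then pushed onto `free_lists[2]`
    // once no further free buddy is found.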

    #[tracing::instrument(skip(self), level = "trace")]
    unsafe fn push_block(&self, block: ptr::NonNull<Free>) {
        let block_size = block.as_ref().size();
        let order = self.order_for_size(block_size);
        tracing::trace!(block = ?block.as_ref(), block.order = order);
        let free_lists = self.free_lists.as_ref();
        if order >= free_lists.len() {
            todo!("(eliza): choppity chop chop down the block!");
        }
        free_lists[order].with_lock(|list| list.push_front(block));
        let mut sz = self.heap_size.load(Acquire);
        while let Err(actual) =
            // TODO(eliza): if this overflows that's bad news lol...
            self.heap_size
                    .compare_exchange_weak(sz, sz + block_size, AcqRel, Acquire)
        {
            sz = actual;
        }
    }

    /// Removes `block`'s buddy from the free list and returns it, if it is free
    ///
    /// The "buddy" of a block is the block from which that block was split off
    /// to reach its current order, and therefore the block with which it could
    /// be merged to reach the target order.
    unsafe fn take_buddy(
        &self,
        block: ptr::NonNull<Free>,
        order: usize,
        free_list: &mut List<Free>,
    ) -> Option<ptr::NonNull<Free>> {
        let size = self.size_for_order(order);
        let base = self.base_vaddr.load(Relaxed);

        if base == usize::MAX {
            // This is a bug.
            tracing::error!("cannot find buddy block; heap not initialized!");
            return None;
        }

        tracing::trace!(
            heap.base = fmt::hex(base),
            block.addr = ?block,
            block.order = order,
            block.size = size,
            "calculating buddy"
        );

        // Find the relative offset of `block` from the base of the heap.
        let rel_offset = block.as_ptr() as usize - base;
        // A block's buddy is the other half of the (twice as large) block it
        // was split from, so it lives at the block's offset XOR'd with its size.
        let buddy_offset = rel_offset ^ size;
        let buddy = (base + buddy_offset) as *mut Free;
        tracing::trace!(
            block.rel_offset = fmt::hex(rel_offset),
            buddy.offset = fmt::hex(buddy_offset),
            buddy.addr = ?buddy,
        );

        if ptr::eq(buddy as *const _, block.as_ptr() as *const _) {
            tracing::trace!("buddy block is the same as self");
            return None;
        }

        let buddy = unsafe {
            // Safety: we constructed this address via a usize add of two
            // values we know are not 0, so this should not be null, and
            // it's okay to use `new_unchecked`.
            // TODO(eliza): we should probably die horribly if that add
            // *does* overflow i guess...
            ptr::NonNull::new_unchecked(buddy)
        };

        // Check if the buddy block is definitely in use before removing it from
        // the free list.
        //
        // `is_maybe_free` returns a *hint* --- if it returns `false`, we know
        // the block is in use, so we don't have to remove it from the free list.
        let block = unsafe { buddy.as_ref() };
        if block.is_maybe_free() {
            tracing::trace!(
                buddy.block = ?block,
                buddy.addr = ?buddy, "trying to remove buddy..."
            );
            debug_assert_eq!(block.size(), size, "buddy block did not have correct size");
            // Okay, now try to remove the buddy from its free list. If it's not
            // free, this will return `None`.
            return free_list.remove(buddy);
        }

        // Otherwise, the buddy block is currently in use.
        None
    }
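    // Worked example (illustrative addresses): with a heap base of 0x10_0000
    // and an order whose block size is 0x1000, a block at relative offset
    // 0x3000 has its buddy at 0x3000 ^ 0x1000 = 0x2000 (absolute 0x10_2000);
    // conversely, the block at relative offset 0x2000 has its buddy at 0x3000.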

    /// Split a block of order `order` down to order `target_order`.
    #[tracing::instrument(skip(self), level = "trace")]
    fn split_down(&self, block: &mut Free, mut order: usize, target_order: usize) {
        let mut size = block.size();
        debug_assert_eq!(
            size,
            self.size_for_order(order),
            "a block was a weird size for some reason"
        );

        let free_lists = self.free_lists.as_ref();
        while order > target_order {
            order -= 1;
            size >>= 1;

            tracing::trace!(order, target_order, size, ?block, "split at");
            let new_block = block
                .split_back(size, self.offset())
                .expect("block too small to split!");
            tracing::trace!(?block, ?new_block);
            free_lists[order].with_lock(|list| list.push_front(new_block));
        }
    }
}

unsafe impl<S, const FREE_LISTS: usize> page::Alloc<S> for Alloc<FREE_LISTS>
where
    S: Size + fmt::Display,
{
    /// Allocate a range of at least `len` pages.
    ///
    /// If `len` is not a power of two, the length is rounded up to the next
    /// power of two. The returned `PageRange` struct stores the actual length
    /// of the allocated page range.
    ///
    /// # Returns
    /// - `Ok(PageRange)` if a range of pages was successfully allocated
    /// - `Err` if the requested range could not be satisfied by this allocator.
    fn alloc_range(&self, size: S, len: usize) -> Result<PageRange<PAddr, S>> {
        let span = tracing::trace_span!("alloc_range", size = size.as_usize(), len);
        let _e = span.enter();

        debug_assert!(
            size.as_usize().is_power_of_two(),
            "page size must be a power of 2; size={size}",
        );

        let actual_len = if len.is_power_of_two() {
            len
        } else {
            let next = len.next_power_of_two();
            tracing::debug!(
                requested.len = len,
                rounded.len = next,
                "rounding up page range length to next power of 2"
            );
            next
        };

        let total_size = size
            .as_usize()
            // If the size of the page range would overflow, we *definitely*
            // can't allocate that lol.
            .checked_mul(actual_len)
            .ok_or_else(AllocError::oom)?;

        debug_assert!(
            total_size.is_power_of_two(),
            "total size of page range must be a power of 2; total_size={total_size} size={size} len={actual_len}",
        );

        #[cfg(debug_assertions)]
        let layout = Layout::from_size_align(total_size, size.as_usize())
            .expect("page ranges should have valid (power of 2) size/align");
        #[cfg(not(debug_assertions))]
        let layout = unsafe {
            // Safety: we expect all page sizes to be powers of 2.
            Layout::from_size_align_unchecked(total_size, size.as_usize())
        };

        // Try to allocate the page range
        let block = unsafe { self.alloc_inner(layout) }.ok_or_else(AllocError::oom)?;

        // Return the allocation!
        let range = unsafe { block.as_ref() }.region().page_range(size);
        tracing::debug!(
            ?range,
            requested.size = size.as_usize(),
            requested.len = len,
            actual.len = actual_len,
            "allocated"
        );
        range.map_err(Into::into)
    }
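    // Worked example (illustrative values): allocating 3 pages of a 4 KiB page
    // size rounds the length up to 4 pages, computes a total size of 16 KiB,
    // and builds a `Layout` with size 16384 and alignment 4096 before handing
    // it to `alloc_inner`. (The 4 KiB size here is just a stand-in for
    // whichever page size type `S` the HAL provides.)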

    /// Deallocate a range of pages.
    ///
    /// # Returns
    /// - `Ok(())` if a range of pages was successfully deallocated
    /// - `Err` if the requested range could not be deallocated.
    fn dealloc_range(&self, range: PageRange<PAddr, S>) -> Result<()> {
        let page_size = range.page_size().as_usize();
        let len = range.len();
        let base = range.base_addr();
        let span = tracing::trace_span!(
            "dealloc_range",
            range.base = ?base,
            range.page_size = page_size,
            range.len = len
        );
        let _e = span.enter();

        let total_size = page_size
            .checked_mul(len)
            .expect("page range size shouldn't overflow, this is super bad news");

        #[cfg(debug_assertions)]
        let layout = Layout::from_size_align(total_size, page_size)
            .expect("page ranges should be well-aligned");
        #[cfg(not(debug_assertions))]
        let layout = unsafe {
            // Safety: we expect page ranges to be well-aligned.
            Layout::from_size_align_unchecked(total_size, page_size)
        };

        unsafe {
            self.dealloc_inner(base, layout)?;
        }

        tracing::debug!(
            range.base = ?range.base_addr(),
            range.page_size = range.page_size().as_usize(),
            range.len = range.len(),
            "deallocated"
        );
        Ok(())
    }
}

unsafe impl<const FREE_LISTS: usize> GlobalAlloc for Alloc<FREE_LISTS> {
    #[tracing::instrument(level = "trace", skip(self))]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.alloc_inner(layout)
            .map(ptr::NonNull::as_ptr)
            .unwrap_or_else(ptr::null_mut)
            .cast::<u8>()
    }

    #[tracing::instrument(level = "trace", skip(self))]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let addr = match (ptr as usize).checked_sub(self.offset()) {
            Some(addr) => addr,
            None => panic!(
                "pointer is not to a kernel VAddr! ptr={ptr:p}; offset={:x}",
                self.offset()
            ),
        };

        let addr = PAddr::from_usize(addr);
        match self.dealloc_inner(addr, layout) {
            Ok(_) => {}
            Err(_) => panic!(
                "deallocating {addr:?} with layout {layout:?} failed! this shouldn't happen!",
            ),
        }
    }
}
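// For example (illustrative addresses): if `vm_offset` were the higher-half
// offset 0xFFFF_8000_0000_0000, a pointer passed to `dealloc` at
// 0xFFFF_8000_0010_0000 would map back to the physical address 0x10_0000
// before being handed to `dealloc_inner`.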

// ==== impl Free ====

impl Free {
    const MAGIC: usize = 0xF4EE_B10C; // haha lol it spells "free block"
    const MAGIC_BUSY: usize = 0xB4D_B10C;

    /// # Safety
    ///
    /// Don't construct a free list entry for a region that isn't actually free,
    /// that would be, uh, bad, lol.
    pub unsafe fn new(region: Region, offset: usize) -> ptr::NonNull<Free> {
        tracing::trace!(?region, offset = fmt::hex(offset));

        let ptr = ((region.base_addr().as_ptr::<Free>() as usize) + offset) as *mut _;
        let nn = ptr::NonNull::new(ptr)
            .expect("definitely don't try to free the zero page; that's evil");

        ptr::write_volatile(
            ptr,
            Free {
                magic: Self::MAGIC,
                links: list::Links::default(),
                meta: region,
            },
        );
        nn
    }

    pub fn split_front(&mut self, size: usize, offset: usize) -> Option<ptr::NonNull<Self>> {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={:#?}, self.magic={:#x}",
            self,
            self.magic
        );
        let new_meta = self.meta.split_front(size)?;
        let new_free = unsafe { Self::new(new_meta, offset) };
        Some(new_free)
    }

    pub fn split_back(&mut self, size: usize, offset: usize) -> Option<ptr::NonNull<Self>> {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={self:#?}, self.magic={:#x}",
            self.magic
        );
        debug_assert!(
            !self.links.is_linked(),
            "tried to split a block while it was on a free list!"
        );

        let new_meta = self.meta.split_back(size)?;
        debug_assert_ne!(new_meta, self.meta);
        debug_assert_eq!(new_meta.size(), size);
        debug_assert_eq!(self.meta.size(), size);
        tracing::trace!(?new_meta, ?self.meta, "split meta");

        let new_free = unsafe { Self::new(new_meta, offset) };
        debug_assert_ne!(new_free, ptr::NonNull::from(self));

        Some(new_free)
    }

    pub fn merge(&mut self, other: &mut Self) {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={self:#?}, self.magic={:#x}",
            self.magic
        );
        debug_assert_eq!(
            other.magic,
            Self::MAGIC,
            "THEIR MAGIC WAS MESSED UP! other={other:#?}, other.magic={:#x}",
            other.magic
        );
        assert!(
            !other.links.is_linked(),
            "tried to merge with a block that's already linked! other={other:?}",
        );
        self.meta.merge(&mut other.meta)
    }

    pub fn region(&self) -> Region {
        self.meta.clone() // XXX(eliza): `Region` should probly be `Copy`.
    }

    pub fn size(&self) -> usize {
        self.meta.size()
    }

    /// Returns `true` if the region *might* be free.
    ///
    /// If this returns false, the region is *definitely* not free. If it
    /// returns true, the region is *possibly* free, and the free list should be
    /// checked.
    ///
    /// This is intended as a hint to avoid traversing the free list for blocks
    /// which are definitely in use, not as an authoritative source.
    #[inline]
    pub fn is_maybe_free(&self) -> bool {
        self.magic == Self::MAGIC
    }

    pub fn make_busy(&mut self) {
        self.magic = Self::MAGIC_BUSY;
    }
}

unsafe impl Linked<list::Links<Self>> for Free {
    type Handle = ptr::NonNull<Free>;

    #[inline]
    fn into_ptr(r: Self::Handle) -> ptr::NonNull<Self> {
        r
    }

    #[inline]
    unsafe fn from_ptr(ptr: ptr::NonNull<Self>) -> Self::Handle {
        ptr
    }

    #[inline]
    unsafe fn links(ptr: ptr::NonNull<Self>) -> ptr::NonNull<list::Links<Self>> {
        // Safety: using `ptr::addr_of_mut!` avoids creating a temporary
        // reference, which stacked borrows dislikes.
        let links = ptr::addr_of_mut!((*ptr.as_ptr()).links);
        // Safety: it's fine to use `new_unchecked` here; if the pointer that we
        // offset to the `links` field is not null (which it shouldn't be, as we
        // received it as a `NonNull`), the offset pointer should therefore also
        // not be null.
        ptr::NonNull::new_unchecked(links)
    }
}

impl fmt::Debug for Free {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { magic, links, meta } = self;
        f.debug_struct("Free")
            .field("magic", &fmt::hex(magic))
            .field("links", links)
            .field("meta", meta)
            .finish()
    }
}

fn prev_power_of_two(n: usize) -> usize {
    (n / 2 + 1).next_power_of_two()
}
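
// A minimal sanity-check sketch for the helper math above. It assumes a hosted
// `cargo test` build (not loom) and only exercises pure arithmetic, so no real
// heap region is needed; the constants are arbitrary example values.
#[cfg(all(test, not(loom)))]
mod buddy_math_tests {
    use super::*;

    #[test]
    fn prev_power_of_two_is_largest_pow2_le_n() {
        assert_eq!(prev_power_of_two(1), 1);
        assert_eq!(prev_power_of_two(5), 4);
        assert_eq!(prev_power_of_two(8), 8);
        assert_eq!(prev_power_of_two(9), 8);
        assert_eq!(prev_power_of_two(0x2_6000), 0x2_0000);
    }

    #[test]
    fn order_math_round_trips() {
        // 256 is a power of two comfortably larger than `size_of::<Free>()`,
        // so `new` should keep it as the minimum block size.
        let alloc = Alloc::<32>::new(256);
        assert_eq!(alloc.min_size(), 256);
        assert_eq!(alloc.size_for_order(0), 256);
        assert_eq!(alloc.size_for_order(3), 2048);
        assert_eq!(alloc.order_for_size(256), 0);
        assert_eq!(alloc.order_for_size(2048), 3);
    }
}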