use core::{
    alloc::{GlobalAlloc, Layout},
    cmp, mem, ptr,
};
use hal_core::{
    mem::{
        page::{self, AllocError, PageRange, Size},
        Region, RegionKind,
    },
    Address, PAddr, VAddr,
};
use mycelium_util::fmt;
use mycelium_util::intrusive::{list, Linked, List};
use mycelium_util::math::Logarithm;
use mycelium_util::sync::{
    atomic::{AtomicUsize, Ordering::*},
    blocking::Mutex,
};

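/// A buddy-block page allocator.
///
/// The heap is divided into power-of-two sized blocks. `FREE_LISTS` intrusive
/// free lists track the free blocks, one list per size class ("order"),
/// starting at `min_size` bytes. Allocating splits larger blocks down to the
/// requested order; deallocating merges a freed block with its buddy whenever
/// the buddy is also free.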
#[derive(Debug)]
pub struct Alloc<const FREE_LISTS: usize> {
    min_size: usize,

    base_vaddr: AtomicUsize,
    vm_offset: AtomicUsize,

    min_size_log2: usize,

    heap_size: AtomicUsize,

    allocated_size: AtomicUsize,

    free_lists: [Mutex<List<Free>>; FREE_LISTS],
}

type Result<T> = core::result::Result<T, AllocError>;

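/// The header of a free block.
///
/// A `Free` is written at the start of each free block so that the block can
/// be linked into the free list for its size class. `magic` distinguishes free
/// blocks from allocated ones, and `meta` records the block's region.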
pub struct Free {
    magic: usize,
    links: list::Links<Self>,
    meta: Region,
}

impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
    #[cfg(not(loom))]
    pub const fn new(mut min_size: usize) -> Self {
        #[allow(clippy::declare_interior_mutable_const)]
        const ONE_FREE_LIST: Mutex<List<Free>> = Mutex::new(List::new());

        let free_block_size = mem::size_of::<Free>();
        if min_size < free_block_size {
            min_size = free_block_size;
        }
        min_size = min_size.next_power_of_two();
        Self {
            min_size,
            base_vaddr: AtomicUsize::new(usize::MAX),
            vm_offset: AtomicUsize::new(0),
            min_size_log2: mycelium_util::math::usize_const_log2_ceil(min_size),
            heap_size: AtomicUsize::new(0),
            allocated_size: AtomicUsize::new(0),
            free_lists: [ONE_FREE_LIST; FREE_LISTS],
        }
    }

    pub fn set_vm_offset(&self, offset: VAddr) {
        self.vm_offset
            .compare_exchange(0, offset.as_usize(), AcqRel, Acquire)
            .expect("the VM offset should only be set once!");
    }

    pub fn min_size(&self) -> usize {
        self.min_size
    }

    pub fn total_size(&self) -> usize {
        self.heap_size.load(Acquire)
    }

    pub fn allocated_size(&self) -> usize {
        self.allocated_size.load(Acquire)
    }

    fn offset(&self) -> usize {
        let addr = self.vm_offset.load(Relaxed);
        debug_assert_ne!(addr, 0, "the heap's VM offset has not been initialized yet!");
        addr
    }

    fn size_for_order(&self, order: usize) -> usize {
        1 << (self.min_size_log2 + order)
    }

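    /// Returns the actual size, in bytes, to allocate for the requested
    /// `layout`: at least `min_size` and at least the layout's alignment,
    /// rounded up to the next power of two. Returns `None` if the rounded
    /// size exceeds the total size of the heap.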
    fn size_for(&self, layout: Layout) -> Option<usize> {
        let mut size = layout.size();
        let align = layout.align();
        size = cmp::max(size, align);

        if size < self.min_size {
            tracing::trace!(
                size,
                min_size = self.min_size,
                layout.size = layout.size(),
                layout.align = layout.align(),
                "size is less than the minimum page size; rounding up"
            );
            size = self.min_size;
        }

        if !size.is_power_of_two() {
            let next_pow2 = size.next_power_of_two();
            tracing::trace!(
                layout.size = size,
                next_pow2,
                "size is not a power of two, rounding up..."
            );
            size = next_pow2;
        }
        let available = self.heap_size.load(Acquire);
        if size > available {
            tracing::error!(
                size,
                available,
                layout.size = layout.size(),
                layout.align = layout.align(),
                "out of memory!"
            );
            return None;
        }

        Some(size)
    }

    fn order_for(&self, layout: Layout) -> Option<usize> {
        self.size_for(layout).map(|size| self.order_for_size(size))
    }

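    /// Returns the index of the free list for blocks of `size` bytes: the
    /// number of doublings of `min_size` needed to reach `size`.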
    fn order_for_size(&self, size: usize) -> usize {
        size.log2_ceil() - self.min_size_log2
    }
}

impl<const FREE_LISTS: usize> Alloc<FREE_LISTS> {
    pub fn dump_free_lists(&self) {
        for (order, list) in self.free_lists.as_ref().iter().enumerate() {
            let _span =
                tracing::debug_span!("free_list", order, size = self.size_for_order(order))
                    .entered();
            list.try_with_lock(|list| {
                for entry in list.iter() {
                    tracing::debug!("entry={entry:?}");
                }
            })
            .unwrap_or_else(|| tracing::debug!("<free list locked (this is probably the list that panicked)>"));
        }
    }

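    /// Adds a free memory region to the allocator.
    ///
    /// The region's base address is aligned up to `min_size`, and the region
    /// is split into power-of-two-sized chunks, each of which is pushed onto
    /// the free list for its size class. Returns `Err(())` if the region is
    /// not [`RegionKind::FREE`].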
    #[tracing::instrument(skip(self), level = "debug")]
    pub unsafe fn add_region(&self, region: Region) -> core::result::Result<(), ()> {
        if region.kind() != RegionKind::FREE {
            tracing::warn!(?region, "cannot add to page allocator, region is not free");
            return Err(());
        }

        let mut next_region = Some(region);
        while let Some(mut region) = next_region.take() {
            let size = region.size();
            let base = region.base_addr();
            let _span = tracing::debug_span!("adding_region", size, ?base).entered();

            // if the base address is not aligned to the minimum block size,
            // align it up, shrinking the region by the number of bytes skipped.
            if !base.is_aligned(self.min_size) {
                let new_base = base.align_up(self.min_size);
                tracing::trace!(region.new_base = ?new_base, "base address not aligned!");
                let skipped = new_base.as_usize() - base.as_usize();
                region = Region::new(new_base, size - skipped, RegionKind::FREE);
            }

            let size = region.size();
            if !size.is_power_of_two() {
                let prev_power_of_two = prev_power_of_two(size);
                tracing::debug!(prev_power_of_two, "not a power of two!");
                let region2 = region.split_back(prev_power_of_two).unwrap();

                if region2.size() >= self.min_size {
                    tracing::debug!("adding split-off region");
                    next_region = Some(region2);
                } else {
                    tracing::debug!(
                        region = ?region2,
                        min_size = self.min_size,
                        "leaking a region smaller than min page size"
                    );
                }
            }

            let region_vaddr = region.base_addr().as_usize() + self.offset();
            self.base_vaddr.fetch_min(region_vaddr, AcqRel);

            let block = Free::new(region, self.offset());
            unsafe { self.push_block(block) };
        }

        Ok(())
    }

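    /// Allocates a block large enough for `layout`, searching the free lists
    /// from the smallest order that fits upward. If a larger block is found,
    /// it is split down to the requested order before being returned. Returns
    /// `None` if the heap cannot satisfy the request.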
    unsafe fn alloc_inner(&self, layout: Layout) -> Option<ptr::NonNull<Free>> {
        let order = self.order_for(layout)?;
        tracing::trace!(?order);

        for (idx, free_list) in self.free_lists.as_ref()[order..].iter().enumerate() {
            tracing::trace!(curr_order = idx + order);

            let allocated = free_list.with_lock(|free_list| {
                if let Some(mut block) = free_list.pop_back() {
                    let block = unsafe { block.as_mut() };
                    tracing::trace!(?block, ?free_list, "found");

                    if idx > 0 {
                        let curr_order = idx + order;
                        tracing::trace!(?curr_order, ?order, "split down");
                        self.split_down(block, curr_order, order);
                    }

                    block.make_busy();
                    tracing::trace!(?block, "made busy");
                    self.allocated_size.fetch_add(block.size(), Release);
                    Some(block.into())
                } else {
                    None
                }
            });
            if let Some(block) = allocated {
                return Some(block);
            }
        }
        None
    }

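    /// Returns the block at `paddr` to the free lists.
    ///
    /// Starting at the block's own order, the freed block is merged with its
    /// buddy whenever the buddy is also free, moving up one order per merge;
    /// the final (possibly coalesced) block is then pushed onto the free list
    /// for its order.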
    unsafe fn dealloc_inner(&self, paddr: PAddr, layout: Layout) -> Result<()> {
        let min_order = self.order_for(layout);
        tracing::trace!(?min_order);
        let min_order = min_order.ok_or_else(AllocError::oom)?;

        let Some(size) = self.size_for(layout) else {
            panic!(
                "couldn't determine the correct layout for an allocation \
                 we previously allocated successfully; this should never happen!\n \
                 addr={:?}; layout={:?}; min_order={}",
                paddr, layout, min_order,
            )
        };

        let mut block =
            unsafe { Free::new(Region::new(paddr, size, RegionKind::FREE), self.offset()) };

        for (idx, free_list) in self.free_lists.as_ref()[min_order..].iter().enumerate() {
            let curr_order = idx + min_order;
            let done = free_list.with_lock(|free_list| {
                if let Some(mut buddy) = unsafe { self.take_buddy(block, curr_order, free_list) } {
                    // keep the lower-addressed block as the header of the merged block.
                    if buddy < block {
                        mem::swap(&mut block, &mut buddy);
                    }
                    unsafe {
                        block.as_mut().merge(buddy.as_mut());
                    }
                    tracing::trace!(?buddy, ?block, "merged with buddy");
                    false
                } else {
                    free_list.push_front(block);
                    tracing::trace!("deallocated block");
                    self.allocated_size.fetch_sub(size, Release);
                    true
                }
            });
            if done {
                return Ok(());
            }
        }

        unreachable!("we will always iterate over at least one free list");
    }

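    /// Pushes a block onto the free list for its size, and adds the block's
    /// size to the total size of the heap.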
    #[tracing::instrument(skip(self), level = "trace")]
    unsafe fn push_block(&self, block: ptr::NonNull<Free>) {
        let block_size = block.as_ref().size();
        let order = self.order_for_size(block_size);
        tracing::trace!(block = ?block.as_ref(), block.order = order);
        let free_lists = self.free_lists.as_ref();
        if order >= free_lists.len() {
            todo!("(eliza): choppity chop chop down the block!");
        }
        free_lists[order].with_lock(|list| list.push_front(block));
        let mut sz = self.heap_size.load(Acquire);
        while let Err(actual) =
            self.heap_size
                .compare_exchange_weak(sz, sz + block_size, AcqRel, Acquire)
        {
            sz = actual;
        }
    }

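    /// Removes `block`'s buddy from `free_list`, if the buddy is free.
    ///
    /// The buddy of a block of size `s` at offset `o` (relative to the base of
    /// the heap) is the block at offset `o ^ s`: the other half of the
    /// size-`2s` block that the two were split from.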
    unsafe fn take_buddy(
        &self,
        block: ptr::NonNull<Free>,
        order: usize,
        free_list: &mut List<Free>,
    ) -> Option<ptr::NonNull<Free>> {
        let size = self.size_for_order(order);
        let base = self.base_vaddr.load(Relaxed);

        if base == usize::MAX {
            tracing::error!("cannot find buddy block; heap not initialized!");
            return None;
        }

        tracing::trace!(
            heap.base = fmt::hex(base),
            block.addr = ?block,
            block.order = order,
            block.size = size,
            "calculating buddy"
        );

        let rel_offset = block.as_ptr() as usize - base;
        // the buddy is the sibling half of the next-larger block: flip the bit
        // corresponding to this order's block size.
        let buddy_offset = rel_offset ^ size;
        let buddy = (base + buddy_offset) as *mut Free;
        tracing::trace!(
            block.rel_offset = fmt::hex(rel_offset),
            buddy.offset = fmt::hex(buddy_offset),
            buddy.addr = ?buddy,
        );

        if ptr::eq(buddy as *const _, block.as_ptr() as *const _) {
            tracing::trace!("buddy block is the same as self");
            return None;
        }

        let buddy = unsafe { ptr::NonNull::new_unchecked(buddy) };

        let buddy_header = unsafe { buddy.as_ref() };
        if buddy_header.is_maybe_free() {
            tracing::trace!(
                buddy.block = ?buddy_header,
                buddy.addr = ?buddy,
                "trying to remove buddy..."
            );
            debug_assert_eq!(
                buddy_header.size(),
                size,
                "buddy block did not have correct size"
            );
            return free_list.remove(buddy);
        }

        None
    }

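    /// Splits `block` (currently of `order`) in half repeatedly until it is of
    /// `target_order`, pushing each split-off half onto the free list for its
    /// order.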
    #[tracing::instrument(skip(self), level = "trace")]
    fn split_down(&self, block: &mut Free, mut order: usize, target_order: usize) {
        let mut size = block.size();
        debug_assert_eq!(
            size,
            self.size_for_order(order),
            "a block was a weird size for some reason"
        );

        let free_lists = self.free_lists.as_ref();
        while order > target_order {
            order -= 1;
            size >>= 1;

            tracing::trace!(order, target_order, size, ?block, "split at");
            let new_block = block
                .split_back(size, self.offset())
                .expect("block too small to split!");
            tracing::trace!(?block, ?new_block);
            free_lists[order].with_lock(|list| list.push_front(new_block));
        }
    }
}

unsafe impl<S, const FREE_LISTS: usize> page::Alloc<S> for Alloc<FREE_LISTS>
where
    S: Size + fmt::Display,
{
    fn alloc_range(&self, size: S, len: usize) -> Result<PageRange<PAddr, S>> {
        let span = tracing::trace_span!("alloc_range", size = size.as_usize(), len);
        let _e = span.enter();

        debug_assert!(
            size.as_usize().is_power_of_two(),
            "page size must be a power of 2; size={size}",
        );

        let actual_len = if len.is_power_of_two() {
            len
        } else {
            let next = len.next_power_of_two();
            tracing::debug!(
                requested.len = len,
                rounded.len = next,
                "rounding up page range length to next power of 2"
            );
            next
        };

        let total_size = size
            .as_usize()
            .checked_mul(actual_len)
            .ok_or_else(AllocError::oom)?;

        debug_assert!(
            total_size.is_power_of_two(),
            "total size of page range must be a power of 2; total_size={total_size} size={size} len={actual_len}",
        );

        #[cfg(debug_assertions)]
        let layout = Layout::from_size_align(total_size, size.as_usize())
            .expect("page ranges should have valid (power of 2) size/align");
        #[cfg(not(debug_assertions))]
        let layout =
            unsafe { Layout::from_size_align_unchecked(total_size, size.as_usize()) };

        let block = unsafe { self.alloc_inner(layout) }.ok_or_else(AllocError::oom)?;

        let range = unsafe { block.as_ref() }.region().page_range(size);
        tracing::debug!(
            ?range,
            requested.size = size.as_usize(),
            requested.len = len,
            actual.len = actual_len,
            "allocated"
        );
        range.map_err(Into::into)
    }

    fn dealloc_range(&self, range: PageRange<PAddr, S>) -> Result<()> {
        let page_size = range.page_size().as_usize();
        let len = range.len();
        let base = range.base_addr();
        let span = tracing::trace_span!(
            "dealloc_range",
            range.base = ?base,
            range.page_size = page_size,
            range.len = len
        );
        let _e = span.enter();

        let total_size = page_size
            .checked_mul(len)
            .expect("page range size shouldn't overflow, this is super bad news");

        #[cfg(debug_assertions)]
        let layout = Layout::from_size_align(total_size, page_size)
            .expect("page ranges should be well-aligned");
        #[cfg(not(debug_assertions))]
        let layout = unsafe { Layout::from_size_align_unchecked(total_size, page_size) };

        unsafe {
            self.dealloc_inner(base, layout)?;
        }

        tracing::debug!(
            range.base = ?range.base_addr(),
            range.page_size = range.page_size().as_usize(),
            range.len = range.len(),
            "deallocated"
        );
        Ok(())
    }
}

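// The `GlobalAlloc` implementation hands out *virtual* addresses: `alloc`
// returns the block's address as stored in the free list (which already
// includes the VM offset), and `dealloc` subtracts the VM offset to recover
// the physical address before returning the block to the allocator.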
unsafe impl<const FREE_LISTS: usize> GlobalAlloc for Alloc<FREE_LISTS> {
    #[tracing::instrument(level = "trace", skip(self))]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.alloc_inner(layout)
            .map(ptr::NonNull::as_ptr)
            .unwrap_or_else(ptr::null_mut)
            .cast::<u8>()
    }

    #[tracing::instrument(level = "trace", skip(self))]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let addr = match (ptr as usize).checked_sub(self.offset()) {
            Some(addr) => addr,
            None => panic!(
                "pointer is not to a kernel VAddr! ptr={ptr:p}; offset={:x}",
                self.offset()
            ),
        };

        let addr = PAddr::from_usize(addr);
        match self.dealloc_inner(addr, layout) {
            Ok(_) => {}
            Err(_) => panic!(
                "deallocating {addr:?} with layout {layout:?} failed! this shouldn't happen!",
            ),
        }
    }
}

impl Free {
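    // Magic values written into a block's header: `MAGIC` while the block is
    // on a free list, `MAGIC_BUSY` once it has been allocated. `is_maybe_free`
    // uses them as a heuristic when checking whether a buddy block is free.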
    const MAGIC: usize = 0xF4EE_B10C;
    const MAGIC_BUSY: usize = 0xB4D_B10C;

    pub unsafe fn new(region: Region, offset: usize) -> ptr::NonNull<Free> {
        tracing::trace!(?region, offset = fmt::hex(offset));

        let ptr = ((region.base_addr().as_ptr::<Free>() as usize) + offset) as *mut _;
        let nn = ptr::NonNull::new(ptr)
            .expect("definitely don't try to free the zero page; that's evil");

        ptr::write_volatile(
            ptr,
            Free {
                magic: Self::MAGIC,
                links: list::Links::default(),
                meta: region,
            },
        );
        nn
    }

    pub fn split_front(&mut self, size: usize, offset: usize) -> Option<ptr::NonNull<Self>> {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={:#?}, self.magic={:#x}",
            self,
            self.magic
        );
        let new_meta = self.meta.split_front(size)?;
        let new_free = unsafe { Self::new(new_meta, offset) };
        Some(new_free)
    }

    pub fn split_back(&mut self, size: usize, offset: usize) -> Option<ptr::NonNull<Self>> {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={self:#?}, self.magic={:#x}",
            self.magic
        );
        debug_assert!(
            !self.links.is_linked(),
            "tried to split a block while it was on a free list!"
        );

        let new_meta = self.meta.split_back(size)?;
        debug_assert_ne!(new_meta, self.meta);
        debug_assert_eq!(new_meta.size(), size);
        debug_assert_eq!(self.meta.size(), size);
        tracing::trace!(?new_meta, ?self.meta, "split meta");

        let new_free = unsafe { Self::new(new_meta, offset) };
        debug_assert_ne!(new_free, ptr::NonNull::from(self));

        Some(new_free)
    }

    pub fn merge(&mut self, other: &mut Self) {
        debug_assert_eq!(
            self.magic,
            Self::MAGIC,
            "MY MAGIC WAS MESSED UP! self={self:#?}, self.magic={:#x}",
            self.magic
        );
        debug_assert_eq!(
            other.magic,
            Self::MAGIC,
            "THEIR MAGIC WAS MESSED UP! other={other:#?}, other.magic={:#x}",
            other.magic
        );
        assert!(
            !other.links.is_linked(),
            "tried to merge with a block that's already linked! other={other:?}",
        );
        self.meta.merge(&mut other.meta)
    }

    pub fn region(&self) -> Region {
        self.meta.clone()
    }

    pub fn size(&self) -> usize {
        self.meta.size()
    }

    #[inline]
    pub fn is_maybe_free(&self) -> bool {
        self.magic == Self::MAGIC
    }

    pub fn make_busy(&mut self) {
        self.magic = Self::MAGIC_BUSY;
    }
}

unsafe impl Linked<list::Links<Self>> for Free {
    type Handle = ptr::NonNull<Free>;

    #[inline]
    fn into_ptr(r: Self::Handle) -> ptr::NonNull<Self> {
        r
    }

    #[inline]
    unsafe fn from_ptr(ptr: ptr::NonNull<Self>) -> Self::Handle {
        ptr
    }

    #[inline]
    unsafe fn links(ptr: ptr::NonNull<Self>) -> ptr::NonNull<list::Links<Self>> {
        let links = ptr::addr_of_mut!((*ptr.as_ptr()).links);
        ptr::NonNull::new_unchecked(links)
    }
}

impl fmt::Debug for Free {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { magic, links, meta } = self;
        f.debug_struct("Free")
            .field("magic", &fmt::hex(magic))
            .field("links", links)
            .field("meta", meta)
            .finish()
    }
}

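/// Returns the largest power of two that is less than or equal to `n`.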
fn prev_power_of_two(n: usize) -> usize {
    // the smallest power of two >= `n / 2 + 1` is also the largest power of
    // two <= `n`.
    (n / 2 + 1).next_power_of_two()
}