// cordyceps/stack.rs
//! [Intrusive], singly-linked last-in, first-out (LIFO) stacks.
2//!
3//! See the documentation for the [`Stack`] and [`TransferStack`] types for
4//! details.
5//!
6//! [intrusive]: crate#intrusive-data-structures
7#![warn(missing_debug_implementations)]
8
9use crate::{loom::cell::UnsafeCell, Linked};
10use core::{fmt, marker::PhantomPinned, ptr::NonNull};
11
12#[cfg(target_has_atomic = "ptr")]
13pub use has_cas_atomics::*;
14
15/// Items exclusive to targets with CAS atomics
16#[cfg(target_has_atomic = "ptr")]
17mod has_cas_atomics {
18 use core::{
19 fmt,
20 ptr::{self, NonNull},
21 };
22
23 use crate::{
24 loom::sync::atomic::{AtomicPtr, Ordering::*},
25 Linked,
26 };
27
28 use super::{Links, Stack};
29
    /// An [intrusive] lock-free singly-linked LIFO stack, where all entries
    /// currently in the stack are consumed in a single atomic operation.
    ///
    /// A transfer stack is perhaps the world's simplest lock-free concurrent data
    /// structure. It provides two primary operations:
    ///
    /// - [`TransferStack::push`], which pushes an element onto the
    ///   transfer stack,
    ///
    /// - [`TransferStack::take_all`], which atomically takes all elements currently
    ///   on the transfer stack and returns them as a new mutable [`Stack`].
    ///
    /// These are both *O*(1) operations, although `push` performs a
    /// compare-and-swap loop that may be retried if another producer concurrently
    /// pushed an element.
    ///
    /// In order to be part of a `TransferStack`, a type `T` must implement
    /// the [`Linked`] trait for [`stack::Links<T>`](Links).
    ///
    /// Pushing elements into a `TransferStack` takes ownership of those elements
    /// through an owning [`Handle` type](Linked::Handle). Dropping a
    /// [`TransferStack`] drops all elements currently linked into the stack.
    ///
    /// A transfer stack is often useful in cases where a large number of resources
    /// must be efficiently transferred from several producers to a consumer, such
    /// as for reuse or cleanup. For example, a [`TransferStack`] can be used as the
    /// "thread" (shared) free list in a [`mimalloc`-style sharded
    /// allocator][mimalloc], with a mutable [`Stack`] used as the local
    /// (unsynchronized) free list. When an allocation is freed from the same CPU
    /// core that it was allocated on, it is pushed to the local free list, using an
    /// unsynchronized mutable [`Stack::push`] operation. If an allocation is freed
    /// from a different thread, it is instead pushed to that thread's shared free
    /// list, a [`TransferStack`], using an atomic [`TransferStack::push`]
    /// operation. New allocations are popped from the local unsynchronized free
    /// list, and if the local free list is empty, the entire shared free list is
    /// moved onto the local free list. This allows objects which do not leave the
    /// CPU core they were allocated on to be both allocated and deallocated using
    /// unsynchronized operations, and new allocations only perform an atomic
    /// operation when the local free list is empty.
    ///
    /// [intrusive]: crate#intrusive-data-structures
    /// [mimalloc]: https://www.microsoft.com/en-us/research/uploads/prod/2019/06/mimalloc-tr-v1.pdf
    pub struct TransferStack<T: Linked<Links<T>>> {
        /// The topmost node in the stack, or null when the stack is empty.
        ///
        /// Producers push by compare-and-swapping a new node onto this
        /// pointer; [`take_all`](Self::take_all) consumes the entire stack
        /// by swapping it back to null.
        head: AtomicPtr<T>,
    }
75
76 // === impl TransferStack ===
    impl<T> TransferStack<T>
    where
        T: Linked<Links<T>>,
    {
        /// Returns a new `TransferStack` with no elements.
        #[cfg(not(loom))]
        #[must_use]
        pub const fn new() -> Self {
            Self {
                head: AtomicPtr::new(ptr::null_mut()),
            }
        }

        /// Returns a new `TransferStack` with no elements.
        ///
        /// (Non-`const` variant for `loom` testing, since `loom`'s mock
        /// atomics cannot be constructed in a `const fn`.)
        #[cfg(loom)]
        #[must_use]
        pub fn new() -> Self {
            Self {
                head: AtomicPtr::new(ptr::null_mut()),
            }
        }

        /// Pushes `element` onto the end of this `TransferStack`, taking ownership
        /// of it.
        ///
        /// This is an *O*(1) operation, although it performs a compare-and-swap
        /// loop that may repeat if another producer is concurrently calling `push`
        /// on the same `TransferStack`.
        ///
        /// This takes ownership over `element` through its [owning `Handle`
        /// type](Linked::Handle). If the `TransferStack` is dropped before the
        /// pushed `element` is removed from the stack, the `element` will be dropped.
        ///
        /// For a variant that also reports whether the stack was previously
        /// empty, see [`TransferStack::push_was_empty`].
        #[inline]
        pub fn push(&self, element: T::Handle) {
            self.push_was_empty(element);
        }

        /// Pushes `element` onto the end of this `TransferStack`, taking ownership
        /// of it. Returns `true` if the stack was previously empty (the previous
        /// head was null).
        ///
        /// This is an *O*(1) operation, although it performs a compare-and-swap
        /// loop that may repeat if another producer is concurrently calling `push`
        /// on the same `TransferStack`.
        ///
        /// This takes ownership over `element` through its [owning `Handle`
        /// type](Linked::Handle). If the `TransferStack` is dropped before the
        /// pushed `element` is removed from the stack, the `element` will be dropped.
        pub fn push_was_empty(&self, element: T::Handle) -> bool {
            let ptr = T::into_ptr(element);
            test_trace!(?ptr, "TransferStack::push");
            // Safety: `into_ptr` hands us the sole pointer to this node, and
            // the node is not yet linked into the stack, so we have exclusive
            // access to its links until the CAS below publishes it.
            let links = unsafe { T::links(ptr).as_mut() };
            debug_assert!(links.next.with(|next| unsafe { (*next).is_none() }));

            let mut head = self.head.load(Relaxed);
            loop {
                test_trace!(?ptr, ?head, "TransferStack::push");
                // Chain the current head behind the new node. Safety: the
                // node is not yet visible to any other thread, so this write
                // cannot race.
                links.next.with_mut(|next| unsafe {
                    *next = NonNull::new(head);
                });

                // `AcqRel` on success: the `Release` half publishes the write
                // to `links.next` above; `Acquire` (on both success and
                // failure) ensures we observe other producers' published
                // links when re-reading the head.
                match self
                    .head
                    .compare_exchange_weak(head, ptr.as_ptr(), AcqRel, Acquire)
                {
                    Ok(old) => {
                        let was_empty = old.is_null();
                        test_trace!(?ptr, ?head, was_empty, "TransferStack::push -> pushed");
                        return was_empty;
                    }
                    // Another producer won the race; retry with the head
                    // value it installed.
                    Err(actual) => head = actual,
                }
            }
        }

        /// Takes all elements *currently* in this `TransferStack`, returning a new
        /// mutable [`Stack`] containing those elements.
        ///
        /// This is an *O*(1) operation which does not allocate memory. It will
        /// never loop and does not spin.
        #[must_use]
        pub fn take_all(&self) -> Stack<T> {
            // Detach the whole list in a single atomic swap. The `Acquire`
            // half of `AcqRel` synchronizes with the `Release` in
            // `push_was_empty`, so every node reachable from `head` has its
            // `next` link fully written before we traverse it.
            let head = self.head.swap(ptr::null_mut(), AcqRel);
            let head = NonNull::new(head);
            Stack { head }
        }
    }
164
165 impl<T> Drop for TransferStack<T>
166 where
167 T: Linked<Links<T>>,
168 {
169 fn drop(&mut self) {
170 // The stack owns any entries that are still in the stack; ensure they
171 // are dropped before dropping the stack.
172 for entry in self.take_all() {
173 drop(entry);
174 }
175 }
176 }
177
178 impl<T> fmt::Debug for TransferStack<T>
179 where
180 T: Linked<Links<T>>,
181 {
182 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
183 let Self { head } = self;
184 f.debug_struct("TransferStack").field("head", head).finish()
185 }
186 }
187
188 impl<T> Default for TransferStack<T>
189 where
190 T: Linked<Links<T>>,
191 {
192 fn default() -> Self {
193 Self::new()
194 }
195 }
196}
197
/// An [intrusive] singly-linked mutable LIFO stack.
///
/// This is a very simple implementation of a linked `Stack`, which provides
/// *O*(1) [`push`](Self::push) and [`pop`](Self::pop) operations. Items are
/// popped from the stack in the opposite order that they were pushed in.
///
/// A [`Stack`] also implements the [`Iterator`] trait, with the
/// [`Iterator::next`] method popping elements from the end of the stack.
///
/// In order to be part of a `Stack`, a type `T` must implement
/// the [`Linked`] trait for [`stack::Links<T>`](Links).
///
/// Pushing elements into a `Stack` takes ownership of those elements
/// through an owning [`Handle` type](Linked::Handle). Dropping a
/// `Stack` drops all elements currently linked into the stack.
///
/// [intrusive]: crate#intrusive-data-structures
pub struct Stack<T: Linked<Links<T>>> {
    /// The most recently pushed node (the top of the stack), or `None`
    /// when the stack is empty. `pub(crate)` so that `TransferStack` can
    /// construct a `Stack` directly in `take_all`.
    pub(crate) head: Option<NonNull<T>>,
}
218
/// Singly-linked-list linkage
///
/// Links to other nodes in a [`TransferStack`], [`Stack`], or [`SortedList`].
///
/// In order to be part of a [`TransferStack`], [`Stack`], or [`SortedList`],
/// a type must contain an instance of this type, and must implement the
/// [`Linked`] trait for `Links<Self>`.
///
/// [`SortedList`]: crate::SortedList
//
// TODO(AJM): In the next breaking change, we might want to specifically have
// a `SingleLinks` and `DoubleLinks` type to make the relationship more clear,
// instead of "stack" being singly-flavored and "list" being doubly-flavored
pub struct Links<T> {
    /// The next node in the queue.
    pub(crate) next: UnsafeCell<Option<NonNull<T>>>,

    /// Linked list links must always be `!Unpin`, in order to ensure that they
    /// never receive LLVM `noalias` annotations; see also
    /// <https://github.com/rust-lang/rust/issues/63818>.
    _unpin: PhantomPinned,
}
241
242// === impl Stack ===
243
impl<T> Stack<T>
where
    T: Linked<Links<T>>,
{
    /// Returns a new `Stack` with no elements in it.
    #[must_use]
    pub const fn new() -> Self {
        Self { head: None }
    }

    /// Pushes `element` onto the end of this `Stack`, taking ownership
    /// of it.
    ///
    /// Returns `true` if the stack was previously empty, and `false` if the stack
    /// contained at least one other element.
    ///
    /// This is an *O*(1) operation that does not allocate memory. It will never
    /// loop.
    ///
    /// This takes ownership over `element` through its [owning `Handle`
    /// type](Linked::Handle). If the `Stack` is dropped before the
    /// pushed `element` is [`pop`](Self::pop)pped from the stack, the `element`
    /// will be dropped.
    pub fn push_was_empty(&mut self, element: T::Handle) -> bool {
        let ptr = T::into_ptr(element);
        test_trace!(?ptr, ?self.head, "Stack::push_was_empty");
        unsafe {
            // Safety: we have exclusive mutable access to the stack, and
            // therefore can also mutate the stack's entries.
            let links = T::links(ptr).as_mut();
            links.next.with_mut(|next| {
                debug_assert!((*next).is_none());
                // The new node becomes the head (top) of the stack, with the
                // previous head (if any) chained behind it.
                *next = self.head.replace(ptr);
                // The stack was empty iff there was no previous head.
                (*next).is_none()
            })
        }
    }

    /// Pushes `element` onto the end of this `Stack`, taking ownership
    /// of it.
    ///
    /// This is an *O*(1) operation that does not allocate memory. It will never
    /// loop.
    ///
    /// This takes ownership over `element` through its [owning `Handle`
    /// type](Linked::Handle). If the `Stack` is dropped before the
    /// pushed `element` is [`pop`](Self::pop)pped from the stack, the `element`
    /// will be dropped.
    ///
    /// For a variant of this method that returns a `bool` indicating if the
    /// list was empty, see [`Stack::push_was_empty`].
    #[inline]
    pub fn push(&mut self, element: T::Handle) {
        self.push_was_empty(element);
    }

    /// Returns the element most recently [push](Self::push)ed to this `Stack`,
    /// or `None` if the stack is empty.
    ///
    /// This is an *O*(1) operation which does not allocate memory. It will
    /// never loop and does not spin.
    #[must_use]
    pub fn pop(&mut self) -> Option<T::Handle> {
        test_trace!(?self.head, "Stack::pop");
        let head = self.head.take()?;
        unsafe {
            // Safety: we have exclusive ownership over this chunk of stack.

            // advance the head link to the next node after the current one (if
            // there is one), clearing the popped node's `next` link so that it
            // is unlinked when ownership is returned to the caller.
            self.head = T::links(head).as_mut().next.with_mut(|next| (*next).take());

            test_trace!(?self.head, "Stack::pop -> popped");

            // return the current node
            Some(T::from_ptr(head))
        }
    }

    /// Takes all elements *currently* in this `Stack`, returning a new
    /// mutable `Stack` containing those elements.
    ///
    /// This is an *O*(1) operation which does not allocate memory. It will
    /// never loop and does not spin.
    #[must_use]
    pub fn take_all(&mut self) -> Self {
        Self {
            // Moving the head pointer moves ownership of the entire chain of
            // linked nodes, leaving `self` empty.
            head: self.head.take(),
        }
    }

    /// Returns `true` if this `Stack` is empty.
    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}
342
343impl<T> Drop for Stack<T>
344where
345 T: Linked<Links<T>>,
346{
347 fn drop(&mut self) {
348 // The stack owns any entries that are still in the stack; ensure they
349 // are dropped before dropping the stack.
350 for entry in self {
351 drop(entry);
352 }
353 }
354}
355
356impl<T> fmt::Debug for Stack<T>
357where
358 T: Linked<Links<T>>,
359{
360 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
361 let Self { head } = self;
362 f.debug_struct("Stack").field("head", head).finish()
363 }
364}
365
366impl<T> Iterator for Stack<T>
367where
368 T: Linked<Links<T>>,
369{
370 type Item = T::Handle;
371
372 fn next(&mut self) -> Option<Self::Item> {
373 self.pop()
374 }
375}
376
377impl<T> Default for Stack<T>
378where
379 T: Linked<Links<T>>,
380{
381 fn default() -> Self {
382 Self::new()
383 }
384}
385
/// # Safety
///
/// A `Stack` is `Send` if `T` is `Send`, because moving it across threads
/// also implicitly moves any `T`s in the stack.
unsafe impl<T> Send for Stack<T>
where
    T: Send,
    T: Linked<Links<T>>,
{
}

/// # Safety
///
/// A `Stack` is `Sync` if `T` is `Sync`: a shared `&Stack` only permits
/// non-mutating access (`is_empty` and `Debug` formatting) — all operations
/// that link or unlink entries require `&mut Stack` — so shared references
/// cannot be used to mutate the `T`s in the stack.
unsafe impl<T> Sync for Stack<T>
where
    T: Sync,
    T: Linked<Links<T>>,
{
}
403
404// === impl Links ===
405
impl<T> Links<T> {
    /// Returns new links for a [`TransferStack`], [`Stack`], or
    /// [`SortedList`](crate::SortedList).
    #[cfg(not(loom))]
    #[must_use]
    pub const fn new() -> Self {
        Self {
            next: UnsafeCell::new(None),
            _unpin: PhantomPinned,
        }
    }

    /// Returns new links for a [`TransferStack`], [`Stack`], or
    /// [`SortedList`](crate::SortedList).
    ///
    /// (Non-`const` variant for `loom` testing, since `loom`'s mock
    /// `UnsafeCell` cannot be constructed in a `const fn`.)
    #[cfg(loom)]
    #[must_use]
    pub fn new() -> Self {
        Self {
            next: UnsafeCell::new(None),
            _unpin: PhantomPinned,
        }
    }
}
427
/// # Safety
///
/// Types containing [`Links`] may be `Send`: the pointers within the `Links` may
/// mutably alias another value, but the links can only be _accessed_ by the
/// owner of the [`TransferStack`] itself, because the pointers are private. As
/// long as [`TransferStack`] upholds its own invariants, `Links` should not
/// make a type `!Send`.
unsafe impl<T: Send> Send for Links<T> {}

/// # Safety
///
/// Types containing [`Links`] may be `Sync`: the pointers within the `Links` may
/// mutably alias another value, but the links can only be _accessed_ by the
/// owner of the [`TransferStack`] itself, because the pointers are private. As
/// long as [`TransferStack`] upholds its own invariants, `Links` should not
/// make a type `!Sync`.
unsafe impl<T: Sync> Sync for Links<T> {}
445
impl<T> fmt::Debug for Links<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The `next` pointer is deliberately rendered as an opaque
        // placeholder rather than traversed.
        //
        // NOTE(review): the string says `transfer_stack::Links` although this
        // module is `stack` — presumably a holdover from an earlier module
        // name; confirm before changing, since this is runtime output.
        f.write_str("transfer_stack::Links { ... }")
    }
}
451
452impl<T> Default for Links<T> {
453 fn default() -> Self {
454 Self::new()
455 }
456}
457
#[cfg(test)]
mod loom {
    use super::*;
    use crate::loom::{
        self,
        sync::{
            atomic::{AtomicUsize, Ordering},
            Arc,
        },
        thread,
    };
    use test_util::Entry;

    /// Two producer threads race `push` against a consumer that repeatedly
    /// drains the stack with `take_all`; every pushed value must be seen
    /// exactly once.
    #[test]
    fn multithreaded_push() {
        const PUSHES: i32 = 2;
        loom::model(|| {
            let stack = Arc::new(TransferStack::new());
            // Number of producer threads still running; the consumer polls
            // this to know when no further pushes can occur.
            let threads = Arc::new(AtomicUsize::new(2));
            let thread1 = thread::spawn({
                let stack = stack.clone();
                let threads = threads.clone();
                move || {
                    Entry::push_all(&stack, 1, PUSHES);
                    threads.fetch_sub(1, Ordering::Relaxed);
                }
            });

            let thread2 = thread::spawn({
                let stack = stack.clone();
                let threads = threads.clone();
                move || {
                    Entry::push_all(&stack, 2, PUSHES);
                    threads.fetch_sub(1, Ordering::Relaxed);
                }
            });

            let mut seen = Vec::new();

            loop {
                seen.extend(stack.take_all().map(|entry| entry.val));

                if threads.load(Ordering::Relaxed) == 0 {
                    break;
                }

                thread::yield_now();
            }

            // One final drain: a producer may have pushed between our last
            // `take_all` and observing the counter hit zero.
            seen.extend(stack.take_all().map(|entry| entry.val));

            seen.sort();
            assert_eq!(seen, vec![10, 11, 20, 21]);

            thread1.join().unwrap();
            thread2.join().unwrap();
        })
    }

    /// Two producers race two consumers (`thread3` and the main thread);
    /// across all drains, every pushed value must be seen exactly once.
    #[test]
    fn multithreaded_pop() {
        const PUSHES: i32 = 2;
        loom::model(|| {
            let stack = Arc::new(TransferStack::new());
            let thread1 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 1, PUSHES)
            });

            let thread2 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 2, PUSHES)
            });

            let thread3 = thread::spawn({
                let stack = stack.clone();
                move || stack.take_all().map(|entry| entry.val).collect::<Vec<_>>()
            });

            let seen_thread0 = stack.take_all().map(|entry| entry.val).collect::<Vec<_>>();
            let seen_thread3 = thread3.join().unwrap();

            thread1.join().unwrap();
            thread2.join().unwrap();

            // Entries pushed after the racy drains above are still on the
            // stack; drain them now that both producers have exited.
            let seen_thread0_final = stack.take_all().map(|entry| entry.val).collect::<Vec<_>>();

            let mut all = dbg!(seen_thread0);
            all.extend(dbg!(seen_thread3));
            all.extend(dbg!(seen_thread0_final));

            all.sort();
            assert_eq!(all, vec![10, 11, 20, 21]);
        })
    }

    /// Dropping the stack while producers may still hold clones of the
    /// `Arc` must not leak entries; the last `Arc` drop runs
    /// `TransferStack::drop`, which frees anything still linked in.
    #[test]
    fn doesnt_leak() {
        const PUSHES: i32 = 2;
        loom::model(|| {
            let stack = Arc::new(TransferStack::new());
            let thread1 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 1, PUSHES)
            });

            let thread2 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 2, PUSHES)
            });

            tracing::info!("dropping stack");
            drop(stack);

            thread1.join().unwrap();
            thread2.join().unwrap();
        })
    }

    /// After all producers finish, `take_all` transfers ownership of every
    /// entry to the returned `Stack`; dropping both the `TransferStack` and
    /// the returned `Stack` must not leak.
    #[test]
    fn take_all_doesnt_leak() {
        const PUSHES: i32 = 2;
        loom::model(|| {
            let stack = Arc::new(TransferStack::new());
            let thread1 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 1, PUSHES)
            });

            let thread2 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 2, PUSHES)
            });

            thread1.join().unwrap();
            thread2.join().unwrap();

            let take_all = stack.take_all();

            tracing::info!("dropping stack");
            drop(stack);

            tracing::info!("dropping take_all");
            drop(take_all);
        })
    }

    /// Like `take_all_doesnt_leak`, but `take_all` races the producers, so
    /// entries may end up owned by either the taken `Stack` or the
    /// `TransferStack` — neither may leak.
    #[test]
    fn take_all_doesnt_leak_racy() {
        const PUSHES: i32 = 2;
        loom::model(|| {
            let stack = Arc::new(TransferStack::new());
            let thread1 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 1, PUSHES)
            });

            let thread2 = thread::spawn({
                let stack = stack.clone();
                move || Entry::push_all(&stack, 2, PUSHES)
            });

            let take_all = stack.take_all();

            thread1.join().unwrap();
            thread2.join().unwrap();

            tracing::info!("dropping stack");
            drop(stack);

            tracing::info!("dropping take_all");
            drop(take_all);
        })
    }

    /// Single-threaded check of the mutable `Stack`: elements come back out
    /// in reverse push (LIFO) order, including after `take_all`.
    #[test]
    fn unsync() {
        loom::model(|| {
            let mut stack = Stack::<Entry>::new();
            stack.push(Entry::new(1));
            stack.push(Entry::new(2));
            stack.push(Entry::new(3));
            let mut take_all = stack.take_all();

            // Popping yields 3, 2, 1; meanwhile refill the original stack
            // with 13, 12, 11.
            for i in (1..=3).rev() {
                assert_eq!(take_all.next().unwrap().val, i);
                stack.push(Entry::new(10 + i));
            }

            // LIFO again: 13, 12, 11 pop back out as 11, 12, 13.
            let mut i = 11;
            for entry in stack.take_all() {
                assert_eq!(entry.val, i);
                i += 1;
            }
        })
    }

    /// Dropping a non-empty mutable `Stack` must free all of its entries.
    #[test]
    fn unsync_doesnt_leak() {
        loom::model(|| {
            let mut stack = Stack::<Entry>::new();
            stack.push(Entry::new(1));
            stack.push(Entry::new(2));
            stack.push(Entry::new(3));
        })
    }
}
665
#[cfg(test)]
mod test {
    use super::{test_util::Entry, *};

    /// Compile-time assertion that `TransferStack` is `Send + Sync`, so it
    /// can be shared across threads.
    #[test]
    fn stack_is_send_sync() {
        crate::util::assert_send_sync::<TransferStack<Entry>>()
    }

    /// Compile-time assertion that `Links` does not make a containing type
    /// `!Send` or `!Sync`.
    #[test]
    fn links_are_send_sync() {
        crate::util::assert_send_sync::<Links<Entry>>()
    }
}
680
#[cfg(test)]
pub(crate) mod test_util {
    use super::*;
    use crate::loom::alloc;
    use core::pin::Pin;
    use core::ptr;

    /// A simple boxed entry type for exercising the stacks in tests.
    ///
    /// Wraps an `alloc::Track` so that entries which are never dropped can
    /// be detected by the leak-checking tests.
    #[pin_project::pin_project]
    pub(crate) struct Entry {
        // Intrusive linkage used by `Stack`/`TransferStack`.
        #[pin]
        links: Links<Entry>,
        // The test payload; comparisons below order entries by this value.
        pub(crate) val: i32,
        // Allocation tracker for leak detection.
        track: alloc::Track<()>,
    }

    // ----------------------------------------------------------------------
    // Helper impls for `sorted_list` — entries compare by `val` only.
    impl PartialEq for Entry {
        fn eq(&self, other: &Self) -> bool {
            self.val.eq(&other.val)
        }
    }

    impl Eq for Entry {}

    impl PartialOrd for Entry {
        fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
            Some(self.cmp(other))
        }
    }

    impl Ord for Entry {
        fn cmp(&self, other: &Self) -> core::cmp::Ordering {
            self.val.cmp(&other.val)
        }
    }
    // ----------------------------------------------------------------------

    /// Test-only `Linked` implementation using `Pin<Box<Entry>>` as the
    /// owning handle.
    unsafe impl Linked<Links<Self>> for Entry {
        type Handle = Pin<Box<Entry>>;

        fn into_ptr(handle: Pin<Box<Entry>>) -> NonNull<Self> {
            unsafe { NonNull::from(Box::leak(Pin::into_inner_unchecked(handle))) }
        }

        unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle {
            // Safety: if this function is only called by the linked list
            // implementation (and it is not intended for external use), we can
            // expect that the `NonNull` was constructed from a reference which
            // was pinned.
            //
            // If other callers besides `List`'s internals were to call this on
            // some random `NonNull<Entry>`, this would not be the case, and
            // this could be constructing an erroneous `Pin` from a referent
            // that may not be pinned!
            Pin::new_unchecked(Box::from_raw(ptr.as_ptr()))
        }

        unsafe fn links(target: NonNull<Self>) -> NonNull<Links<Self>> {
            // `addr_of_mut!` avoids creating an intermediate reference to the
            // (possibly aliased) links field.
            let links = ptr::addr_of_mut!((*target.as_ptr()).links);
            // Safety: it's fine to use `new_unchecked` here; if the pointer that we
            // offset to the `links` field is not null (which it shouldn't be, as we
            // received it as a `NonNull`), the offset pointer should therefore also
            // not be null.
            NonNull::new_unchecked(links)
        }
    }

    impl Entry {
        /// Returns a new pinned, heap-allocated `Entry` with the given value.
        pub(crate) fn new(val: i32) -> Pin<Box<Entry>> {
            Box::pin(Entry {
                links: Links::new(),
                val,
                track: alloc::Track::new(()),
            })
        }

        /// Pushes `n` entries with values `thread * 10 + i` (for `i` in
        /// `0..n`) onto `stack`, identifying which thread produced each.
        pub(super) fn push_all(stack: &TransferStack<Self>, thread: i32, n: i32) {
            for i in 0..n {
                let entry = Self::new((thread * 10) + i);
                stack.push(entry);
            }
        }
    }
}