maitake_sync/util/cache_pad.rs
use core::{
    fmt,
    ops::{Deref, DerefMut},
};

pub use self::inner::CachePadded;

/// When configured not to pad to cache alignment, just provide a no-op wrapper
/// struct. This feature is useful for platforms with no data cache, such as
/// many Cortex-M targets.
#[cfg(feature = "no-cache-pad")]
mod inner {
    /// Aligns the wrapped value to the size of a cache line.
    ///
    /// This is used to avoid [false sharing] for values that may be
    /// accessed concurrently.
    ///
    /// # Size/Alignment
    ///
    /// The size and alignment of this type depend on the target architecture,
    /// and on whether or not the `no-cache-pad` feature flag is enabled.
    ///
    /// When the `no-cache-pad` crate feature flag is enabled, this is simply a
    /// no-op wrapper struct. This is intended for platforms with no data
    /// cache, such as many Cortex-M targets.
    ///
    /// In other cases, this type is always aligned to the size of a cache
    /// line, based on the target architecture. On `x86_64`/`aarch64`, a cache
    /// line is 128 bytes. On all other targets, a cache line is assumed to be
    /// 64 bytes long. This type's size will always be a multiple of the cache
    /// line size; if the wrapped type is longer than a cache line, then this
    /// type will be padded to multiple cache lines.
    ///
    /// [false sharing]: https://en.wikipedia.org/wiki/False_sharing
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
    pub struct CachePadded<T>(pub(super) T);
}
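
// A minimal compile-time sketch (an illustrative addition, not part of the
// original source): with the `no-cache-pad` feature enabled, `CachePadded<T>`
// is a plain newtype around `T`, so in practice its size and alignment match
// `T`'s (shown here with `u64`; this is not a language-level guarantee for a
// non-`repr(transparent)` wrapper, so the check is purely illustrative).
#[cfg(feature = "no-cache-pad")]
const _: () = {
    assert!(core::mem::size_of::<CachePadded<u64>>() == core::mem::size_of::<u64>());
    assert!(core::mem::align_of::<CachePadded<u64>>() == core::mem::align_of::<u64>());
};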

/// When not inhibited, determine cache alignment based on target architecture.
/// Align to 128 bytes on 64-bit x86/ARM targets, otherwise align to 64 bytes.
#[cfg(not(feature = "no-cache-pad"))]
mod inner {
    /// Aligns the wrapped value to the size of a cache line.
    ///
    /// This is used to avoid [false sharing] for values that may be
    /// accessed concurrently.
    ///
    /// # Size/Alignment
    ///
    /// The size and alignment of this type depend on the target architecture,
    /// and on whether or not the `no-cache-pad` feature flag is enabled.
    ///
    /// When the `no-cache-pad` crate feature flag is enabled, this is simply a
    /// no-op wrapper struct. This is intended for platforms with no data
    /// cache, such as many Cortex-M targets.
    ///
    /// In other cases, this type is always aligned to the size of a cache
    /// line, based on the target architecture. On `x86_64`/`aarch64`, a cache
    /// line is 128 bytes. On all other targets, a cache line is assumed to be
    /// 64 bytes long. This type's size will always be a multiple of the cache
    /// line size; if the wrapped type is longer than a cache line, then this
    /// type will be padded to multiple cache lines.
    ///
    /// [false sharing]: https://en.wikipedia.org/wiki/False_sharing
    #[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
    #[cfg_attr(
        not(any(target_arch = "x86_64", target_arch = "aarch64")),
        repr(align(64))
    )]
    #[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
    pub struct CachePadded<T>(pub(super) T);
}
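
// A minimal compile-time sketch (an illustrative addition, not part of the
// original source) checking the alignment documented above: 128 bytes on
// `x86_64`/`aarch64`, 64 bytes on all other targets.
#[cfg(not(feature = "no-cache-pad"))]
const _: () = {
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    const EXPECTED_ALIGN: usize = 128;
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    const EXPECTED_ALIGN: usize = 64;
    assert!(core::mem::align_of::<CachePadded<u8>>() == EXPECTED_ALIGN);
};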

// === impl CachePadded ===

impl<T> CachePadded<T> {
    /// Pads `value` to the length of a cache line.
    pub const fn new(value: T) -> Self {
        Self(value)
    }

    /// Unwraps the inner value and returns it.
    pub fn into_inner(self) -> T {
        self.0
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.0
    }
}

impl<T> DerefMut for CachePadded<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.0
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
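
// The tests below are an illustrative sketch (not part of the original
// module) exercising the documented guarantees: the wrapper's size is a
// multiple of the cache line size, and adjacent padded values do not share a
// cache line. They only apply when padding is actually performed, so they are
// disabled under the `no-cache-pad` feature.
#[cfg(all(test, not(feature = "no-cache-pad")))]
mod tests {
    use super::CachePadded;
    use core::mem::{align_of, size_of};

    #[test]
    fn size_is_a_multiple_of_the_cache_line() {
        let line = align_of::<CachePadded<u8>>();
        // A small value is padded up to one full cache line...
        assert_eq!(size_of::<CachePadded<u8>>(), line);
        // ...and a value longer than a cache line is padded out to a multiple
        // of the cache line size.
        assert_eq!(size_of::<CachePadded<[u8; 200]>>() % line, 0);
    }

    #[test]
    fn adjacent_fields_do_not_share_a_cache_line() {
        use core::sync::atomic::AtomicUsize;

        // The typical use case: values written concurrently by different
        // cores are padded so that each lands on its own cache line.
        struct Shared {
            a: CachePadded<AtomicUsize>,
            b: CachePadded<AtomicUsize>,
        }

        let shared = Shared {
            a: CachePadded::new(AtomicUsize::new(0)),
            b: CachePadded::new(AtomicUsize::new(0)),
        };
        let a = &shared.a as *const _ as usize;
        let b = &shared.b as *const _ as usize;
        let distance = if a > b { a - b } else { b - a };
        assert!(distance >= align_of::<CachePadded<AtomicUsize>>());
    }
}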