// crossbeam_epoch/guard.rs

use core::fmt;
use core::mem;

use crate::atomic::Shared;
use crate::collector::Collector;
use crate::deferred::Deferred;
use crate::internal::Local;

/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
/// For example:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
///     println!("The number is {}.", num);
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// drop(guard1);
/// assert!(epoch::is_pinned());
/// drop(guard2);
/// assert!(!epoch::is_pinned());
/// ```
///
/// [`pin`]: super::pin
pub struct Guard {
    pub(crate) local: *const Local,
}

impl Guard {
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
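    ///
    /// # Examples
    ///
    /// A minimal sketch of deferring work from a pinned guard (the closure may later run on
    /// another thread):
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// guard.defer(move || {
    ///     println!("This runs some time after all currently pinned threads unpin.");
    /// });
    /// ```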
    pub fn defer<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
        F: Send + 'static,
    {
        unsafe {
            self.defer_unchecked(f);
        }
    }

    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The given function must not hold any references onto the stack. It is highly recommended
    /// that the passed function is **always** marked with `move` in order to prevent accidental
    /// borrows.
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// unsafe {
    ///     // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///     guard.defer_unchecked(move || {
    ///         println!("{}", message);
    ///     });
    /// }
    /// ```
    ///
    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
    /// the closure must be `Send`.
    ///
    /// We intentionally didn't require `F: Send`, because Rust's type system usually cannot prove
    /// `F: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
    /// because it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect type systems to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// to that same object on the stack. Therefore it cannot be deallocated before those references
    /// get dropped. This method can defer deallocation until all those threads get unpinned and
    /// consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///         guard.defer_unchecked(move || {
    ///             println!("{} is now being deallocated.", p.deref());
    ///             // Now we have unique access to the object pointed to by `p` and can turn it
    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
    ///             drop(p.into_owned());
    ///         });
    ///     }
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn defer_unchecked<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
    {
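        // A null `local` means this is an `unprotected` guard; in that case the
        // deferred function is executed right away instead of being queued.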
        if let Some(local) = self.local.as_ref() {
            local.defer(Deferred::new(move || drop(f())), self);
        } else {
            drop(f());
        }
    }

    /// Stores a destructor for an object so that it can be deallocated and dropped at some point
    /// after all currently pinned threads get unpinned.
    ///
    /// This method first stores the destructor into the thread-local (or handle-local) cache. If
    /// this cache becomes full, some destructors are moved into the global cache. At the same
    /// time, some destructors from both local and global caches may get executed in order to
    /// incrementally clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
    /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
    /// destructor might never run, but the epoch-based garbage collection will make an effort to
    /// execute it reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the destructor will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The object must not be reachable by other threads anymore, otherwise it might still be in
    /// use when the destructor runs.
    ///
    /// Apart from that, keep in mind that another thread may execute the destructor, so the object
    /// must be sendable to other threads.
    ///
    /// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
    /// `T: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_destroy(shared); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
    /// it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect type systems to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// to that same object on the stack. Therefore it cannot be deallocated before those references
    /// get dropped. This method can defer deallocation until all those threads get unpinned and
    /// consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         guard.defer_destroy(p);
    ///     }
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) {
        self.defer_unchecked(move || ptr.into_owned());
    }

    /// Clears up the thread-local cache of deferred functions by executing them or moving them
    /// into the global cache.
    ///
    /// Call this method after deferring execution of a function if you want to get it executed as
    /// soon as possible. Flushing will make sure the function resides in the global cache, so that
    /// any thread has a chance of taking it and executing it.
    ///
    /// If this method is called from an [`unprotected`] guard, it is a no-op.
280    ///
281    /// # Examples
282    ///
283    /// ```
284    /// use crossbeam_epoch as epoch;
285    ///
286    /// let guard = &epoch::pin();
287    /// guard.defer(move || {
288    ///     println!("This better be printed as soon as possible!");
289    /// });
290    /// guard.flush();
291    /// ```
    pub fn flush(&self) {
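        // An `unprotected` guard has a null `local`, so there is nothing to flush.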
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.flush(self);
        }
    }

    /// Unpins and then immediately re-pins the thread.
    ///
    /// This method is useful when you don't want to delay the advancement of the global epoch by
    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
    /// the call (this is enforced by `&mut self`). The thread will only be repinned if this
    /// is the only active guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the call is simply a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.repin();
        }
    }

    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
    /// current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin_after<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        // Ensure the Guard is re-pinned even if the function panics
        struct ScopeGuard(*const Local);
        impl Drop for ScopeGuard {
            fn drop(&mut self) {
                if let Some(local) = unsafe { self.0.as_ref() } {
                    mem::forget(local.pin());
                    local.release_handle();
                }
            }
        }

        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            local.unpin();
        }

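        // Dropped when this function returns (including by panic), re-pinning the thread.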
        let _guard = ScopeGuard(self.local);

        f()
    }

    /// Returns the `Collector` associated with this guard.
    ///
    /// This method is useful when you need to ensure that all guards used with
    /// a data structure come from the same collector.
    ///
    /// If this method is called from an [`unprotected`] guard, then `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard1 = epoch::pin();
    /// let guard2 = epoch::pin();
    /// assert!(guard1.collector() == guard2.collector());
    /// ```
    pub fn collector(&self) -> Option<&Collector> {
        unsafe { self.local.as_ref().map(|local| local.collector()) }
    }
}

impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
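        // An `unprotected` guard has a null `local`; dropping it is a no-op.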
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.unpin();
        }
    }
}

impl fmt::Debug for Guard {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Guard { .. }")
    }
}

/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used on special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards.
///     let dummy = epoch::unprotected();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a no-op.
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// The most common use of this function is when constructing or destroying a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
///     head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
///     data: ManuallyDrop<T>,
///     next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop its data and deallocate it.
///                 let mut o = node.into_owned();
///                 ManuallyDrop::drop(&mut o.data);
///                 drop(o);
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so it can't be directly
    // stored in a `static`.
    struct GuardWrapper(Guard);
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    });
    &UNPROTECTED.0
}