// pyo3/sync.rs
1//! Synchronization mechanisms based on the Python GIL.
2//!
3//! With the acceptance of [PEP 703] (aka a "freethreaded Python") for Python 3.13, these
4//! are likely to undergo significant developments in the future.
5//!
6//! [PEP 703]: https://peps.python.org/pep-703/
7use crate::{
8 internal::state::SuspendAttach,
9 sealed::Sealed,
10 types::{any::PyAnyMethods, PyAny, PyString},
11 Bound, Py, PyResult, PyTypeCheck, Python,
12};
13use std::{
14 cell::UnsafeCell,
15 marker::PhantomData,
16 mem::MaybeUninit,
17 sync::{Once, OnceState},
18};
19
20pub(crate) mod once_lock;
21
22#[cfg(not(Py_GIL_DISABLED))]
23use crate::PyVisit;
24
25pub use self::once_lock::PyOnceLock;
26
/// Value with concurrent access protected by the GIL.
///
/// This is a synchronization primitive based on Python's global interpreter lock (GIL).
/// It ensures that only one thread at a time can access the inner value via shared references.
/// It can be combined with interior mutability to obtain mutable references.
///
/// This type is not defined for extensions built against the free-threaded CPython ABI.
///
/// # Example
///
/// Combining `GILProtected` with `RefCell` enables mutable access to static data:
///
/// ```
/// # #![allow(deprecated)]
/// # use pyo3::prelude::*;
/// use pyo3::sync::GILProtected;
/// use std::cell::RefCell;
///
/// static NUMBERS: GILProtected<RefCell<Vec<i32>>> = GILProtected::new(RefCell::new(Vec::new()));
///
/// Python::attach(|py| {
///     NUMBERS.get(py).borrow_mut().push(42);
/// });
/// ```
#[deprecated(
    since = "0.26.0",
    note = "Prefer an interior mutability primitive compatible with free-threaded Python, such as `Mutex` in combination with the `MutexExt` trait"
)]
#[cfg(not(Py_GIL_DISABLED))]
pub struct GILProtected<T> {
    // Invariant: only reachable through `get`/`traverse`, both of which demand
    // proof that the GIL is held (or that GC is running), serializing access.
    value: T,
}
59
60#[allow(deprecated)]
61#[cfg(not(Py_GIL_DISABLED))]
62impl<T> GILProtected<T> {
63 /// Place the given value under the protection of the GIL.
64 pub const fn new(value: T) -> Self {
65 Self { value }
66 }
67
68 /// Gain access to the inner value by giving proof of having acquired the GIL.
69 pub fn get<'py>(&'py self, _py: Python<'py>) -> &'py T {
70 &self.value
71 }
72
73 /// Gain access to the inner value by giving proof that garbage collection is happening.
74 pub fn traverse<'py>(&'py self, _visit: PyVisit<'py>) -> &'py T {
75 &self.value
76 }
77}
78
// SAFETY: `T: Send` is sufficient for `Sync` because shared access to the inner
// value is only handed out via `get`/`traverse`, both of which require the GIL
// (or GC) and therefore serialize all access; the value may still end up being
// dropped on a different thread than the one that created it, hence `Send`.
#[allow(deprecated)]
#[cfg(not(Py_GIL_DISABLED))]
unsafe impl<T> Sync for GILProtected<T> where T: Send {}
82
/// A write-once primitive similar to [`std::sync::OnceLock<T>`].
///
/// Unlike `OnceLock<T>` which blocks threads to achieve thread safety, `GILOnceCell<T>`
/// allows calls to [`get_or_init`][GILOnceCell::get_or_init] and
/// [`get_or_try_init`][GILOnceCell::get_or_try_init] to race to create an initialized value.
/// (It is still guaranteed that only one thread will ever write to the cell.)
///
/// On Python versions that run with the Global Interpreter Lock (GIL), this helps to avoid
/// deadlocks between initialization and the GIL. For an example of such a deadlock, see
#[doc = concat!(
    "[the FAQ section](https://pyo3.rs/v",
    env!("CARGO_PKG_VERSION"),
    "/faq.html#im-experiencing-deadlocks-using-pyo3-with-stdsynconcelock-stdsynclazylock-lazy_static-and-once_cell)"
)]
/// of the guide.
///
/// Note that because the GIL blocks concurrent execution, in practice this means that
/// [`get_or_init`][GILOnceCell::get_or_init] and
/// [`get_or_try_init`][GILOnceCell::get_or_try_init] may race if the initialization
/// function leads to the GIL being released and a thread context switch. This can
/// happen when importing or calling any Python code, as long as it releases the
/// GIL at some point. On free-threaded Python without any GIL, the race is
/// more likely since there is no GIL to prevent races. In the future, PyO3 may change
/// the semantics of `GILOnceCell` to behave more like the GIL build.
///
/// # Re-entrant initialization
///
/// [`get_or_init`][GILOnceCell::get_or_init] and
/// [`get_or_try_init`][GILOnceCell::get_or_try_init] do not protect against infinite recursion
/// from reentrant initialization.
///
/// # Examples
///
/// The following example shows how to use `GILOnceCell` to share a reference to a Python list
/// between threads:
///
/// ```
/// #![allow(deprecated)]
/// use pyo3::sync::GILOnceCell;
/// use pyo3::prelude::*;
/// use pyo3::types::PyList;
///
/// static LIST_CELL: GILOnceCell<Py<PyList>> = GILOnceCell::new();
///
/// pub fn get_shared_list(py: Python<'_>) -> &Bound<'_, PyList> {
///     LIST_CELL
///         .get_or_init(py, || PyList::empty(py).unbind())
///         .bind(py)
/// }
/// # Python::attach(|py| assert_eq!(get_shared_list(py).len(), 0));
/// ```
#[deprecated(
    since = "0.26.0",
    note = "Prefer `pyo3::sync::PyOnceLock`, which avoids the possibility of racing during initialization."
)]
pub struct GILOnceCell<T> {
    // Gate recording whether `data` has been written; `is_completed()` is the
    // read-side check used throughout.
    once: Once,
    // The value itself. Only written inside `once.call_once_force`; only read
    // after `once.is_completed()` returns true.
    data: UnsafeCell<MaybeUninit<T>>,

    /// (Copied from std::sync::OnceLock)
    ///
    /// `PhantomData` to make sure dropck understands we're dropping T in our Drop impl.
    ///
    /// ```compile_fail,E0597
    /// #![allow(deprecated)]
    /// use pyo3::Python;
    /// use pyo3::sync::GILOnceCell;
    ///
    /// struct A<'a>(#[allow(dead_code)] &'a str);
    ///
    /// impl<'a> Drop for A<'a> {
    ///     fn drop(&mut self) {}
    /// }
    ///
    /// let cell = GILOnceCell::new();
    /// {
    ///     let s = String::new();
    ///     let _ = Python::attach(|py| cell.set(py, A(&s)));
    /// }
    /// ```
    _marker: PhantomData<T>,
}
165
166#[allow(deprecated)]
167impl<T> Default for GILOnceCell<T> {
168 fn default() -> Self {
169 Self::new()
170 }
171}
172
// T: Send is needed for Sync because the thread which drops the GILOnceCell can be different
// to the thread which fills it. (e.g. think scoped thread which fills the cell and then exits,
// leaving the cell to be dropped by the main thread).
//
// SAFETY: shared references to the inner value are only handed out after `once`
// has completed, and the value is never mutated through a shared reference
// afterwards (`take`/`get_mut` require `&mut self`), so `T: Sync` covers
// concurrent reads and `T: Send` covers cross-thread drop.
#[allow(deprecated)]
unsafe impl<T: Send + Sync> Sync for GILOnceCell<T> {}
#[allow(deprecated)]
unsafe impl<T: Send> Send for GILOnceCell<T> {}
180
#[allow(deprecated)]
impl<T> GILOnceCell<T> {
    /// Create a `GILOnceCell` which does not yet contain a value.
    pub const fn new() -> Self {
        Self {
            once: Once::new(),
            data: UnsafeCell::new(MaybeUninit::uninit()),
            _marker: PhantomData,
        }
    }

    /// Get a reference to the contained value, or `None` if the cell has not yet been written.
    #[inline]
    pub fn get(&self, _py: Python<'_>) -> Option<&T> {
        if self.once.is_completed() {
            // SAFETY: the cell has been written.
            Some(unsafe { (*self.data.get()).assume_init_ref() })
        } else {
            None
        }
    }

    /// Get a reference to the contained value, initializing it if needed using the provided
    /// closure.
    ///
    /// See the type-level documentation for detail on re-entrancy and concurrent initialization.
    #[inline]
    pub fn get_or_init<F>(&self, py: Python<'_>, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        // Fast path: already initialized.
        if let Some(value) = self.get(py) {
            return value;
        }

        // .unwrap() will never panic because the result is always Ok
        self.init(py, || Ok::<T, std::convert::Infallible>(f()))
            .unwrap()
    }

    /// Like `get_or_init`, but accepts a fallible initialization function. If it fails, the cell
    /// is left uninitialized.
    ///
    /// See the type-level documentation for detail on re-entrancy and concurrent initialization.
    #[inline]
    pub fn get_or_try_init<F, E>(&self, py: Python<'_>, f: F) -> Result<&T, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        // Fast path: already initialized.
        if let Some(value) = self.get(py) {
            return Ok(value);
        }

        self.init(py, f)
    }

    // Shared slow path for `get_or_init` / `get_or_try_init`.
    #[cold]
    fn init<F, E>(&self, py: Python<'_>, f: F) -> Result<&T, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        // Note that f() could temporarily release the GIL, so it's possible that another thread
        // writes to this GILOnceCell before f() finishes. That's fine; we'll just have to discard
        // the value computed here and accept a bit of wasted computation.

        // TODO: on the freethreaded build, consider wrapping this pair of operations in a
        // critical section (requires a critical section API which can use a PyMutex without
        // an object.)
        let value = f()?;
        // Deliberately ignore the error: if another thread won the race, its
        // value stays and ours is dropped.
        let _ = self.set(py, value);

        // Whichever value won the race, the cell is now guaranteed initialized.
        Ok(self.get(py).unwrap())
    }

    /// Get the contents of the cell mutably. This is only possible if the reference to the cell is
    /// unique.
    pub fn get_mut(&mut self) -> Option<&mut T> {
        if self.once.is_completed() {
            // SAFETY: the cell has been written.
            Some(unsafe { (*self.data.get()).assume_init_mut() })
        } else {
            None
        }
    }

    /// Set the value in the cell.
    ///
    /// If the cell has already been written, `Err(value)` will be returned containing the new
    /// value which was not written.
    pub fn set(&self, _py: Python<'_>, value: T) -> Result<(), T> {
        let mut value = Some(value);
        // NB this can block, but since this is only writing a single value and
        // does not call arbitrary python code, we don't need to worry about
        // deadlocks with the GIL.
        self.once.call_once_force(|_| {
            // SAFETY: no other threads can be writing this value, because we are
            // inside the `call_once_force` closure.
            unsafe {
                // `.take().unwrap()` will never panic
                (*self.data.get()).write(value.take().unwrap());
            }
        });

        match value {
            // Some other thread wrote to the cell first
            Some(value) => Err(value),
            None => Ok(()),
        }
    }

    /// Takes the value out of the cell, moving it back to an uninitialized state.
    ///
    /// Has no effect and returns None if the cell has not yet been written.
    pub fn take(&mut self) -> Option<T> {
        if self.once.is_completed() {
            // Reset the cell to its default state so that it won't try to
            // drop the value again.
            self.once = Once::new();
            // SAFETY: the cell has been written. `self.once` has been reset,
            // so when `self` is dropped the value won't be read again.
            Some(unsafe { self.data.get_mut().assume_init_read() })
        } else {
            None
        }
    }

    /// Consumes the cell, returning the wrapped value.
    ///
    /// Returns None if the cell has not yet been written.
    pub fn into_inner(mut self) -> Option<T> {
        self.take()
    }
}
314
315#[allow(deprecated)]
316impl<T> GILOnceCell<Py<T>> {
317 /// Creates a new cell that contains a new Python reference to the same contained object.
318 ///
319 /// Returns an uninitialized cell if `self` has not yet been initialized.
320 pub fn clone_ref(&self, py: Python<'_>) -> Self {
321 let cloned = Self {
322 once: Once::new(),
323 data: UnsafeCell::new(MaybeUninit::uninit()),
324 _marker: PhantomData,
325 };
326 if let Some(value) = self.get(py) {
327 let _ = cloned.set(py, value.clone_ref(py));
328 }
329 cloned
330 }
331}
332
#[allow(deprecated)]
impl<T> GILOnceCell<Py<T>>
where
    T: PyTypeCheck,
{
    /// Get a reference to the contained Python type, initializing the cell if needed.
    ///
    /// This is a shorthand method for `get_or_init` which imports the type from Python on init.
    ///
    /// # Example: Using `GILOnceCell` to store a class in a static variable.
    ///
    /// `GILOnceCell` can be used to avoid importing a class multiple times:
    /// ```
    /// #![allow(deprecated)]
    /// # use pyo3::prelude::*;
    /// # use pyo3::sync::GILOnceCell;
    /// # use pyo3::types::{PyDict, PyType};
    /// # use pyo3::intern;
    /// #
    /// #[pyfunction]
    /// fn create_ordered_dict<'py>(py: Python<'py>, dict: Bound<'py, PyDict>) -> PyResult<Bound<'py, PyAny>> {
    ///     // Even if this function is called multiple times,
    ///     // the `OrderedDict` class will be imported only once.
    ///     static ORDERED_DICT: GILOnceCell<Py<PyType>> = GILOnceCell::new();
    ///     ORDERED_DICT
    ///         .import(py, "collections", "OrderedDict")?
    ///         .call1((dict,))
    /// }
    ///
    /// # Python::attach(|py| {
    /// #     let dict = PyDict::new(py);
    /// #     dict.set_item(intern!(py, "foo"), 42).unwrap();
    /// #     let fun = wrap_pyfunction!(create_ordered_dict, py).unwrap();
    /// #     let ordered_dict = fun.call1((&dict,)).unwrap();
    /// #     assert!(dict.eq(ordered_dict).unwrap());
    /// # });
    /// ```
    pub fn import<'py>(
        &self,
        py: Python<'py>,
        module_name: &str,
        attr_name: &str,
    ) -> PyResult<&Bound<'py, T>> {
        self.get_or_try_init(py, || {
            // `cast_into` target type is inferred as `T` from the cell's type;
            // failure converts into a `PyResult` error via `?`.
            let type_object = py.import(module_name)?.getattr(attr_name)?.cast_into()?;
            Ok(type_object.unbind())
        })
        .map(|ty| ty.bind(py))
    }
}
383
384#[allow(deprecated)]
385impl<T> Drop for GILOnceCell<T> {
386 fn drop(&mut self) {
387 if self.once.is_completed() {
388 // SAFETY: the cell has been written.
389 unsafe { MaybeUninit::assume_init_drop(self.data.get_mut()) }
390 }
391 }
392}
393
/// Interns `text` as a Python string and stores a reference to it in static storage.
///
/// A reference to the same Python string is returned on each invocation.
///
/// # Example: Using `intern!` to avoid needlessly recreating the same Python string
///
/// ```
/// use pyo3::intern;
/// # use pyo3::{prelude::*, types::PyDict};
///
/// #[pyfunction]
/// fn create_dict(py: Python<'_>) -> PyResult<Bound<'_, PyDict>> {
///     let dict = PyDict::new(py);
///     //             👇 A new `PyString` is created
///     //                for every call of this function.
///     dict.set_item("foo", 42)?;
///     Ok(dict)
/// }
///
/// #[pyfunction]
/// fn create_dict_faster(py: Python<'_>) -> PyResult<Bound<'_, PyDict>> {
///     let dict = PyDict::new(py);
///     //             👇 A `PyString` is created once and reused
///     //                for the lifetime of the program.
///     dict.set_item(intern!(py, "foo"), 42)?;
///     Ok(dict)
/// }
/// #
/// # Python::attach(|py| {
/// #     let fun_slow = wrap_pyfunction!(create_dict, py).unwrap();
/// #     let dict = fun_slow.call0().unwrap();
/// #     assert!(dict.contains("foo").unwrap());
/// #     let fun = wrap_pyfunction!(create_dict_faster, py).unwrap();
/// #     let dict = fun.call0().unwrap();
/// #     assert!(dict.contains("foo").unwrap());
/// # });
/// ```
#[macro_export]
macro_rules! intern {
    ($py: expr, $text: expr) => {{
        // One static per macro call site: the string is interned on first use
        // and the same Python object is returned on every later call.
        static INTERNED: $crate::sync::Interned = $crate::sync::Interned::new($text);
        INTERNED.get($py)
    }};
}
438
/// Implementation detail for `intern!` macro.
// Fields: the original `&'static str` text, and the lazily-created interned
// Python string object.
#[doc(hidden)]
pub struct Interned(&'static str, PyOnceLock<Py<PyString>>);
442
443impl Interned {
444 /// Creates an empty holder for an interned `str`.
445 pub const fn new(value: &'static str) -> Self {
446 Interned(value, PyOnceLock::new())
447 }
448
449 /// Gets or creates the interned `str` value.
450 #[inline]
451 pub fn get<'py>(&self, py: Python<'py>) -> &Bound<'py, PyString> {
452 self.1
453 .get_or_init(py, || PyString::intern(py, self.0).into())
454 .bind(py)
455 }
456}
457
/// Executes a closure with a Python critical section held on an object.
///
/// Acquires the per-object lock for the object `op` that is held
/// until the closure `f` is finished.
///
/// This is structurally equivalent to the use of the paired
/// Py_BEGIN_CRITICAL_SECTION and Py_END_CRITICAL_SECTION C-API macros.
///
/// A no-op on GIL-enabled builds, where the critical section API is exposed as
/// a no-op by the Python C API.
///
/// Provides weaker locking guarantees than traditional locks, but can in some
/// cases be used to provide guarantees similar to the GIL without the risk of
/// deadlocks associated with traditional locks.
///
/// Many CPython C API functions do not acquire the per-object lock on objects
/// passed to Python. You should not expect critical sections applied to
/// built-in types to prevent concurrent modification. This API is most useful
/// for user-defined types with full control over how the internal state for the
/// type is managed.
#[cfg_attr(not(Py_GIL_DISABLED), allow(unused_variables))]
pub fn with_critical_section<F, R>(object: &Bound<'_, PyAny>, f: F) -> R
where
    F: FnOnce() -> R,
{
    #[cfg(Py_GIL_DISABLED)]
    {
        // RAII wrapper ensuring the critical section is exited even if `f`
        // panics and unwinds.
        struct Guard(crate::ffi::PyCriticalSection);

        impl Drop for Guard {
            fn drop(&mut self) {
                unsafe {
                    crate::ffi::PyCriticalSection_End(&mut self.0);
                }
            }
        }

        // SAFETY: `PyCriticalSection_Begin` initializes the struct before it
        // is used; the zeroed value is never read.
        let mut guard = Guard(unsafe { std::mem::zeroed() });
        unsafe { crate::ffi::PyCriticalSection_Begin(&mut guard.0, object.as_ptr()) };
        f()
    }
    #[cfg(not(Py_GIL_DISABLED))]
    {
        // On GIL-enabled builds the GIL itself provides the exclusion.
        f()
    }
}
504
/// Executes a closure with a Python critical section held on two objects.
///
/// Acquires the per-object lock for the objects `a` and `b` that are held
/// until the closure `f` is finished.
///
/// This is structurally equivalent to the use of the paired
/// Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2 C-API macros.
///
/// A no-op on GIL-enabled builds, where the critical section API is exposed as
/// a no-op by the Python C API.
///
/// Provides weaker locking guarantees than traditional locks, but can in some
/// cases be used to provide guarantees similar to the GIL without the risk of
/// deadlocks associated with traditional locks.
///
/// Many CPython C API functions do not acquire the per-object lock on objects
/// passed to Python. You should not expect critical sections applied to
/// built-in types to prevent concurrent modification. This API is most useful
/// for user-defined types with full control over how the internal state for the
/// type is managed.
#[cfg_attr(not(Py_GIL_DISABLED), allow(unused_variables))]
pub fn with_critical_section2<F, R>(a: &Bound<'_, PyAny>, b: &Bound<'_, PyAny>, f: F) -> R
where
    F: FnOnce() -> R,
{
    #[cfg(Py_GIL_DISABLED)]
    {
        // RAII wrapper ensuring both locks are released even if `f` panics.
        struct Guard(crate::ffi::PyCriticalSection2);

        impl Drop for Guard {
            fn drop(&mut self) {
                unsafe {
                    crate::ffi::PyCriticalSection2_End(&mut self.0);
                }
            }
        }

        // SAFETY: `PyCriticalSection2_Begin` initializes the struct before it
        // is used; the zeroed value is never read.
        let mut guard = Guard(unsafe { std::mem::zeroed() });
        unsafe { crate::ffi::PyCriticalSection2_Begin(&mut guard.0, a.as_ptr(), b.as_ptr()) };
        f()
    }
    #[cfg(not(Py_GIL_DISABLED))]
    {
        // On GIL-enabled builds the GIL itself provides the exclusion.
        f()
    }
}
551
// Private sealing module: prevents downstream crates from implementing
// `OnceLockExt` for their own types.
mod once_lock_ext_sealed {
    pub trait Sealed {}
    impl<T> Sealed for std::sync::OnceLock<T> {}
}
556
/// Extension trait for [`Once`] to help avoid deadlocking when using a [`Once`] when attached to a
/// Python thread.
pub trait OnceExt: Sealed {
    /// The state type passed to force-style callbacks (e.g. [`OnceState`] for
    /// [`std::sync::Once`]).
    type OnceState;

    /// Similar to [`call_once`][Once::call_once], but releases the Python GIL temporarily
    /// if blocking on another thread currently calling this `Once`.
    fn call_once_py_attached(&self, py: Python<'_>, f: impl FnOnce());

    /// Similar to [`call_once_force`][Once::call_once_force], but releases the Python GIL
    /// temporarily if blocking on another thread currently calling this `Once`.
    fn call_once_force_py_attached(&self, py: Python<'_>, f: impl FnOnce(&Self::OnceState));
}
571
/// Extension trait for [`std::sync::OnceLock`] which helps avoid deadlocks between the Python
/// interpreter and initialization with the `OnceLock`.
pub trait OnceLockExt<T>: once_lock_ext_sealed::Sealed {
    /// Initializes this `OnceLock` with the given closure if it has not been initialized yet.
    ///
    /// If this function would block, this function detaches from the Python interpreter and
    /// reattaches before calling `f`. This avoids deadlocks between the Python interpreter and
    /// the `OnceLock` in cases where `f` can call arbitrary Python code, as calling arbitrary
    /// Python code can lead to `f` itself blocking on the Python interpreter.
    ///
    /// By detaching from the Python interpreter before blocking, this ensures that if `f` blocks
    /// then the Python interpreter cannot be blocked by `f` itself.
    fn get_or_init_py_attached<F>(&self, py: Python<'_>, f: F) -> &T
    where
        F: FnOnce() -> T;
}
588
/// Extension trait for [`std::sync::Mutex`] which helps avoid deadlocks between
/// the Python interpreter and acquiring the `Mutex`.
pub trait MutexExt<T>: Sealed {
    /// The result type returned by the `lock_py_attached` method.
    type LockResult<'a>
    where
        Self: 'a;

    /// Lock this `Mutex` in a manner that cannot deadlock with the Python interpreter.
    ///
    /// Before attempting to lock the mutex, this function detaches from the
    /// Python runtime. When the lock is acquired, it re-attaches to the Python
    /// runtime before returning the `LockResult`. This avoids deadlocks between
    /// the GIL and other global synchronization events triggered by the Python
    /// interpreter.
    fn lock_py_attached(&self, py: Python<'_>) -> Self::LockResult<'_>;
}
606
607impl OnceExt for Once {
608 type OnceState = OnceState;
609
610 fn call_once_py_attached(&self, py: Python<'_>, f: impl FnOnce()) {
611 if self.is_completed() {
612 return;
613 }
614
615 init_once_py_attached(self, py, f)
616 }
617
618 fn call_once_force_py_attached(&self, py: Python<'_>, f: impl FnOnce(&OnceState)) {
619 if self.is_completed() {
620 return;
621 }
622
623 init_once_force_py_attached(self, py, f);
624 }
625}
626
#[cfg(feature = "parking_lot")]
impl OnceExt for parking_lot::Once {
    type OnceState = parking_lot::OnceState;

    fn call_once_py_attached(&self, _py: Python<'_>, f: impl FnOnce()) {
        // Fast path: already initialized.
        if self.state().done() {
            return;
        }

        // SAFETY: detach from the runtime right before a possibly blocking call,
        // then reattach (by dropping the guard) before running `f`. If another
        // thread wins the race the closure never runs; the unused closure (and
        // the guard it captured) is dropped when `call_once` returns, which
        // also reattaches.
        let ts_guard = unsafe { SuspendAttach::new() };

        self.call_once(move || {
            drop(ts_guard);
            f();
        });
    }

    fn call_once_force_py_attached(
        &self,
        _py: Python<'_>,
        f: impl FnOnce(&parking_lot::OnceState),
    ) {
        if self.state().done() {
            return;
        }

        // SAFETY: as above — detached while potentially blocking, reattached
        // before the user callback executes.
        let ts_guard = unsafe { SuspendAttach::new() };

        self.call_once_force(move |state| {
            drop(ts_guard);
            f(&state);
        });
    }
}
661
662impl<T> OnceLockExt<T> for std::sync::OnceLock<T> {
663 fn get_or_init_py_attached<F>(&self, py: Python<'_>, f: F) -> &T
664 where
665 F: FnOnce() -> T,
666 {
667 // Use self.get() first to create a fast path when initialized
668 self.get()
669 .unwrap_or_else(|| init_once_lock_py_attached(self, py, f))
670 }
671}
672
impl<T> MutexExt<T> for std::sync::Mutex<T> {
    type LockResult<'a>
        = std::sync::LockResult<std::sync::MutexGuard<'a, T>>
    where
        Self: 'a;

    fn lock_py_attached(
        &self,
        _py: Python<'_>,
    ) -> std::sync::LockResult<std::sync::MutexGuard<'_, T>> {
        // If try_lock is successful or returns a poisoned mutex, return them so
        // the caller can deal with them. Otherwise we need to use blocking
        // lock, which requires detaching from the Python runtime to avoid
        // possible deadlocks.
        match self.try_lock() {
            Ok(inner) => return Ok(inner),
            Err(std::sync::TryLockError::Poisoned(inner)) => {
                return std::sync::LockResult::Err(inner)
            }
            Err(std::sync::TryLockError::WouldBlock) => {}
        }
        // SAFETY: detach from the runtime right before a possibly blocking call
        // then reattach when the blocking call completes and before calling
        // into the C API.
        let ts_guard = unsafe { SuspendAttach::new() };
        let res = self.lock();
        // Reattach before returning the guard to the caller.
        drop(ts_guard);
        res
    }
}
703
#[cfg(feature = "lock_api")]
impl<R: lock_api::RawMutex, T> MutexExt<T> for lock_api::Mutex<R, T> {
    type LockResult<'a>
        = lock_api::MutexGuard<'a, R, T>
    where
        Self: 'a;

    fn lock_py_attached(&self, _py: Python<'_>) -> lock_api::MutexGuard<'_, R, T> {
        // Fast path: uncontended lock needs no detach.
        if let Some(guard) = self.try_lock() {
            return guard;
        }

        // SAFETY: detach from the runtime right before a possibly blocking
        // call, then reattach before returning the guard.
        let ts_guard = unsafe { SuspendAttach::new() };
        let res = self.lock();
        drop(ts_guard);
        res
    }
}
722
#[cfg(feature = "arc_lock")]
impl<R, T> MutexExt<T> for std::sync::Arc<lock_api::Mutex<R, T>>
where
    R: lock_api::RawMutex,
{
    type LockResult<'a>
        = lock_api::ArcMutexGuard<R, T>
    where
        Self: 'a;

    fn lock_py_attached(&self, _py: Python<'_>) -> lock_api::ArcMutexGuard<R, T> {
        // Fast path: uncontended lock needs no detach.
        if let Some(guard) = self.try_lock_arc() {
            return guard;
        }

        // SAFETY: detach from the runtime right before a possibly blocking
        // call, then reattach before returning the guard.
        let ts_guard = unsafe { SuspendAttach::new() };
        let res = self.lock_arc();
        drop(ts_guard);
        res
    }
}
744
#[cfg(feature = "lock_api")]
impl<R, G, T> MutexExt<T> for lock_api::ReentrantMutex<R, G, T>
where
    R: lock_api::RawMutex,
    G: lock_api::GetThreadId,
{
    type LockResult<'a>
        = lock_api::ReentrantMutexGuard<'a, R, G, T>
    where
        Self: 'a;

    fn lock_py_attached(&self, _py: Python<'_>) -> lock_api::ReentrantMutexGuard<'_, R, G, T> {
        // Fast path: uncontended (or re-entrant) lock needs no detach.
        if let Some(guard) = self.try_lock() {
            return guard;
        }

        // SAFETY: detach from the runtime right before a possibly blocking
        // call, then reattach before returning the guard.
        let ts_guard = unsafe { SuspendAttach::new() };
        let res = self.lock();
        drop(ts_guard);
        res
    }
}
767
#[cfg(feature = "arc_lock")]
impl<R, G, T> MutexExt<T> for std::sync::Arc<lock_api::ReentrantMutex<R, G, T>>
where
    R: lock_api::RawMutex,
    G: lock_api::GetThreadId,
{
    type LockResult<'a>
        = lock_api::ArcReentrantMutexGuard<R, G, T>
    where
        Self: 'a;

    fn lock_py_attached(&self, _py: Python<'_>) -> lock_api::ArcReentrantMutexGuard<R, G, T> {
        // Fast path: uncontended (or re-entrant) lock needs no detach.
        if let Some(guard) = self.try_lock_arc() {
            return guard;
        }

        // SAFETY: detach from the runtime right before a possibly blocking
        // call, then reattach before returning the guard.
        let ts_guard = unsafe { SuspendAttach::new() };
        let res = self.lock_arc();
        drop(ts_guard);
        res
    }
}
790
// Cold path for `OnceExt::call_once_py_attached`: detaches from the Python
// runtime while potentially blocking on the `Once`, reattaching before `f` runs.
#[cold]
fn init_once_py_attached<F, T>(once: &Once, _py: Python<'_>, f: F)
where
    F: FnOnce() -> T,
{
    // SAFETY: detach from the runtime right before a possibly blocking call
    // then reattach when the blocking call completes and before calling
    // into the C API.
    let ts_guard = unsafe { SuspendAttach::new() };

    once.call_once(move || {
        // Reattach before running user code, which may call into the C API.
        drop(ts_guard);
        f();
    });
}
806
// Cold path for `OnceExt::call_once_force_py_attached`; see
// `init_once_py_attached` for the detach/reattach rationale.
#[cold]
fn init_once_force_py_attached<F, T>(once: &Once, _py: Python<'_>, f: F)
where
    F: FnOnce(&OnceState) -> T,
{
    // SAFETY: detach from the runtime right before a possibly blocking call
    // then reattach when the blocking call completes and before calling
    // into the C API.
    let ts_guard = unsafe { SuspendAttach::new() };

    once.call_once_force(move |state| {
        // Reattach before running user code, which may call into the C API.
        drop(ts_guard);
        f(state);
    });
}
822
823#[cold]
824fn init_once_lock_py_attached<'a, F, T>(
825 lock: &'a std::sync::OnceLock<T>,
826 _py: Python<'_>,
827 f: F,
828) -> &'a T
829where
830 F: FnOnce() -> T,
831{
832 // SAFETY: detach from the runtime right before a possibly blocking call
833 // then reattach when the blocking call completes and before calling
834 // into the C API.
835 let ts_guard = unsafe { SuspendAttach::new() };
836
837 // By having detached here, we guarantee that `.get_or_init` cannot deadlock with
838 // the Python interpreter
839 let value = lock.get_or_init(move || {
840 drop(ts_guard);
841 f()
842 });
843
844 value
845}
846
847#[cfg(test)]
848mod tests {
849 use super::*;
850
851 use crate::types::{PyDict, PyDictMethods};
852 #[cfg(not(target_arch = "wasm32"))]
853 use std::sync::Mutex;
854 #[cfg(not(target_arch = "wasm32"))]
855 #[cfg(feature = "macros")]
856 use std::sync::{
857 atomic::{AtomicBool, Ordering},
858 Barrier,
859 };
860
    // `#[pyclass]` wrappers used by the critical-section tests below; the
    // inner `AtomicBool` / `Vec` state is what the critical sections protect.
    #[cfg(not(target_arch = "wasm32"))]
    #[cfg(feature = "macros")]
    #[crate::pyclass(crate = "crate")]
    struct BoolWrapper(AtomicBool);

    #[cfg(not(target_arch = "wasm32"))]
    #[cfg(feature = "macros")]
    #[crate::pyclass(crate = "crate")]
    struct VecWrapper(Vec<isize>);
870
    // `intern!` must yield the same Python string for equal inputs regardless
    // of how the literal is spelled (`"foo"` vs `stringify!(foo)`), so all
    // three keys below hit the same dict entry.
    #[test]
    fn test_intern() {
        Python::attach(|py| {
            let foo1 = "foo";
            let foo2 = intern!(py, "foo");
            let foo3 = intern!(py, stringify!(foo));

            let dict = PyDict::new(py);
            dict.set_item(foo1, 42_usize).unwrap();
            assert!(dict.contains(foo2).unwrap());
            assert_eq!(
                dict.get_item(foo3)
                    .unwrap()
                    .unwrap()
                    .extract::<usize>()
                    .unwrap(),
                42
            );
        });
    }
891
    // Exercises the full `GILOnceCell` API: empty reads, failed and successful
    // fallible init, `take`/`into_inner`, and `clone_ref` on a cell of `Py`.
    #[test]
    #[allow(deprecated)]
    fn test_once_cell() {
        Python::attach(|py| {
            let mut cell = GILOnceCell::new();

            assert!(cell.get(py).is_none());

            // A failing initializer leaves the cell empty.
            assert_eq!(cell.get_or_try_init(py, || Err(5)), Err(5));
            assert!(cell.get(py).is_none());

            assert_eq!(cell.get_or_try_init(py, || Ok::<_, ()>(2)), Ok(&2));
            assert_eq!(cell.get(py), Some(&2));

            // Once written, the initializer is no longer invoked.
            assert_eq!(cell.get_or_try_init(py, || Err(5)), Ok(&2));

            // `take` empties the cell, so `into_inner` sees nothing.
            assert_eq!(cell.take(), Some(2));
            assert_eq!(cell.into_inner(), None);

            let cell_py = GILOnceCell::new();
            assert!(cell_py.clone_ref(py).get(py).is_none());
            cell_py.get_or_init(py, || py.None());
            assert!(cell_py.clone_ref(py).get(py).unwrap().is_none(py));
        })
    }
917
    // Verifies that dropping an initialized `GILOnceCell` drops the contained
    // value exactly once (via the `Drop` impl's `once.is_completed()` check).
    #[test]
    #[allow(deprecated)]
    fn test_once_cell_drop() {
        #[derive(Debug)]
        struct RecordDrop<'a>(&'a mut bool);

        impl Drop for RecordDrop<'_> {
            fn drop(&mut self) {
                *self.0 = true;
            }
        }

        Python::attach(|py| {
            let mut dropped = false;
            let cell = GILOnceCell::new();
            cell.set(py, RecordDrop(&mut dropped)).unwrap();
            let drop_container = cell.get(py).unwrap();

            assert!(!*drop_container.0);
            drop(cell);
            assert!(dropped);
        });
    }
941
    // One thread holds a critical section on a pyclass object while a second
    // thread blocks acquiring it; the second thread must observe the flag set
    // by the first. (A no-op exclusion-wise on GIL-enabled builds.)
    #[cfg(feature = "macros")]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    #[test]
    fn test_critical_section() {
        let barrier = Barrier::new(2);

        let bool_wrapper = Python::attach(|py| -> Py<BoolWrapper> {
            Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap()
        });

        std::thread::scope(|s| {
            s.spawn(|| {
                Python::attach(|py| {
                    let b = bool_wrapper.bind(py);
                    with_critical_section(b, || {
                        barrier.wait();
                        std::thread::sleep(std::time::Duration::from_millis(10));
                        b.borrow().0.store(true, Ordering::Release);
                    })
                });
            });
            s.spawn(|| {
                barrier.wait();
                Python::attach(|py| {
                    let b = bool_wrapper.bind(py);
                    // this blocks until the other thread's critical section finishes
                    with_critical_section(b, || {
                        assert!(b.borrow().0.load(Ordering::Acquire));
                    });
                });
            });
        });
    }
975
    // `with_critical_section2` locks two objects at once: two reader threads
    // each block on one of the objects and must see the flags set inside the
    // two-object critical section.
    #[cfg(feature = "macros")]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    #[test]
    fn test_critical_section2() {
        let barrier = Barrier::new(3);

        let (bool_wrapper1, bool_wrapper2) = Python::attach(|py| {
            (
                Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap(),
                Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap(),
            )
        });

        std::thread::scope(|s| {
            s.spawn(|| {
                Python::attach(|py| {
                    let b1 = bool_wrapper1.bind(py);
                    let b2 = bool_wrapper2.bind(py);
                    with_critical_section2(b1, b2, || {
                        barrier.wait();
                        std::thread::sleep(std::time::Duration::from_millis(10));
                        b1.borrow().0.store(true, Ordering::Release);
                        b2.borrow().0.store(true, Ordering::Release);
                    })
                });
            });
            s.spawn(|| {
                barrier.wait();
                Python::attach(|py| {
                    let b1 = bool_wrapper1.bind(py);
                    // this blocks until the other thread's critical section finishes
                    with_critical_section(b1, || {
                        assert!(b1.borrow().0.load(Ordering::Acquire));
                    });
                });
            });
            s.spawn(|| {
                barrier.wait();
                Python::attach(|py| {
                    let b2 = bool_wrapper2.bind(py);
                    // this blocks until the other thread's critical section finishes
                    with_critical_section(b2, || {
                        assert!(b2.borrow().0.load(Ordering::Acquire));
                    });
                });
            });
        });
    }
1024
1025 #[cfg(feature = "macros")]
1026 #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
1027 #[test]
1028 fn test_critical_section2_same_object_no_deadlock() {
1029 let barrier = Barrier::new(2);
1030
1031 let bool_wrapper = Python::attach(|py| -> Py<BoolWrapper> {
1032 Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap()
1033 });
1034
1035 std::thread::scope(|s| {
1036 s.spawn(|| {
1037 Python::attach(|py| {
1038 let b = bool_wrapper.bind(py);
1039 with_critical_section2(b, b, || {
1040 barrier.wait();
1041 std::thread::sleep(std::time::Duration::from_millis(10));
1042 b.borrow().0.store(true, Ordering::Release);
1043 })
1044 });
1045 });
1046 s.spawn(|| {
1047 barrier.wait();
1048 Python::attach(|py| {
1049 let b = bool_wrapper.bind(py);
1050 // this blocks until the other thread's critical section finishes
1051 with_critical_section(b, || {
1052 assert!(b.borrow().0.load(Ordering::Acquire));
1053 });
1054 });
1055 });
1056 });
1057 }
1058
1059 #[cfg(feature = "macros")]
1060 #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
1061 #[test]
1062 fn test_critical_section2_two_containers() {
1063 let (vec1, vec2) = Python::attach(|py| {
1064 (
1065 Py::new(py, VecWrapper(vec![1, 2, 3])).unwrap(),
1066 Py::new(py, VecWrapper(vec![4, 5])).unwrap(),
1067 )
1068 });
1069
1070 std::thread::scope(|s| {
1071 s.spawn(|| {
1072 Python::attach(|py| {
1073 let v1 = vec1.bind(py);
1074 let v2 = vec2.bind(py);
1075 with_critical_section2(v1, v2, || {
1076 // v2.extend(v1)
1077 v2.borrow_mut().0.extend(v1.borrow().0.iter());
1078 })
1079 });
1080 });
1081 s.spawn(|| {
1082 Python::attach(|py| {
1083 let v1 = vec1.bind(py);
1084 let v2 = vec2.bind(py);
1085 with_critical_section2(v1, v2, || {
1086 // v1.extend(v2)
1087 v1.borrow_mut().0.extend(v2.borrow().0.iter());
1088 })
1089 });
1090 });
1091 });
1092
1093 Python::attach(|py| {
1094 let v1 = vec1.bind(py);
1095 let v2 = vec2.bind(py);
1096 // execution order is not guaranteed, so we need to check both
1097 // NB: extend should be atomic, items must not be interleaved
1098 // v1.extend(v2)
1099 // v2.extend(v1)
1100 let expected1_vec1 = vec![1, 2, 3, 4, 5];
1101 let expected1_vec2 = vec![4, 5, 1, 2, 3, 4, 5];
1102 // v2.extend(v1)
1103 // v1.extend(v2)
1104 let expected2_vec1 = vec![1, 2, 3, 4, 5, 1, 2, 3];
1105 let expected2_vec2 = vec![4, 5, 1, 2, 3];
1106
1107 assert!(
1108 (v1.borrow().0.eq(&expected1_vec1) && v2.borrow().0.eq(&expected1_vec2))
1109 || (v1.borrow().0.eq(&expected2_vec1) && v2.borrow().0.eq(&expected2_vec2))
1110 );
1111 });
1112 }
1113
    #[test]
    #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
    fn test_once_ext() {
        // Exercises the `OnceExt` poisoning semantics: a panicking initializer
        // poisons the Once, the poison propagates to later `call_once` callers,
        // `call_once_force` observes and clears the poison, and after a success
        // further force calls return without running the closure. The macro runs
        // the same scenario against `std::sync::Once` and (behind the feature
        // gate) `parking_lot::Once`.
        macro_rules! test_once {
            // `$once`: expression building a fresh Once value;
            // `$is_poisoned`: predicate from the Once's state type to `bool`.
            ($once:expr, $is_poisoned:expr) => {{
                // adapted from the example in the docs for Once::try_once_force
                let init = $once;
                std::thread::scope(|s| {
                    // poison the once
                    let handle = s.spawn(|| {
                        Python::attach(|py| {
                            init.call_once_py_attached(py, || panic!());
                        })
                    });
                    assert!(handle.join().is_err());

                    // poisoning propagates
                    let handle = s.spawn(|| {
                        Python::attach(|py| {
                            init.call_once_py_attached(py, || {});
                        });
                    });

                    assert!(handle.join().is_err());

                    // call_once_force will still run and reset the poisoned state
                    Python::attach(|py| {
                        init.call_once_force_py_attached(py, |state| {
                            assert!($is_poisoned(state.clone()));
                        });

                        // once any success happens, we stop propagating the poison
                        init.call_once_py_attached(py, || {});
                    });

                    // calling call_once_force should return immediately without calling the closure
                    Python::attach(|py| init.call_once_force_py_attached(py, |_| panic!()));
                });
            }};
        }

        test_once!(Once::new(), OnceState::is_poisoned);
        #[cfg(feature = "parking_lot")]
        test_once!(parking_lot::Once::new(), parking_lot::OnceState::poisoned);
    }
1159
1160 #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
1161 #[test]
1162 fn test_once_lock_ext() {
1163 let cell = std::sync::OnceLock::new();
1164 std::thread::scope(|s| {
1165 assert!(cell.get().is_none());
1166
1167 s.spawn(|| {
1168 Python::attach(|py| {
1169 assert_eq!(*cell.get_or_init_py_attached(py, || 12345), 12345);
1170 });
1171 });
1172 });
1173 assert_eq!(cell.get(), Some(&12345));
1174 }
1175
1176 #[cfg(feature = "macros")]
1177 #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
1178 #[test]
1179 fn test_mutex_ext() {
1180 let barrier = Barrier::new(2);
1181
1182 let mutex = Python::attach(|py| -> Mutex<Py<BoolWrapper>> {
1183 Mutex::new(Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap())
1184 });
1185
1186 std::thread::scope(|s| {
1187 s.spawn(|| {
1188 Python::attach(|py| {
1189 let b = mutex.lock_py_attached(py).unwrap();
1190 barrier.wait();
1191 // sleep to ensure the other thread actually blocks
1192 std::thread::sleep(std::time::Duration::from_millis(10));
1193 (*b).bind(py).borrow().0.store(true, Ordering::Release);
1194 drop(b);
1195 });
1196 });
1197 s.spawn(|| {
1198 barrier.wait();
1199 Python::attach(|py| {
1200 // blocks until the other thread releases the lock
1201 let b = mutex.lock_py_attached(py).unwrap();
1202 assert!((*b).bind(py).borrow().0.load(Ordering::Acquire));
1203 });
1204 });
1205 });
1206 }
1207
    #[cfg(feature = "macros")]
    #[cfg(all(
        any(feature = "parking_lot", feature = "lock_api"),
        not(target_arch = "wasm32") // We are building wasm Python with pthreads disabled
    ))]
    #[test]
    fn test_parking_lot_mutex_ext() {
        // Exercises `MutexExt::lock_py_attached` for the parking_lot mutex
        // flavours (plain, reentrant, and the Arc-owning guards behind the
        // `arc_lock` feature). Same scenario as `test_mutex_ext`: one thread
        // holds the lock while attached to the interpreter and a second thread
        // blocks on it without deadlocking.
        macro_rules! test_mutex {
            // `$guard`: guard type that `lock_py_attached` is expected to return;
            // `$mutex`: closure building the mutex, run under `Python::attach`.
            ($guard:ty ,$mutex:stmt) => {{
                let barrier = Barrier::new(2);

                let mutex = Python::attach({ $mutex });

                std::thread::scope(|s| {
                    s.spawn(|| {
                        Python::attach(|py| {
                            let b: $guard = mutex.lock_py_attached(py);
                            barrier.wait();
                            // sleep to ensure the other thread actually blocks
                            std::thread::sleep(std::time::Duration::from_millis(10));
                            (*b).bind(py).borrow().0.store(true, Ordering::Release);
                            drop(b);
                        });
                    });
                    s.spawn(|| {
                        barrier.wait();
                        Python::attach(|py| {
                            // blocks until the other thread releases the lock
                            let b: $guard = mutex.lock_py_attached(py);
                            assert!((*b).bind(py).borrow().0.load(Ordering::Acquire));
                        });
                    });
                });
            }};
        }

        test_mutex!(parking_lot::MutexGuard<'_, _>, |py| {
            parking_lot::Mutex::new(Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap())
        });

        test_mutex!(parking_lot::ReentrantMutexGuard<'_, _>, |py| {
            parking_lot::ReentrantMutex::new(
                Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap(),
            )
        });

        // The `arc_lock` guards own an `Arc` to the mutex instead of borrowing it.
        #[cfg(feature = "arc_lock")]
        test_mutex!(parking_lot::ArcMutexGuard<_, _>, |py| {
            let mutex =
                parking_lot::Mutex::new(Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap());
            std::sync::Arc::new(mutex)
        });

        #[cfg(feature = "arc_lock")]
        test_mutex!(parking_lot::ArcReentrantMutexGuard<_, _, _>, |py| {
            let mutex =
                parking_lot::ReentrantMutex::new(Py::new(py, BoolWrapper(AtomicBool::new(false))).unwrap());
            std::sync::Arc::new(mutex)
        });
    }
1268
1269 #[cfg(not(target_arch = "wasm32"))] // We are building wasm Python with pthreads disabled
1270 #[test]
1271 fn test_mutex_ext_poison() {
1272 let mutex = Mutex::new(42);
1273
1274 std::thread::scope(|s| {
1275 let lock_result = s.spawn(|| {
1276 Python::attach(|py| {
1277 let _unused = mutex.lock_py_attached(py);
1278 panic!();
1279 });
1280 });
1281 assert!(lock_result.join().is_err());
1282 assert!(mutex.is_poisoned());
1283 });
1284 let guard = Python::attach(|py| {
1285 // recover from the poisoning
1286 match mutex.lock_py_attached(py) {
1287 Ok(guard) => guard,
1288 Err(poisoned) => poisoned.into_inner(),
1289 }
1290 });
1291 assert!(*guard == 42);
1292 }
1293}