From 080803ded1595bd914a294452a90ba38eba85218 Mon Sep 17 00:00:00 2001 From: Daroc Alden Date: Mon, 9 Nov 2020 20:45:44 -0500 Subject: [PATCH 1/3] Use no-std-compat to transition to no-std This commit uses the no-std-compat crate to enable use of shredder in a no-std environment. This comes with a few caveats when using a no-std environment: - Debug and error print statements are replaced with no-ops - Locks (Mutexes and RwLocks) use the spin crate - Hash tables use the hashbrown crate, which is _not_ HashDoS resistant. I don't believe that any of these constitute a serious problem for shredder. Shredder can be used in this way by disabling the 'std' feature. --- Cargo.toml | 7 ++++++- src/atomic.rs | 1 + src/collector/alloc.rs | 1 + src/collector/collect_impl.rs | 1 + src/collector/data.rs | 1 + src/collector/dropper.rs | 2 ++ src/collector/mod.rs | 1 + src/collector/trigger.rs | 1 + src/concurrency/atomic_protection.rs | 1 + src/concurrency/chunked_ll.rs | 1 + src/concurrency/lockout.rs | 1 + src/lib.rs | 5 +++++ src/marker/gc_deref.rs | 2 ++ src/marker/gc_safe.rs | 1 + src/r.rs | 1 + src/scan.rs | 1 + src/smart_ptr/deref_gc.rs | 2 ++ src/smart_ptr/gc.rs | 1 + src/std_impls/collections.rs | 1 + src/std_impls/mod.rs | 1 + src/std_impls/value_types.rs | 2 ++ src/std_impls/wrap_types.rs | 1 + src/wrappers.rs | 1 + 23 files changed, 36 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6b30a29..2f011ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,10 @@ shredder_derive = { git = "https://github.com/Others/shredder_derive.git" } #shredder_derive = { path = "../shredder_derive" } stable_deref_trait = "1.1" +[dependencies.no-std-compat] +version = "0.4.1" +features = [ "alloc", "compat_hash", "compat_sync", "compat_macros" ] + [dev-dependencies] paste = "1.0" rand = "0.7.3" @@ -35,5 +39,6 @@ trybuild = "1.0" #debug = true [features] -default = [] +default = [ "std" ] # Default to using the std +std = [ "no-std-compat/std" ] nightly-features 
= [] diff --git a/src/atomic.rs b/src/atomic.rs index f249a47..08e0ed9 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -1,5 +1,6 @@ use std::marker::PhantomData; use std::mem; +use std::prelude::v1::*; use std::ptr::drop_in_place; use std::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Arc; diff --git a/src/collector/alloc.rs b/src/collector/alloc.rs index dca7c4a..3b5bb37 100644 --- a/src/collector/alloc.rs +++ b/src/collector/alloc.rs @@ -1,6 +1,7 @@ use std::alloc::{alloc, dealloc, Layout}; use std::mem::{self, ManuallyDrop}; use std::panic::UnwindSafe; +use std::prelude::v1::*; use std::ptr; use crate::collector::InternalGcRef; diff --git a/src/collector/collect_impl.rs b/src/collector/collect_impl.rs index f4eb08d..60b0d00 100644 --- a/src/collector/collect_impl.rs +++ b/src/collector/collect_impl.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::Ordering; use crossbeam::deque::Injector; diff --git a/src/collector/data.rs b/src/collector/data.rs index dc9bc3c..dcffed2 100644 --- a/src/collector/data.rs +++ b/src/collector/data.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; use std::sync::Arc; diff --git a/src/collector/dropper.rs b/src/collector/dropper.rs index 594aa99..a0c905b 100644 --- a/src/collector/dropper.rs +++ b/src/collector/dropper.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + use std::panic::catch_unwind; use std::sync::atomic::Ordering; use std::sync::Arc; diff --git a/src/collector/mod.rs b/src/collector/mod.rs index 9fb78aa..d0c4469 100644 --- a/src/collector/mod.rs +++ b/src/collector/mod.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; mod alloc; mod collect_impl; mod data; diff --git a/src/collector/trigger.rs b/src/collector/trigger.rs index 2620233..2acb39d 100644 --- a/src/collector/trigger.rs +++ b/src/collector/trigger.rs @@ -1,4 +1,5 @@ use parking_lot::Mutex; +use std::prelude::v1::*; // TODO(issue): https://github.com/Others/shredder/issues/8 
const DEFAULT_ALLOCATION_TRIGGER_PERCENT: f32 = 0.75; diff --git a/src/concurrency/atomic_protection.rs b/src/concurrency/atomic_protection.rs index 64c2bc4..d48fb57 100644 --- a/src/concurrency/atomic_protection.rs +++ b/src/concurrency/atomic_protection.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicU64, Ordering}; use std::thread::yield_now; diff --git a/src/concurrency/chunked_ll.rs b/src/concurrency/chunked_ll.rs index 8ae1299..27a3561 100644 --- a/src/concurrency/chunked_ll.rs +++ b/src/concurrency/chunked_ll.rs @@ -1,4 +1,5 @@ use std::mem::{self, MaybeUninit}; +use std::prelude::v1::*; use std::ptr; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::sync::Arc; diff --git a/src/concurrency/lockout.rs b/src/concurrency/lockout.rs index 9f4e95d..4d9e67e 100644 --- a/src/concurrency/lockout.rs +++ b/src/concurrency/lockout.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; diff --git a/src/lib.rs b/src/lib.rs index d6aca4d..590bece 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -41,6 +41,9 @@ clippy::module_name_repetitions, // Sometimes clear naming calls for repetition clippy::multiple_crate_versions // There is no way to easily fix this without modifying our dependencies )] +#![no_std] + +extern crate no_std_compat as std; #[macro_use] extern crate crossbeam; @@ -70,6 +73,8 @@ pub mod wrappers; use std::cell::RefCell; use std::sync::{Mutex, RwLock}; +use std::prelude::v1::*; + use crate::collector::COLLECTOR; pub use crate::finalize::Finalize; diff --git a/src/marker/gc_deref.rs b/src/marker/gc_deref.rs index 1685f3d..4b3f60b 100644 --- a/src/marker/gc_deref.rs +++ b/src/marker/gc_deref.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + /// A marker trait that marks that this data can be stored in a `DerefGc` /// /// `T` can be `GcDeref` only if it is deeply immutable through a `&T`. 
This is because it's diff --git a/src/marker/gc_safe.rs b/src/marker/gc_safe.rs index 8b35211..80d6cc2 100644 --- a/src/marker/gc_safe.rs +++ b/src/marker/gc_safe.rs @@ -1,5 +1,6 @@ use std::hash::{Hash, Hasher}; use std::ops::{Deref, DerefMut}; +use std::prelude::v1::*; /// A marker trait that marks that data can be scanned in the background by the garbage collector. /// diff --git a/src/r.rs b/src/r.rs index 98504e6..e6c27ea 100644 --- a/src/r.rs +++ b/src/r.rs @@ -2,6 +2,7 @@ use std::cmp::Ordering; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; +use std::prelude::v1::*; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner}; diff --git a/src/scan.rs b/src/scan.rs index 8346e10..cff0599 100644 --- a/src/scan.rs +++ b/src/scan.rs @@ -1,5 +1,6 @@ use crate::collector::InternalGcRef; use crate::marker::GcSafe; +use std::prelude::v1::*; /// A trait capturing the ability of data to be scanned for references to data in a `Gc`. 
/// diff --git a/src/smart_ptr/deref_gc.rs b/src/smart_ptr/deref_gc.rs index f964cda..5a46b34 100644 --- a/src/smart_ptr/deref_gc.rs +++ b/src/smart_ptr/deref_gc.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + #[cfg(feature = "nightly-features")] use std::{marker::Unsize, ops::CoerceUnsized}; diff --git a/src/smart_ptr/gc.rs b/src/smart_ptr/gc.rs index 4c3f490..62e1477 100644 --- a/src/smart_ptr/gc.rs +++ b/src/smart_ptr/gc.rs @@ -5,6 +5,7 @@ use std::cmp::Ordering; use std::fmt::{self, Debug, Display, Formatter}; use std::hash::{Hash, Hasher}; use std::ops::Deref; +use std::prelude::v1::*; use std::sync; use std::sync::atomic; #[cfg(feature = "nightly-features")] diff --git a/src/std_impls/collections.rs b/src/std_impls/collections.rs index 525b22d..069a530 100644 --- a/src/std_impls/collections.rs +++ b/src/std_impls/collections.rs @@ -3,6 +3,7 @@ use crate::{Finalize, Scan, Scanner}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::BuildHasher; use std::mem::forget; +use std::prelude::v1::*; use std::ptr::read; // For pretty much all simple collections, the collection inherets the properites of what it contains diff --git a/src/std_impls/mod.rs b/src/std_impls/mod.rs index 8b3b9ab..d531e5b 100644 --- a/src/std_impls/mod.rs +++ b/src/std_impls/mod.rs @@ -7,6 +7,7 @@ mod wrap_types; mod test { use std::cell::Cell; use std::panic::catch_unwind; + use std::prelude::v1::*; use std::sync::{Mutex, RwLock}; use crate::collector::{get_mock_handle, InternalGcRef}; diff --git a/src/std_impls/value_types.rs b/src/std_impls/value_types.rs index 45c7a3b..7650de0 100644 --- a/src/std_impls/value_types.rs +++ b/src/std_impls/value_types.rs @@ -1,4 +1,5 @@ use std::collections::hash_map::RandomState; +use std::prelude::v1::*; use std::ptr::drop_in_place; use std::time::{Duration, Instant}; @@ -46,6 +47,7 @@ sync_value_type!(RandomState); #[cfg(test)] mod test { use std::mem::forget; + use std::prelude::v1::*; use std::time::Instant; use 
crate::Finalize; diff --git a/src/std_impls/wrap_types.rs b/src/std_impls/wrap_types.rs index 35a4754..e2b41de 100644 --- a/src/std_impls/wrap_types.rs +++ b/src/std_impls/wrap_types.rs @@ -1,5 +1,6 @@ use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner}; +use std::prelude::v1::*; use std::cell::{Cell, RefCell}; use std::sync::{Arc, Mutex, RwLock, TryLockError}; diff --git a/src/wrappers.rs b/src/wrappers.rs index 5fea26b..b1e512e 100644 --- a/src/wrappers.rs +++ b/src/wrappers.rs @@ -1,6 +1,7 @@ use std::cell::{BorrowError, BorrowMutError, RefCell}; use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; +use std::prelude::v1::*; use std::sync::{self, TryLockError}; use crate::{GcGuard, Scan}; From a34899c422a058a36bcf24f6317e43df292f9128 Mon Sep 17 00:00:00 2001 From: Daroc Alden Date: Tue, 10 Nov 2020 07:35:20 -0500 Subject: [PATCH 2/3] Allow users to supply their own spawn function --- .circleci/config.yml | 3 + Cargo.toml | 3 +- src/atomic.rs | 23 ++++--- src/collector/alloc.rs | 3 + src/collector/dropper.rs | 36 ++++++++--- src/collector/mod.rs | 45 +++++++++++--- src/concurrency/atomic_protection.rs | 6 ++ src/lib.rs | 18 +++--- src/smart_ptr/deref_gc.rs | 14 ++--- src/smart_ptr/gc.rs | 16 ++--- src/std_impls/mod.rs | 3 + src/std_impls/value_types.rs | 13 +++- src/std_impls/wrap_types.rs | 35 ++++++++++- src/wrappers.rs | 91 ++++++++++++++++++++-------- 14 files changed, 235 insertions(+), 74 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a7e6430..1cb44b6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -89,6 +89,9 @@ jobs: - run: name: Run all tests command: cargo test --all + - run: + name: Run all tests (no-std version) + command: cargo test --all --no-default-features rust/coverage: machine: true steps: diff --git a/Cargo.toml b/Cargo.toml index 2f011ba..114f348 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ trybuild = "1.0" #debug = true [features] 
-default = [ "std" ] # Default to using the std +default = [ "std", "threads" ] # Default to using the std std = [ "no-std-compat/std" ] +threads = [] nightly-features = [] diff --git a/src/atomic.rs b/src/atomic.rs index 08e0ed9..dd0aa77 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -5,7 +5,7 @@ use std::ptr::drop_in_place; use std::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Arc; -use crate::collector::{GcData, InternalGcRef, COLLECTOR}; +use crate::collector::{get_collector, GcData, InternalGcRef}; use crate::marker::{GcDeref, GcSafe}; use crate::{Finalize, Gc, Scan, Scanner}; @@ -48,7 +48,7 @@ impl AtomicGc { Self { atomic_ptr: atomic_ptr.clone(), - backing_handle: COLLECTOR.new_handle_for_atomic(atomic_ptr), + backing_handle: get_collector().new_handle_for_atomic(atomic_ptr), _mark: PhantomData, } } @@ -65,7 +65,8 @@ impl AtomicGc { let ptr; let internal_handle; { - let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); + let _collector = get_collector(); + let _collection_blocker = _collector.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, @@ -80,7 +81,7 @@ impl AtomicGc { mem::forget(gc_data_temp); ptr = new_gc_data_ref.scan_ptr().cast(); - internal_handle = COLLECTOR.handle_from_data(new_gc_data_ref); + internal_handle = get_collector().handle_from_data(new_gc_data_ref); } Gc::new_raw(internal_handle, ptr) @@ -97,7 +98,8 @@ impl AtomicGc { let raw_data_ptr = Arc::as_ptr(data); { - let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); + let collector = get_collector(); + let _collection_blocker = collector.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, @@ -121,7 +123,8 @@ impl AtomicGc { let ptr; let internal_handle; { - let 
_collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); + let collector = get_collector(); + let _collection_blocker = collector.get_collection_blocker_spinlock(); let old_data_ptr = self.atomic_ptr.swap(raw_data_ptr as _, ordering); // Safe to manipulate this ptr only because we have the `_collection_blocker` @@ -133,7 +136,7 @@ impl AtomicGc { mem::forget(old_data_arc); ptr = gc_data.scan_ptr().cast(); - internal_handle = COLLECTOR.handle_from_data(gc_data); + internal_handle = get_collector().handle_from_data(gc_data); } Gc::new_raw(internal_handle, ptr) @@ -165,7 +168,8 @@ impl AtomicGc { let compare_res; { - let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); + let collector = get_collector(); + let _collection_blocker = collector.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, // otherwise someone would be accessing an `AtomicGc` pointing to freed data--which @@ -209,7 +213,8 @@ impl AtomicGc { let compare_res; { - let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); + let collector = get_collector(); + let _collection_blocker = collector.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, // otherwise someone would be accessing an `AtomicGc` pointing to freed data--which diff --git a/src/collector/alloc.rs b/src/collector/alloc.rs index 3b5bb37..acb37cf 100644 --- a/src/collector/alloc.rs +++ b/src/collector/alloc.rs @@ -1,5 +1,7 @@ use std::alloc::{alloc, dealloc, Layout}; use std::mem::{self, ManuallyDrop}; + +#[cfg(feature = "std")] use std::panic::UnwindSafe; use std::prelude::v1::*; use std::ptr; @@ -28,6 +30,7 @@ pub enum DeallocationAction { // It also, by contract of Scan, cannot have a Drop method that is unsafe in any 
thead unsafe impl Send for GcAllocation {} // Therefore, GcDataPtr is also UnwindSafe in the context we need it to be +#[cfg(feature = "std")] impl UnwindSafe for GcAllocation {} // We use the lockout to ensure that `GcDataPtr`s are not shared unsafe impl Sync for GcAllocation {} diff --git a/src/collector/dropper.rs b/src/collector/dropper.rs index a0c905b..9b7032c 100644 --- a/src/collector/dropper.rs +++ b/src/collector/dropper.rs @@ -1,9 +1,9 @@ use std::prelude::v1::*; +#[cfg(feature = "std")] use std::panic::catch_unwind; use std::sync::atomic::Ordering; use std::sync::Arc; -use std::thread::spawn; use crossbeam::channel::{self, SendError, Sender}; use parking_lot::RwLock; @@ -24,11 +24,15 @@ pub(crate) enum DropMessage { } impl BackgroundDropper { - pub fn new() -> Self { + pub fn new(spawn: &'static F) -> Self + where + F: Fn(Box), + F: Sized, + { let (sender, receiver) = channel::unbounded(); // The drop thread deals with doing all the Drops this collector needs to do - spawn(move || { + spawn(Box::new(move || { // An Err value means the stream will never recover while let Ok(drop_msg) = receiver.recv() { match drop_msg { @@ -44,12 +48,26 @@ impl BackgroundDropper { // Then run the drops if needed to_drop.par_iter().for_each(|data| { let underlying_allocation = data.underlying_allocation; - let res = catch_unwind(move || unsafe { - underlying_allocation.deallocate(); - }); - if let Err(e) = res { - eprintln!("Gc background drop failed: {:?}", e); + + // When the stdlib is available, we can use catch_unwind + // to protect ourselves against panics that unwind. + #[cfg(feature = "std")] + { + let res = catch_unwind(move || unsafe { + underlying_allocation.deallocate(); + }); + if let Err(e) = res { + eprintln!("Gc background drop failed: {:?}", e); + } } + + // When it is not available, however, panics probably + // won't unwind, and there's no safe means to catch + // a panic. + // + // TODO is there a better way to safely handle this? 
+ #[cfg(not(feature = "std"))] + underlying_allocation.deallocate(); }); } DropMessage::SyncUp(responder) => { @@ -59,7 +77,7 @@ impl BackgroundDropper { } } } - }); + })); Self { sender } } diff --git a/src/collector/mod.rs b/src/collector/mod.rs index d0c4469..fe653ed 100644 --- a/src/collector/mod.rs +++ b/src/collector/mod.rs @@ -7,10 +7,13 @@ mod trigger; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; use std::sync::Arc; + +#[cfg(feature = "threads")] use std::thread::spawn; +use once_cell::sync::OnceCell; + use crossbeam::channel::{self, Sender}; -use once_cell::sync::Lazy; use parking_lot::Mutex; use crate::collector::alloc::GcAllocation; @@ -37,7 +40,7 @@ impl InternalGcRef { } pub(crate) fn invalidate(&self) { - COLLECTOR.drop_handle(self); + get_collector().drop_handle(self); } pub(crate) fn data(&self) -> &Arc { @@ -88,14 +91,18 @@ struct TrackedData { // TODO(issue): https://github.com/Others/shredder/issues/7 impl Collector { - fn new() -> Arc { + fn new(spawn: &'static F) -> Arc + where + F: Fn(Box), + F: Sized, + { let (async_gc_notifier, async_gc_receiver) = channel::bounded(1); let res = Arc::new(Self { gc_lock: Mutex::default(), atomic_spinlock: AtomicProtectingSpinlock::default(), trigger: GcTrigger::default(), - dropper: BackgroundDropper::new(), + dropper: BackgroundDropper::new(spawn), async_gc_notifier, tracked_data: TrackedData { // This is janky, but we subtract one from the collection number @@ -112,14 +119,14 @@ impl Collector { // The async Gc thread deals with background Gc'ing let async_collector_ref = Arc::downgrade(&res); - spawn(move || { + spawn(Box::new(move || { // An Err value means the stream will never recover while async_gc_receiver.recv().is_ok() { if let Some(collector) = async_collector_ref.upgrade() { collector.check_then_collect(); } } - }); + })); res } @@ -319,7 +326,31 @@ impl Collector { } } -pub static COLLECTOR: Lazy> = Lazy::new(Collector::new); +static COLLECTOR: OnceCell> = OnceCell::new(); 
+ +#[cfg(not(feature = "threads"))] +pub fn initialize_threading(spawn: &'static F) +where + F: Fn(Box), +{ + COLLECTOR.set(Collector::new(spawn)); +} + +#[cfg(feature = "threads")] +pub fn get_collector() -> Arc { + COLLECTOR + .get_or_init(|| { + Collector::new(&|e| { + spawn(e); + }) + }) + .clone() +} + +#[cfg(not(feature = "threads"))] +pub fn get_collector() -> Arc { + *(COLLECTOR.get().expect("Attempted to use Gc without calling shredder::initialize_threading or using the threads from stdlib")) +} #[cfg(test)] pub(crate) fn get_mock_handle() -> InternalGcRef { diff --git a/src/concurrency/atomic_protection.rs b/src/concurrency/atomic_protection.rs index d48fb57..1a68880 100644 --- a/src/concurrency/atomic_protection.rs +++ b/src/concurrency/atomic_protection.rs @@ -1,5 +1,7 @@ use std::prelude::v1::*; use std::sync::atomic::{AtomicU64, Ordering}; + +#[cfg(feature = "threads")] use std::thread::yield_now; const SENTINEL_VALUE: u64 = 1 << 60; @@ -39,6 +41,10 @@ impl AtomicProtectingSpinlock { } // Try to be kind to our scheduler, even as we employ an anti-pattern + // + // Without threading support, we'll just have to busy-wait. + // Should we let the user supply a 'yield' function of their own? 
+ #[cfg(feature = "threads")] yield_now() } } diff --git a/src/lib.rs b/src/lib.rs index 590bece..c56bbca 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,7 +39,8 @@ clippy::cast_precision_loss, // There is no way to avoid this precision loss clippy::explicit_deref_methods, // Sometimes calling `deref` directly is clearer clippy::module_name_repetitions, // Sometimes clear naming calls for repetition - clippy::multiple_crate_versions // There is no way to easily fix this without modifying our dependencies + clippy::multiple_crate_versions, // There is no way to easily fix this without modifying our dependencies + clippy::wildcard_imports, // No-std compatibility layer requires these for ergonomics )] #![no_std] @@ -75,7 +76,10 @@ use std::sync::{Mutex, RwLock}; use std::prelude::v1::*; -use crate::collector::COLLECTOR; +use crate::collector::get_collector; + +#[cfg(not(feature = "threads"))] +pub use crate::collector::initialize_threading; pub use crate::finalize::Finalize; pub use crate::r::{RMut, R}; @@ -108,7 +112,7 @@ pub type GRwLock = Gc>; /// ``` #[must_use] pub fn number_of_tracked_allocations() -> usize { - COLLECTOR.tracked_data_count() + get_collector().tracked_data_count() } /// Returns how many `Gc`s are currently in use. @@ -122,7 +126,7 @@ pub fn number_of_tracked_allocations() -> usize { /// ``` #[must_use] pub fn number_of_active_handles() -> usize { - COLLECTOR.handle_count() + get_collector().handle_count() } /// Sets the percent more data that'll trigger collection. 
@@ -147,7 +151,7 @@ pub fn set_gc_trigger_percent(percent: f32) { percent ) } - COLLECTOR.set_gc_trigger_percent(percent) + get_collector().set_gc_trigger_percent(percent) } /// A function for manually running a collection, ignoring the heuristic that governs normal @@ -164,7 +168,7 @@ pub fn set_gc_trigger_percent(percent: f32) { /// collect(); // Manually run GC /// ``` pub fn collect() { - COLLECTOR.collect(); + get_collector().collect(); } /// Block the current thread until the background thread has finished running the destructors for @@ -184,7 +188,7 @@ pub fn collect() { /// // At this point all destructors for garbage will have been run /// ``` pub fn synchronize_destructors() { - COLLECTOR.synchronize_destructors() + get_collector().synchronize_destructors() } /// A convenience method for helping ensure your destructors are run. diff --git a/src/smart_ptr/deref_gc.rs b/src/smart_ptr/deref_gc.rs index 5a46b34..818f467 100644 --- a/src/smart_ptr/deref_gc.rs +++ b/src/smart_ptr/deref_gc.rs @@ -3,7 +3,7 @@ use std::prelude::v1::*; #[cfg(feature = "nightly-features")] use std::{marker::Unsize, ops::CoerceUnsized}; -use crate::collector::{InternalGcRef, COLLECTOR}; +use crate::collector::{get_collector, InternalGcRef}; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner, ToScan}; @@ -37,7 +37,7 @@ impl DerefGc { where T: Sized + GcDrop, { - let (handle, ptr) = COLLECTOR.track_with_drop(v); + let (handle, ptr) = get_collector().track_with_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -53,7 +53,7 @@ impl DerefGc { where T: Sized, { - let (handle, ptr) = COLLECTOR.track_with_no_drop(v); + let (handle, ptr) = get_collector().track_with_no_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -73,7 +73,7 @@ impl DerefGc { where T: Sized + Finalize, { - let (handle, ptr) = COLLECTOR.track_with_finalization(v); + let (handle, ptr) = get_collector().track_with_finalization(v); Self { backing_handle: handle, direct_ptr: 
ptr, @@ -89,7 +89,7 @@ impl DerefGc { where T: ToScan + GcDrop, { - let (handle, ptr) = COLLECTOR.track_boxed_value(v); + let (handle, ptr) = get_collector().track_boxed_value(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -112,7 +112,7 @@ impl DerefGc { let ptr: &T = self.deref(); if ptr.type_id() == TypeId::of::() { - let new_handle = COLLECTOR.clone_handle(&self.backing_handle); + let new_handle = get_collector().clone_handle(&self.backing_handle); Some(DerefGc { backing_handle: new_handle, @@ -146,7 +146,7 @@ unsafe impl Scan for DerefGc { impl Clone for DerefGc { fn clone(&self) -> Self { - let new_handle = COLLECTOR.clone_handle(&self.backing_handle); + let new_handle = get_collector().clone_handle(&self.backing_handle); Self { backing_handle: new_handle, diff --git a/src/smart_ptr/gc.rs b/src/smart_ptr/gc.rs index 62e1477..53ac868 100644 --- a/src/smart_ptr/gc.rs +++ b/src/smart_ptr/gc.rs @@ -13,7 +13,7 @@ use std::{marker::Unsize, ops::CoerceUnsized}; use stable_deref_trait::StableDeref; -use crate::collector::{GcGuardWarrant, InternalGcRef, COLLECTOR}; +use crate::collector::{get_collector, GcGuardWarrant, InternalGcRef}; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::wrappers::{ GcMutexGuard, GcPoisonError, GcRef, GcRefMut, GcRwLockReadGuard, GcRwLockWriteGuard, @@ -46,7 +46,7 @@ impl Gc { where T: Sized + GcDrop, { - let (handle, ptr) = COLLECTOR.track_with_drop(v); + let (handle, ptr) = get_collector().track_with_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -62,7 +62,7 @@ impl Gc { where T: Sized, { - let (handle, ptr) = COLLECTOR.track_with_no_drop(v); + let (handle, ptr) = get_collector().track_with_no_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -85,7 +85,7 @@ impl Gc { where T: Sized + Finalize, { - let (handle, ptr) = COLLECTOR.track_with_finalization(v); + let (handle, ptr) = get_collector().track_with_finalization(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -101,7 +101,7 @@ impl Gc { 
where T: ToScan + GcDrop, { - let (handle, ptr) = COLLECTOR.track_boxed_value(v); + let (handle, ptr) = get_collector().track_boxed_value(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -124,7 +124,7 @@ impl Gc { /// If you wish to avoid this, consider `GcDeref` as an alternative. #[must_use] pub fn get(&self) -> GcGuard<'_, T> { - let warrant = COLLECTOR.get_data_warrant(&self.backing_handle); + let warrant = get_collector().get_data_warrant(&self.backing_handle); GcGuard { gc_ptr: self, _warrant: warrant, @@ -162,7 +162,7 @@ impl Gc { let ptr: &T = gc_guard.deref(); if ptr.type_id() == TypeId::of::() { - let new_handle = COLLECTOR.clone_handle(&self.backing_handle); + let new_handle = get_collector().clone_handle(&self.backing_handle); Some(Gc { backing_handle: new_handle, @@ -177,7 +177,7 @@ impl Gc { impl Clone for Gc { #[must_use] fn clone(&self) -> Self { - let new_handle = COLLECTOR.clone_handle(&self.backing_handle); + let new_handle = get_collector().clone_handle(&self.backing_handle); Self { backing_handle: new_handle, diff --git a/src/std_impls/mod.rs b/src/std_impls/mod.rs index d531e5b..9aee4b9 100644 --- a/src/std_impls/mod.rs +++ b/src/std_impls/mod.rs @@ -6,6 +6,7 @@ mod wrap_types; #[cfg(test)] mod test { use std::cell::Cell; + #[cfg(feature = "std")] use std::panic::catch_unwind; use std::prelude::v1::*; use std::sync::{Mutex, RwLock}; @@ -68,6 +69,7 @@ mod test { assert_eq!(count, 1); } + #[cfg(feature = "std")] #[test] fn poisoned_mutex_scans() { let m = Mutex::new(MockGc { @@ -106,6 +108,7 @@ mod test { assert_eq!(count, 1); } + #[cfg(feature = "std")] #[test] fn poisoned_rwlock_scans() { let m = RwLock::new(MockGc { diff --git a/src/std_impls/value_types.rs b/src/std_impls/value_types.rs index 7650de0..d0a75fa 100644 --- a/src/std_impls/value_types.rs +++ b/src/std_impls/value_types.rs @@ -1,7 +1,12 @@ +#[cfg(feature = "std")] use std::collections::hash_map::RandomState; + use std::prelude::v1::*; use std::ptr::drop_in_place; -use 
std::time::{Duration, Instant}; +use std::time::Duration; + +#[cfg(feature = "std")] +use std::time::Instant; macro_rules! sync_value_type { ($t: ty) => { @@ -39,15 +44,20 @@ sync_value_type!(f32); sync_value_type!(f64); sync_value_type!(String); + +#[cfg(feature = "std")] sync_value_type!(Instant); + sync_value_type!(Duration); +#[cfg(feature = "std")] sync_value_type!(RandomState); #[cfg(test)] mod test { use std::mem::forget; use std::prelude::v1::*; + #[cfg(feature = "std")] use std::time::Instant; use crate::Finalize; @@ -90,5 +100,6 @@ mod test { test_no_panic_finalize!(f64, 1.0); test_no_panic_finalize!(String, String::from("hello")); + #[cfg(feature = "std")] test_no_panic_finalize!(Instant, Instant::now()); } diff --git a/src/std_impls/wrap_types.rs b/src/std_impls/wrap_types.rs index e2b41de..faf8bfd 100644 --- a/src/std_impls/wrap_types.rs +++ b/src/std_impls/wrap_types.rs @@ -3,7 +3,10 @@ use crate::{Finalize, Scan, Scanner}; use std::prelude::v1::*; use std::cell::{Cell, RefCell}; -use std::sync::{Arc, Mutex, RwLock, TryLockError}; +use std::sync::{Arc, Mutex, RwLock}; + +#[cfg(feature = "std")] +use std::sync::TryLockError; // ARC unsafe impl GcDeref for Arc where T: GcDeref + Send {} @@ -35,6 +38,7 @@ unsafe impl GcDrop for Mutex where T: GcDrop {} unsafe impl GcSafe for Mutex where T: GcSafe {} unsafe impl Scan for Mutex { + #[cfg(feature = "std")] #[inline] fn scan(&self, scanner: &mut Scanner<'_>) { match self.try_lock() { @@ -52,6 +56,20 @@ unsafe impl Scan for Mutex { } } } + + #[cfg(not(feature = "std"))] + #[inline] + fn scan(&self, scanner: &mut Scanner<'_>) { + match self.try_lock() { + Some(data) => { + let raw: &T = &*data; + scanner.scan(raw); + } + None => { + error!("A Mutex was in use when it was scanned -- something is buggy here! 
(no memory unsafety yet, so proceeding...)"); + } + } + } } unsafe impl Finalize for Mutex { @@ -153,6 +171,7 @@ unsafe impl GcDrop for RwLock where T: GcDrop {} unsafe impl GcSafe for RwLock where T: GcSafe {} unsafe impl Scan for RwLock { + #[cfg(feature = "std")] #[inline] fn scan(&self, scanner: &mut Scanner<'_>) { match self.try_read() { @@ -170,6 +189,20 @@ unsafe impl Scan for RwLock { } } } + + #[cfg(not(feature = "std"))] + #[inline] + fn scan(&self, scanner: &mut Scanner<'_>) { + match self.try_read() { + Some(data) => { + let raw: &T = &*data; + scanner.scan(raw); + } + None => { + error!("A RwLock was in use when it was scanned -- something is buggy here! (no memory unsafety yet, so proceeding...)"); + } + } + } } unsafe impl Finalize for RwLock { diff --git a/src/wrappers.rs b/src/wrappers.rs index b1e512e..bd5f46c 100644 --- a/src/wrappers.rs +++ b/src/wrappers.rs @@ -2,7 +2,10 @@ use std::cell::{BorrowError, BorrowMutError, RefCell}; use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; use std::prelude::v1::*; -use std::sync::{self, TryLockError}; +use std::sync; + +#[cfg(feature = "std")] +use std::sync::TryLockError; use crate::{GcGuard, Scan}; @@ -151,12 +154,18 @@ pub struct GcMutexGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { pub(crate) fn lock(g: GcGuard<'a, sync::Mutex>) -> Result> { let mut was_poisoned = false; - let internal_guard = gc_mutex_internals::GcMutexGuardInt::new(g, |g| match g.lock() { - Ok(v) => v, - Err(e) => { - was_poisoned = true; - e.into_inner() + let internal_guard = gc_mutex_internals::GcMutexGuardInt::new(g, |g| { + #[cfg(feature = "std")] + match g.lock() { + Ok(v) => v, + Err(e) => { + was_poisoned = true; + e.into_inner() + } } + + #[cfg(not(feature = "std"))] + g.lock() }); let guard = Self { internal_guard }; @@ -170,8 +179,9 @@ impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { pub(crate) fn try_lock(g: GcGuard<'a, sync::Mutex>) -> Result> { let mut 
was_poisoned = false; - let internal_guard = - gc_mutex_internals::GcMutexGuardInt::try_new(g, |g| match g.try_lock() { + let internal_guard = gc_mutex_internals::GcMutexGuardInt::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_lock() { Ok(g) => Ok(g), Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -180,8 +190,15 @@ impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = "std"))] + match g.try_lock() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = GcMutexGuard { internal_guard }; @@ -245,14 +262,19 @@ pub struct GcRwLockReadGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { pub(crate) fn read(g: GcGuard<'a, sync::RwLock>) -> Result> { let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockReadGuardInternal::new(g, |g| match g.read() { + let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::new(g, |g| { + #[cfg(feature = "std")] + match g.read() { Ok(v) => v, Err(e) => { was_poisoned = true; e.into_inner() } - }); + } + + #[cfg(not(feature = "std"))] + g.read() + }); let guard = Self { internal_guard }; @@ -266,8 +288,9 @@ impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { pub(crate) fn try_read(g: GcGuard<'a, sync::RwLock>) -> Result> { let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockReadGuardInternal::try_new(g, |g| match g.try_read() { + let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_read() { Ok(g) => Ok(g), Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -276,8 +299,15 @@ impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = 
"std"))] + match g.try_read() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = Self { internal_guard }; @@ -313,14 +343,19 @@ pub struct GcRwLockWriteGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { pub(crate) fn write(g: GcGuard<'a, sync::RwLock>) -> Result> { let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockWriteGuardInternal::new(g, |g| match g.write() { + let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::new(g, |g| { + #[cfg(feature = "std")] + match g.write() { Ok(v) => v, Err(e) => { was_poisoned = true; e.into_inner() } - }); + } + + #[cfg(not(feature = "std"))] + g.write() + }); let guard = Self { internal_guard }; @@ -333,8 +368,9 @@ impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { pub(crate) fn try_write(g: GcGuard<'a, sync::RwLock>) -> Result> { let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockWriteGuardInternal::try_new(g, |g| match g.try_write() { + let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_write() { Ok(g) => Ok(g), Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -343,8 +379,15 @@ impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = "std"))] + match g.try_write() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = GcRwLockWriteGuard { internal_guard }; From 895ecaed266a56c5c2c47b65ef77ebc477096d7f Mon Sep 17 00:00:00 2001 From: Daroc Alden Date: Thu, 12 Nov 2020 18:17:37 -0500 Subject: [PATCH 3/3] Adding no-threads version --- Cargo.toml | 5 +- src/atomic.rs | 23 +++---- src/collector/collect_impl.rs | 4 +- src/collector/dropper.rs | 120 ++++++++++++++++++++-------------- 
src/collector/mod.rs | 81 +++++++++++------------ src/lib.rs | 16 ++--- src/smart_ptr/deref_gc.rs | 14 ++-- src/smart_ptr/gc.rs | 16 ++--- src/std_impls/wrap_types.rs | 12 ++++ src/wrappers.rs | 6 ++ tests/integration.rs | 32 ++++++++- 11 files changed, 189 insertions(+), 140 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 114f348..8a4d76f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,8 @@ shredder_derive = { git = "https://github.com/Others/shredder_derive.git" } stable_deref_trait = "1.1" [dependencies.no-std-compat] -version = "0.4.1" +# Waiting for stable version containing new `sync` version +git = "https://gitlab.com/jD91mZM2/no-std-compat" features = [ "alloc", "compat_hash", "compat_sync", "compat_macros" ] [dev-dependencies] @@ -41,5 +42,5 @@ trybuild = "1.0" [features] default = [ "std", "threads" ] # Default to using the std std = [ "no-std-compat/std" ] -threads = [] +threads = [ "std" ] nightly-features = [] diff --git a/src/atomic.rs b/src/atomic.rs index dd0aa77..08e0ed9 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -5,7 +5,7 @@ use std::ptr::drop_in_place; use std::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Arc; -use crate::collector::{get_collector, GcData, InternalGcRef}; +use crate::collector::{GcData, InternalGcRef, COLLECTOR}; use crate::marker::{GcDeref, GcSafe}; use crate::{Finalize, Gc, Scan, Scanner}; @@ -48,7 +48,7 @@ impl AtomicGc { Self { atomic_ptr: atomic_ptr.clone(), - backing_handle: get_collector().new_handle_for_atomic(atomic_ptr), + backing_handle: COLLECTOR.new_handle_for_atomic(atomic_ptr), _mark: PhantomData, } } @@ -65,8 +65,7 @@ impl AtomicGc { let ptr; let internal_handle; { - let _collector = get_collector(); - let _collection_blocker = _collector.get_collection_blocker_spinlock(); + let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector 
data structures, @@ -81,7 +80,7 @@ impl AtomicGc { mem::forget(gc_data_temp); ptr = new_gc_data_ref.scan_ptr().cast(); - internal_handle = get_collector().handle_from_data(new_gc_data_ref); + internal_handle = COLLECTOR.handle_from_data(new_gc_data_ref); } Gc::new_raw(internal_handle, ptr) @@ -98,8 +97,7 @@ impl AtomicGc { let raw_data_ptr = Arc::as_ptr(data); { - let collector = get_collector(); - let _collection_blocker = collector.get_collection_blocker_spinlock(); + let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, @@ -123,8 +121,7 @@ impl AtomicGc { let ptr; let internal_handle; { - let collector = get_collector(); - let _collection_blocker = collector.get_collection_blocker_spinlock(); + let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); let old_data_ptr = self.atomic_ptr.swap(raw_data_ptr as _, ordering); // Safe to manipulate this ptr only because we have the `_collection_blocker` @@ -136,7 +133,7 @@ impl AtomicGc { mem::forget(old_data_arc); ptr = gc_data.scan_ptr().cast(); - internal_handle = get_collector().handle_from_data(gc_data); + internal_handle = COLLECTOR.handle_from_data(gc_data); } Gc::new_raw(internal_handle, ptr) @@ -168,8 +165,7 @@ impl AtomicGc { let compare_res; { - let collector = get_collector(); - let _collection_blocker = collector.get_collection_blocker_spinlock(); + let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, // otherwise someone would be accessing an `AtomicGc` pointing to freed data--which @@ -213,8 +209,7 @@ impl AtomicGc { let compare_res; { - let collector = get_collector(); - let _collection_blocker = collector.get_collection_blocker_spinlock(); + 
let _collection_blocker = COLLECTOR.get_collection_blocker_spinlock(); // Safe to manipulate this ptr only because we have the `_collection_blocker` // (And we know this `Arc` still has a pointer in the collector data structures, // otherwise someone would be accessing an `AtomicGc` pointing to freed data--which diff --git a/src/collector/collect_impl.rs b/src/collector/collect_impl.rs index 60b0d00..18c755d 100644 --- a/src/collector/collect_impl.rs +++ b/src/collector/collect_impl.rs @@ -146,9 +146,7 @@ impl Collector { // Send off the data to be dropped in the background let drop_msg = DropMessage::DataToDrop(to_drop); - if let Err(e) = self.dropper.send_msg(drop_msg) { - error!("Error sending to drop thread {}", e); - } + self.drop(drop_msg); // update the trigger based on the new baseline self.trigger diff --git a/src/collector/dropper.rs b/src/collector/dropper.rs index 9b7032c..11c0609 100644 --- a/src/collector/dropper.rs +++ b/src/collector/dropper.rs @@ -5,14 +5,18 @@ use std::panic::catch_unwind; use std::sync::atomic::Ordering; use std::sync::Arc; -use crossbeam::channel::{self, SendError, Sender}; +use crossbeam::channel::{SendError, Sender}; use parking_lot::RwLock; use rayon::iter::IntoParallelRefIterator; use rayon::iter::ParallelIterator; +#[cfg(feature = "threads")] +use std::thread::spawn; + use crate::collector::GcData; pub(crate) struct BackgroundDropper { + #[cfg(feature = "threads")] sender: Sender, } @@ -24,65 +28,81 @@ pub(crate) enum DropMessage { } impl BackgroundDropper { - pub fn new(spawn: &'static F) -> Self - where - F: Fn(Box), - F: Sized, - { - let (sender, receiver) = channel::unbounded(); + pub fn new() -> Self { + #[cfg(feature = "threads")] + let (sender, receiver) = crossbeam::channel::unbounded(); // The drop thread deals with doing all the Drops this collector needs to do + #[cfg(feature = "threads")] spawn(Box::new(move || { // An Err value means the stream will never recover while let Ok(drop_msg) = receiver.recv() { - 
match drop_msg { - DropMessage::DataToDrop(to_drop) => { - let to_drop = to_drop.read(); - - // NOTE: It's important that all data is correctly marked as deallocated before we start - to_drop.par_iter().for_each(|data| { - // Mark this data as in the process of being deallocated and unsafe to access - data.deallocated.store(true, Ordering::SeqCst); - }); - - // Then run the drops if needed - to_drop.par_iter().for_each(|data| { - let underlying_allocation = data.underlying_allocation; - - // When the stdlib is available, we can use catch_unwind - // to protect ourselves against panics that unwind. - #[cfg(feature = "std")] - { - let res = catch_unwind(move || unsafe { - underlying_allocation.deallocate(); - }); - if let Err(e) = res { - eprintln!("Gc background drop failed: {:?}", e); - } - } - - // When it is not available, however, panics probably - // won't unwind, and there's no safe means to catch - // a panic. - // - // TODO is there a better way to safely handle this? - #[cfg(not(feature = "std"))] - underlying_allocation.deallocate(); - }); - } - DropMessage::SyncUp(responder) => { - if let Err(e) = responder.send(()) { - eprintln!("Gc background syncup failed: {:?}", e); - } - } - } + handle_message(drop_msg) } })); - Self { sender } + Self { + #[cfg(feature = "threads")] + sender, + } } pub fn send_msg(&self, msg: DropMessage) -> Result<(), SendError> { - self.sender.send(msg) + #[cfg(feature = "threads")] + { + self.sender.send(msg) + } + + #[cfg(not(feature = "threads"))] + { + handle_message(msg); + Ok(()) + } + } +} + +fn handle_message(drop_msg: DropMessage) { + match drop_msg { + DropMessage::DataToDrop(to_drop) => { + let to_drop = to_drop.read(); + + // NOTE: It's important that all data is correctly marked as deallocated before we start + to_drop.par_iter().for_each(|data| { + // Mark this data as in the process of being deallocated and unsafe to access + data.deallocated.store(true, Ordering::SeqCst); + }); + + // Then run the drops if needed + 
to_drop.par_iter().for_each(|data| { + let underlying_allocation = data.underlying_allocation; + + // When the stdlib is available, we can use catch_unwind + // to protect ourselves against panics that unwind. + #[cfg(feature = "std")] + { + let res = catch_unwind(move || unsafe { + underlying_allocation.deallocate(); + }); + if let Err(e) = res { + eprintln!("Gc background drop failed: {:?}", e); + } + } + + // When it is not available, however, panics probably + // won't unwind, and there's no safe means to catch + // a panic. + // + // TODO is there a better way to safely handle this? + #[cfg(not(feature = "std"))] + unsafe { + underlying_allocation.deallocate() + }; + }); + } + DropMessage::SyncUp(responder) => { + if let Err(e) = responder.send(()) { + eprintln!("Gc background syncup failed: {:?}", e); + } + } } } diff --git a/src/collector/mod.rs b/src/collector/mod.rs index fe653ed..cf9a05d 100644 --- a/src/collector/mod.rs +++ b/src/collector/mod.rs @@ -1,4 +1,5 @@ use std::prelude::v1::*; + mod alloc; mod collect_impl; mod data; @@ -11,9 +12,10 @@ use std::sync::Arc; #[cfg(feature = "threads")] use std::thread::spawn; -use once_cell::sync::OnceCell; - -use crossbeam::channel::{self, Sender}; +use crossbeam::channel; +#[cfg(feature = "threads")] +use crossbeam::channel::Sender; +use once_cell::sync::Lazy; use parking_lot::Mutex; use crate::collector::alloc::GcAllocation; @@ -40,7 +42,7 @@ impl InternalGcRef { } pub(crate) fn invalidate(&self) { - get_collector().drop_handle(self); + COLLECTOR.drop_handle(self); } pub(crate) fn data(&self) -> &Arc { @@ -72,6 +74,7 @@ pub struct Collector { /// we run automatic gc in a background thread /// sending to this channel indicates that thread should check the trigger, then collect if the /// trigger indicates it should + #[cfg(feature = "threads")] async_gc_notifier: Sender<()>, /// all the data we are managing plus metadata about what `Gc`s exist tracked_data: TrackedData, @@ -91,18 +94,16 @@ struct TrackedData { 
// TODO(issue): https://github.com/Others/shredder/issues/7 impl Collector { - fn new(spawn: &'static F) -> Arc - where - F: Fn(Box), - F: Sized, - { + fn new() -> Arc { + #[cfg(feature = "threads")] let (async_gc_notifier, async_gc_receiver) = channel::bounded(1); let res = Arc::new(Self { gc_lock: Mutex::default(), atomic_spinlock: AtomicProtectingSpinlock::default(), trigger: GcTrigger::default(), - dropper: BackgroundDropper::new(spawn), + dropper: BackgroundDropper::new(), + #[cfg(feature = "threads")] async_gc_notifier, tracked_data: TrackedData { // This is janky, but we subtract one from the collection number @@ -117,21 +118,31 @@ impl Collector { }, }); - // The async Gc thread deals with background Gc'ing - let async_collector_ref = Arc::downgrade(&res); - spawn(Box::new(move || { - // An Err value means the stream will never recover - while async_gc_receiver.recv().is_ok() { - if let Some(collector) = async_collector_ref.upgrade() { - collector.check_then_collect(); + #[cfg(feature = "threads")] + { + // The async Gc thread deals with background Gc'ing + let async_collector_ref = Arc::downgrade(&res); + spawn(move || { + // An Err value means the stream will never recover + while async_gc_receiver.recv().is_ok() { + if let Some(collector) = async_collector_ref.upgrade() { + collector.check_then_collect(); + } } - } - })); + }); + } res } + fn drop(&self, drop_msg: DropMessage) { + if let Err(e) = self.dropper.send_msg(drop_msg) { + error!("Error sending to drop thread {}", e); + } + } + #[inline] + #[cfg(feature = "threads")] fn notify_async_gc_thread(&self) { // Note: We only send if there is room in the channel // If there's already a notification there the async thread is already notified @@ -145,6 +156,12 @@ impl Collector { }; } + #[inline] + #[cfg(not(feature = "threads"))] + fn notify_async_gc_thread(&self) { + self.check_then_collect(); + } + pub fn track_with_drop(&self, data: T) -> (InternalGcRef, *const T) { let (gc_data_ptr, heap_ptr) = 
GcAllocation::allocate_with_drop(data); self.track(gc_data_ptr, heap_ptr) @@ -326,31 +343,7 @@ impl Collector { } } -static COLLECTOR: OnceCell> = OnceCell::new(); - -#[cfg(not(feature = "threads"))] -pub fn initialize_threading(spawn: &'static F) -where - F: Fn(Box), -{ - COLLECTOR.set(Collector::new(spawn)); -} - -#[cfg(feature = "threads")] -pub fn get_collector() -> Arc { - COLLECTOR - .get_or_init(|| { - Collector::new(&|e| { - spawn(e); - }) - }) - .clone() -} - -#[cfg(not(feature = "threads"))] -pub fn get_collector() -> Arc { - *(COLLECTOR.get().expect("Attempted to use Gc without calling shredder::initialize_threading or using the threads from stdlib")) -} +pub static COLLECTOR: Lazy> = Lazy::new(Collector::new); #[cfg(test)] pub(crate) fn get_mock_handle() -> InternalGcRef { diff --git a/src/lib.rs b/src/lib.rs index c56bbca..99ff1a0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -46,6 +46,7 @@ extern crate no_std_compat as std; +#[cfg(feature = "std")] #[macro_use] extern crate crossbeam; @@ -76,10 +77,7 @@ use std::sync::{Mutex, RwLock}; use std::prelude::v1::*; -use crate::collector::get_collector; - -#[cfg(not(feature = "threads"))] -pub use crate::collector::initialize_threading; +use crate::collector::COLLECTOR; pub use crate::finalize::Finalize; pub use crate::r::{RMut, R}; @@ -112,7 +110,7 @@ pub type GRwLock = Gc>; /// ``` #[must_use] pub fn number_of_tracked_allocations() -> usize { - get_collector().tracked_data_count() + COLLECTOR.tracked_data_count() } /// Returns how many `Gc`s are currently in use. @@ -126,7 +124,7 @@ pub fn number_of_tracked_allocations() -> usize { /// ``` #[must_use] pub fn number_of_active_handles() -> usize { - get_collector().handle_count() + COLLECTOR.handle_count() } /// Sets the percent more data that'll trigger collection. 
@@ -151,7 +149,7 @@ pub fn set_gc_trigger_percent(percent: f32) { percent ) } - get_collector().set_gc_trigger_percent(percent) + COLLECTOR.set_gc_trigger_percent(percent) } /// A function for manually running a collection, ignoring the heuristic that governs normal @@ -168,7 +166,7 @@ pub fn set_gc_trigger_percent(percent: f32) { /// collect(); // Manually run GC /// ``` pub fn collect() { - get_collector().collect(); + COLLECTOR.collect(); } /// Block the current thread until the background thread has finished running the destructors for @@ -188,7 +186,7 @@ pub fn collect() { /// // At this point all destructors for garbage will have been run /// ``` pub fn synchronize_destructors() { - get_collector().synchronize_destructors() + COLLECTOR.synchronize_destructors() } /// A convenience method for helping ensure your destructors are run. diff --git a/src/smart_ptr/deref_gc.rs b/src/smart_ptr/deref_gc.rs index 818f467..5a46b34 100644 --- a/src/smart_ptr/deref_gc.rs +++ b/src/smart_ptr/deref_gc.rs @@ -3,7 +3,7 @@ use std::prelude::v1::*; #[cfg(feature = "nightly-features")] use std::{marker::Unsize, ops::CoerceUnsized}; -use crate::collector::{get_collector, InternalGcRef}; +use crate::collector::{InternalGcRef, COLLECTOR}; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner, ToScan}; @@ -37,7 +37,7 @@ impl DerefGc { where T: Sized + GcDrop, { - let (handle, ptr) = get_collector().track_with_drop(v); + let (handle, ptr) = COLLECTOR.track_with_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -53,7 +53,7 @@ impl DerefGc { where T: Sized, { - let (handle, ptr) = get_collector().track_with_no_drop(v); + let (handle, ptr) = COLLECTOR.track_with_no_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -73,7 +73,7 @@ impl DerefGc { where T: Sized + Finalize, { - let (handle, ptr) = get_collector().track_with_finalization(v); + let (handle, ptr) = COLLECTOR.track_with_finalization(v); Self { backing_handle: handle, direct_ptr: 
ptr, @@ -89,7 +89,7 @@ impl DerefGc { where T: ToScan + GcDrop, { - let (handle, ptr) = get_collector().track_boxed_value(v); + let (handle, ptr) = COLLECTOR.track_boxed_value(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -112,7 +112,7 @@ impl DerefGc { let ptr: &T = self.deref(); if ptr.type_id() == TypeId::of::() { - let new_handle = get_collector().clone_handle(&self.backing_handle); + let new_handle = COLLECTOR.clone_handle(&self.backing_handle); Some(DerefGc { backing_handle: new_handle, @@ -146,7 +146,7 @@ unsafe impl Scan for DerefGc { impl Clone for DerefGc { fn clone(&self) -> Self { - let new_handle = get_collector().clone_handle(&self.backing_handle); + let new_handle = COLLECTOR.clone_handle(&self.backing_handle); Self { backing_handle: new_handle, diff --git a/src/smart_ptr/gc.rs b/src/smart_ptr/gc.rs index 53ac868..62e1477 100644 --- a/src/smart_ptr/gc.rs +++ b/src/smart_ptr/gc.rs @@ -13,7 +13,7 @@ use std::{marker::Unsize, ops::CoerceUnsized}; use stable_deref_trait::StableDeref; -use crate::collector::{get_collector, GcGuardWarrant, InternalGcRef}; +use crate::collector::{GcGuardWarrant, InternalGcRef, COLLECTOR}; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::wrappers::{ GcMutexGuard, GcPoisonError, GcRef, GcRefMut, GcRwLockReadGuard, GcRwLockWriteGuard, @@ -46,7 +46,7 @@ impl Gc { where T: Sized + GcDrop, { - let (handle, ptr) = get_collector().track_with_drop(v); + let (handle, ptr) = COLLECTOR.track_with_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -62,7 +62,7 @@ impl Gc { where T: Sized, { - let (handle, ptr) = get_collector().track_with_no_drop(v); + let (handle, ptr) = COLLECTOR.track_with_no_drop(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -85,7 +85,7 @@ impl Gc { where T: Sized + Finalize, { - let (handle, ptr) = get_collector().track_with_finalization(v); + let (handle, ptr) = COLLECTOR.track_with_finalization(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -101,7 +101,7 @@ impl Gc { 
where T: ToScan + GcDrop, { - let (handle, ptr) = get_collector().track_boxed_value(v); + let (handle, ptr) = COLLECTOR.track_boxed_value(v); Self { backing_handle: handle, direct_ptr: ptr, @@ -124,7 +124,7 @@ impl Gc { /// If you wish to avoid this, consider `GcDeref` as an alternative. #[must_use] pub fn get(&self) -> GcGuard<'_, T> { - let warrant = get_collector().get_data_warrant(&self.backing_handle); + let warrant = COLLECTOR.get_data_warrant(&self.backing_handle); GcGuard { gc_ptr: self, _warrant: warrant, @@ -162,7 +162,7 @@ impl Gc { let ptr: &T = gc_guard.deref(); if ptr.type_id() == TypeId::of::() { - let new_handle = get_collector().clone_handle(&self.backing_handle); + let new_handle = COLLECTOR.clone_handle(&self.backing_handle); Some(Gc { backing_handle: new_handle, @@ -177,7 +177,7 @@ impl Gc { impl Clone for Gc { #[must_use] fn clone(&self) -> Self { - let new_handle = get_collector().clone_handle(&self.backing_handle); + let new_handle = COLLECTOR.clone_handle(&self.backing_handle); Self { backing_handle: new_handle, diff --git a/src/std_impls/wrap_types.rs b/src/std_impls/wrap_types.rs index faf8bfd..61a8033 100644 --- a/src/std_impls/wrap_types.rs +++ b/src/std_impls/wrap_types.rs @@ -73,6 +73,7 @@ unsafe impl Scan for Mutex { } unsafe impl Finalize for Mutex { + #[cfg(feature = "std")] unsafe fn finalize(&mut self) { let v = self.get_mut(); match v { @@ -80,6 +81,11 @@ unsafe impl Finalize for Mutex { Err(e) => e.into_inner().finalize(), } } + + #[cfg(not(feature = "std"))] + unsafe fn finalize(&mut self) { + self.get_mut().finalize() + } } // OPTION @@ -206,6 +212,7 @@ unsafe impl Scan for RwLock { } unsafe impl Finalize for RwLock { + #[cfg(feature = "std")] unsafe fn finalize(&mut self) { let v = self.get_mut(); match v { @@ -213,4 +220,9 @@ unsafe impl Finalize for RwLock { Err(e) => e.into_inner().finalize(), } } + + #[cfg(not(feature = "std"))] + unsafe fn finalize(&mut self) { + self.get_mut().finalize() + } } diff --git 
a/src/wrappers.rs b/src/wrappers.rs index bd5f46c..e85e5a0 100644 --- a/src/wrappers.rs +++ b/src/wrappers.rs @@ -153,6 +153,7 @@ pub struct GcMutexGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { pub(crate) fn lock(g: GcGuard<'a, sync::Mutex>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_mutex_internals::GcMutexGuardInt::new(g, |g| { #[cfg(feature = "std")] @@ -178,6 +179,7 @@ impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { } pub(crate) fn try_lock(g: GcGuard<'a, sync::Mutex>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_mutex_internals::GcMutexGuardInt::try_new(g, |g| { #[cfg(feature = "std")] @@ -261,6 +263,7 @@ pub struct GcRwLockReadGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { pub(crate) fn read(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::new(g, |g| { #[cfg(feature = "std")] @@ -286,6 +289,7 @@ impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { } pub(crate) fn try_read(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::try_new(g, |g| { @@ -342,6 +346,7 @@ pub struct GcRwLockWriteGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { pub(crate) fn write(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::new(g, |g| { #[cfg(feature = "std")] @@ -367,6 +372,7 @@ impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { } pub(crate) fn try_write(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::try_new(g, |g| { 
#[cfg(feature = "std")] diff --git a/tests/integration.rs b/tests/integration.rs index 2e995e8..cd696a9 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1,3 +1,8 @@ +#![no_std] + +use no_std_compat as std; +use std::prelude::v1::*; + use std::cell::RefCell; use std::mem::drop; use std::ops::Deref; @@ -147,7 +152,10 @@ fn scan_skip_problem() { drop(hider); let root_gc_guard = root_con.get(); + #[cfg(feature = "std")] let root_blocker = root_gc_guard.lock().unwrap(); + #[cfg(not(feature = "std"))] + let root_blocker = root_gc_guard.lock(); collect(); assert_eq!(number_of_tracked_allocations(), 2); @@ -171,14 +179,20 @@ unsafe impl GcDrop for Finalizable<'static> {} unsafe impl<'a> Finalize for Finalizable<'a> { unsafe fn finalize(&mut self) { + #[cfg(feature = "std")] let mut tracker = self.tracker.lock().unwrap(); + #[cfg(not(feature = "std"))] + let mut tracker = self.tracker.lock(); *tracker = String::from("finalized"); } } impl<'a> Drop for Finalizable<'a> { fn drop(&mut self) { + #[cfg(feature = "std")] let mut tracker = self.tracker.lock().unwrap(); + #[cfg(not(feature = "std"))] + let mut tracker = self.tracker.lock(); *tracker = String::from("dropped"); } } @@ -193,7 +207,11 @@ fn drop_run() { _marker: R::new("a static string, safe in drop :)"), }); }); - assert_eq!(&*(tracker.lock().unwrap()), "dropped"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "none"); assert_eq!(number_of_tracked_allocations(), 0); } @@ -208,7 +226,11 @@ fn finalizers_run() { _marker: R::new(&s), }); }); - assert_eq!(&*(tracker.lock().unwrap()), "finalized"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "none"); assert_eq!(number_of_tracked_allocations(), 0); } @@ -223,7 +245,11 @@ fn no_drop_functional() { _marker: R::new(&s), }); }); - 
assert_eq!(&*(tracker.lock().unwrap()), "none"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "none"); assert_eq!(number_of_tracked_allocations(), 0); }