diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4b3b6c3..5dd9c33 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,44 @@ project(crill)
 
 include_directories(include)
 
-add_executable(tests tests/main.cpp tests/spin_mutex_test.cpp)
+option(ENABLE_ASAN "Enable address sanitizer" OFF)
+option(ENABLE_UBSAN "Enable UB sanitizer" OFF)
+option(ENABLE_THREADSAN "Enable thread sanitizer" OFF)
+
+if(ENABLE_ASAN)
+    if (NOT MSVC)
+        message("Enabling address sanitizer")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
+        if(NOT APPLE)
+            # AppleClang doesn't have lsan
+            # https://developer.apple.com/documentation/code_diagnostics
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=leak")
+        endif()
+    endif()
+endif()
+
+if(ENABLE_UBSAN)
+    if (NOT MSVC)
+        message("Enabling UB sanitizer")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined")
+    endif()
+endif()
+
+if(ENABLE_THREADSAN)
+    if (NOT MSVC)
+        message("Enabling thread sanitizer")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
+    endif()
+endif()
+
+add_executable(tests
+    tests/main.cpp
+    tests/spin_mutex_test.cpp
+    tests/reclaim_object_test.cpp
+    tests/reclaim_on_write_object_test.cpp
+    tests/atomic_unique_ptr_test.cpp
+    tests/utility_test.cpp)
+
 target_compile_features(tests PRIVATE cxx_std_17)
 
 # Avoid "undefined reference to 'pthread_create'" linker error on Linux
diff --git a/include/crill/atomic_unique_ptr.h b/include/crill/atomic_unique_ptr.h
new file mode 100644
index 0000000..e2a561a
--- /dev/null
+++ b/include/crill/atomic_unique_ptr.h
@@ -0,0 +1,118 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#ifndef CRILL_ATOMIC_UNIQUE_PTR_H
+#define CRILL_ATOMIC_UNIQUE_PTR_H
+
+#include <atomic>
+#include <memory>
+#include <optional>
+
+namespace crill
+{
+
+// crill::atomic_unique_ptr wraps a std::unique_ptr and provides some
+// atomic wait-free operations on the underlying pointer.
+// This can be useful for atomic pointer swaps when building lock-free
+// algorithms without sacrificing the lifetime management semantics of
+// unique_ptr. Custom deleters are not supported.
+template <typename T>
+class atomic_unique_ptr
+{
+    // TODO: support std::memory_order parameters
+
+public:
+    // Effects: Constructs an atomic_unique_ptr containing an empty
+    // unique_ptr.
+    atomic_unique_ptr() = default;
+
+    // Effects: Constructs an atomic_unique_ptr containing the passed-in
+    // unique_ptr.
+    atomic_unique_ptr(std::unique_ptr<T> uptr)
+      : ptr(uptr.release())
+    {}
+
+    // Effects: Constructs an atomic_unique_ptr containing a new unique_ptr
+    // managing an object constructed from the given constructor arguments.
+    template <typename... Args>
+    atomic_unique_ptr(Args... args)
+      : atomic_unique_ptr(std::make_unique<T>(std::forward<Args>(args)...))
+    {
+    }
+
+    // Effects: Deletes the object managed by the unique_ptr.
+    ~atomic_unique_ptr()
+    {
+        delete load();
+    }
+
+    // Returns: a pointer to the managed object.
+    // Non-blocking guarantees: wait-free.
+    // Note: load() itself is race-free, but the returned pointer will
+    // dangle if the underlying unique_ptr has deleted the managed object
+    // in the meantime!
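+    //
+    // For illustration only (hypothetical caller code, not part of the
+    // class):
+    //
+    //   crill::atomic_unique_ptr<int> p(std::make_unique<int>(42));
+    //   int* raw = p.load();                   // race-free
+    //   p.exchange(std::make_unique<int>(43)); // returned unique_ptr is
+    //                                          // discarded -> 42 is deleted
+    //   // *raw now dangles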
+    T* load(std::memory_order mo = std::memory_order_seq_cst) const
+    {
+        return ptr.load(mo);
+    }
+
+    // Effects: Atomically swaps the currently stored unique_ptr with a
+    // new unique_ptr.
+    // Returns: the previously stored unique_ptr.
+    // Non-blocking guarantees: wait-free.
+    std::unique_ptr<T> exchange(std::unique_ptr<T> desired, std::memory_order mo = std::memory_order_seq_cst)
+    {
+        return std::unique_ptr<T>(ptr.exchange(desired.release(), mo));
+    }
+
+    // Effects: If the address of the managed object is equal to expected,
+    // replaces the currently stored unique_ptr with desired by moving from
+    // desired. Otherwise, changes the value of expected to the address of
+    // the managed object.
+    // Returns: If the compare-exchange succeeded, the previously stored
+    // unique_ptr; otherwise, an empty optional.
+    // Non-blocking guarantees: wait-free.
+    std::optional<std::unique_ptr<T>>
+    compare_exchange_strong(T*& expected, std::unique_ptr<T>& desired, std::memory_order mo = std::memory_order_seq_cst)
+    {
+        return compare_exchange_impl(
+            &std::atomic<T*>::compare_exchange_strong, expected, desired, mo);
+    }
+
+    // Effects: Equivalent to compare_exchange_strong, but the comparison
+    // may spuriously fail. On some platforms, this gives better performance.
+    // Use this version when calling compare_exchange in a loop.
+    // Non-blocking guarantees: wait-free.
+    std::optional<std::unique_ptr<T>>
+    compare_exchange_weak(T*& expected, std::unique_ptr<T>& desired, std::memory_order mo = std::memory_order_seq_cst)
+    {
+        return compare_exchange_impl(
+            &std::atomic<T*>::compare_exchange_weak, expected, desired, mo);
+    }
+
+private:
+    using cx_fptr = bool (std::atomic<T*>::*)(T*&, T*, std::memory_order);
+
+    std::optional<std::unique_ptr<T>>
+    compare_exchange_impl(cx_fptr cx, T*& expected, std::unique_ptr<T>& desired, std::memory_order mo)
+    {
+        if (auto* desired_raw = desired.get(); (ptr.*cx)(expected, desired_raw, mo))
+        {
+            desired.release();
+            return std::unique_ptr<T>(expected);
+        }
+        else
+        {
+            return {};
+        }
+    }
+
+    std::atomic<T*> ptr = nullptr;
+    static_assert(std::atomic<T*>::is_always_lock_free);
+};
+
+} // namespace crill
+
+#endif //CRILL_ATOMIC_UNIQUE_PTR_H
diff --git a/include/crill/reclaim_object.h b/include/crill/reclaim_object.h
new file mode 100644
index 0000000..401af4d
--- /dev/null
+++ b/include/crill/reclaim_object.h
@@ -0,0 +1,280 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#ifndef CRILL_RECLAIM_OBJECT_H
+#define CRILL_RECLAIM_OBJECT_H
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <mutex>
+#include <type_traits>
+#include <vector>
+
+#include <crill/atomic_unique_ptr.h>
+
+namespace crill
+{
+
+// crill::reclaim_object stores a value of type T and provides concurrent
+// read and write access to it. Multiple readers and writers are supported.
+//
+// Readers are guaranteed to always be wait-free. Readers will never block
+// writers, but writers may block other writers.
+//
+// Overwritten values are put on a "zombie list". Values on the zombie list
+// that are no longer referred to by any reader can be reclaimed by calling
+// reclaim(). A call to reclaim() will block writers.
+//
+// The principle is very similar to RCU, with two key differences:
+// 1) reclamation is managed per object, not in a single global domain
+// 2) reclamation does not happen automatically: the user needs to explicitly
+//    call reclaim() periodically (e.g., on a timer).
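+//
+// A minimal usage sketch (illustrative only; config_t, use, and
+// make_new_config stand in for user-defined code):
+//
+//   crill::reclaim_object<config_t> cfg;
+//
+//   // real-time thread: wait-free read access
+//   auto reader = cfg.get_reader();
+//   {
+//       auto c = reader.read_lock();
+//       use(*c);
+//   }
+//
+//   // non-real-time thread: publish a new value (allocates)
+//   cfg.update(make_new_config());
+//
+//   // housekeeping, e.g. on a timer: free retired values
+//   cfg.reclaim();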
+template <typename T>
+class reclaim_object
+{
+    // TODO:
+    // allow the user to specify whether they need single or multiple readers
+    // and single or multiple writers. For the single-reader and single-writer
+    // cases, enable more efficient implementations.
+
+public:
+    // Effects: constructs a reclaim_object containing a default-constructed value.
+    reclaim_object()
+      : value(std::make_unique<T>())
+    {}
+
+    // Effects: constructs a reclaim_object containing a value constructed with
+    // the constructor arguments provided.
+    template <typename... Args>
+    reclaim_object(Args... args)
+      : value(std::make_unique<T>(std::forward<Args>(args)...))
+    {}
+
+    // reclaim_object is non-copyable and non-movable.
+    reclaim_object(reclaim_object&&) = delete;
+    reclaim_object& operator=(reclaim_object&&) = delete;
+    reclaim_object(const reclaim_object&) = delete;
+    reclaim_object& operator=(const reclaim_object&) = delete;
+
+    // Reading the value must happen through a reader class.
+    class reader;
+
+    // read_ptr provides scoped read access to the value.
+    class read_ptr
+    {
+    public:
+        read_ptr(reader& rdr) noexcept
+          : rdr(rdr)
+        {
+            assert(rdr.min_epoch == 0);
+
+            rdr.min_epoch.store(rdr.obj.current_epoch.load());
+            assert(rdr.min_epoch != 0);
+
+            value_read = rdr.obj.value.load();
+            assert(value_read);
+        }
+
+        ~read_ptr()
+        {
+            assert(rdr.min_epoch != 0);
+            rdr.min_epoch.store(0);
+        }
+
+        const T& operator*() const
+        {
+            assert(value_read);
+            return *value_read;
+        }
+
+        const T* operator->() const
+        {
+            assert(value_read);
+            return value_read;
+        }
+
+        read_ptr(read_ptr&&) = delete;
+        read_ptr& operator=(read_ptr&&) = delete;
+        read_ptr(const read_ptr&) = delete;
+        read_ptr& operator=(const read_ptr&) = delete;
+
+    private:
+        reader& rdr;
+        T* value_read = nullptr;
+    };
+
+    class reader
+    {
+    public:
+        reader(reclaim_object& obj) : obj(obj)
+        {
+            obj.register_reader(this);
+        }
+
+        ~reader()
+        {
+            obj.unregister_reader(this);
+        }
+
+        // Returns: a copy of the current value.
+        // Non-blocking guarantees: wait-free if the copy constructor of
+        // T is wait-free.
+        T get_value() noexcept(std::is_nothrow_copy_constructible_v<T>)
+        {
+            return *read_lock();
+        }
+
+        // Returns: a read_ptr giving read access to the current value.
+        // Non-blocking guarantees: wait-free.
+        read_ptr read_lock() noexcept
+        {
+            return read_ptr(*this);
+        }
+
+    private:
+        friend class reclaim_object;
+        friend class read_ptr;
+        reclaim_object& obj;
+        std::atomic<std::uint64_t> min_epoch = 0;
+    };
+
+    reader get_reader()
+    {
+        return reader(*this);
+    }
+
+    // Effects: Updates the current value to a new value constructed from the
+    // provided constructor arguments.
+    // Note: allocates memory.
+    template <typename... Args>
+    void update(Args... args)
+    {
+        exchange_and_retire(std::make_unique<T>(std::forward<Args>(args)...));
+    }
+
+    // write_ptr provides scoped write access to the value. This is useful if
+    // you want to modify, e.g., only a single data member of a larger class.
+    // The new value will be atomically published when write_ptr goes out of scope.
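+    //
+    // For example (illustrative; big_t stands in for a type with members
+    // a and b):
+    //
+    //   crill::reclaim_object<big_t> obj;
+    //   {
+    //       auto w = obj.write_lock();  // copies the current value
+    //       w->b = 42;                  // touch only one member
+    //   }                               // new value published here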
+    class write_ptr
+    {
+    public:
+        write_ptr(reclaim_object& obj)
+          : obj(obj),
+            new_value(std::make_unique<T>(*obj.value.load()))
+        {
+            assert(new_value);
+        }
+
+        ~write_ptr()
+        {
+            assert(new_value);
+            obj.exchange_and_retire(std::move(new_value));
+        }
+
+        T& operator*()
+        {
+            assert(new_value);
+            return *new_value;
+        }
+
+        T* operator->() noexcept
+        {
+            assert(new_value);
+            return new_value.get();
+        }
+
+        write_ptr(write_ptr&&) = delete;
+        write_ptr& operator=(write_ptr&&) = delete;
+        write_ptr(const write_ptr&) = delete;
+        write_ptr& operator=(const write_ptr&) = delete;
+
+    private:
+        reclaim_object& obj;
+        std::unique_ptr<T> new_value;
+    };
+
+    // Returns: a write_ptr giving scoped write access to the current value.
+    write_ptr write_lock()
+    {
+        return write_ptr(*this);
+    }
+
+    // Effects: Deletes all previously overwritten values that are no longer
+    // referred to by a read_ptr.
+    void reclaim()
+    {
+        std::scoped_lock lock(zombies_mtx);
+
+        for (auto& zombie : zombies)
+        {
+            assert(zombie.value != nullptr);
+            if (!has_readers_using_epoch(zombie.epoch_when_retired))
+                zombie.value.reset();
+        }
+
+        zombies.erase(
+            std::remove_if(zombies.begin(), zombies.end(), [](auto& zombie){ return zombie.value == nullptr; }),
+            zombies.end());
+    }
+
+private:
+    void exchange_and_retire(std::unique_ptr<T> new_value)
+    {
+        assert(new_value != nullptr);
+        auto old_value = value.exchange(std::move(new_value));
+
+        std::scoped_lock lock(zombies_mtx);
+        zombies.push_back({
+            current_epoch.fetch_add(1),
+            std::move(old_value)});
+    }
+
+    void register_reader(reader* rdr)
+    {
+        assert(rdr != nullptr);
+        std::scoped_lock lock(readers_mtx);
+        readers.push_back(rdr);
+    }
+
+    void unregister_reader(reader* rdr)
+    {
+        assert(rdr != nullptr);
+        std::scoped_lock lock(readers_mtx);
+
+        auto iter = std::find(readers.begin(), readers.end(), rdr);
+        assert(iter != readers.end());
+        readers.erase(iter);
+    }
+
+    bool has_readers_using_epoch(std::uint64_t epoch) noexcept
+    {
+        std::scoped_lock lock(readers_mtx);
+        return std::any_of(readers.begin(), readers.end(), [epoch](auto* reader){
+            assert(reader);
+            std::uint64_t reader_epoch = reader->min_epoch.load();
+            return reader_epoch != 0 && reader_epoch <= epoch;
+        });
+    }
+
+    struct zombie
+    {
+        std::uint64_t epoch_when_retired;
+        std::unique_ptr<T> value;
+    };
+
+    crill::atomic_unique_ptr<T> value;
+    std::vector<reader*> readers;
+    std::mutex readers_mtx;
+    std::vector<zombie> zombies;
+    std::mutex zombies_mtx;
+    std::atomic<std::uint64_t> current_epoch = 1;
+
+    // This algorithm requires a 64-bit lock-free atomic counter to avoid overflow.
+    static_assert(std::atomic<std::uint64_t>::is_always_lock_free);
+};
+
+} // namespace crill
+
+#endif //CRILL_RECLAIM_OBJECT_H
diff --git a/include/crill/reclaim_on_write_object.h b/include/crill/reclaim_on_write_object.h
new file mode 100644
index 0000000..115cc33
--- /dev/null
+++ b/include/crill/reclaim_on_write_object.h
@@ -0,0 +1,246 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#ifndef CRILL_RECLAIM_ON_WRITE_OBJECT_H
+#define CRILL_RECLAIM_ON_WRITE_OBJECT_H
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <mutex>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+namespace crill
+{
+
+// crill::reclaim_on_write_object has the same interface as
+// crill::reclaim_object, except that it is not necessary to
+// call reclaim(); instead, reclamation happens automatically
+// on update. This enables an algorithm that requires neither
+// a zombie list nor any heap allocation.
+// The tradeoff is that the writer needs to block on update
+// until all readers accessing the old value have finished.
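+//
+// A minimal usage sketch (illustrative only; config_t and make_new_config
+// stand in for user-defined code):
+//
+//   crill::reclaim_on_write_object<config_t> cfg;
+//
+//   // real-time thread: wait-free, allocation-free reads
+//   auto reader = cfg.get_reader();
+//   auto value = reader.get_value();
+//
+//   // non-real-time thread: publishes the new value, then blocks until
+//   // no reader still uses the old slot; no reclaim() call needed
+//   cfg.update(make_new_config());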
+template <typename T>
+class reclaim_on_write_object
+{
+public:
+    // Effects: constructs a reclaim_on_write_object containing a default-constructed value.
+    reclaim_on_write_object()
+      : slots{{T(), T()}}
+    {}
+
+    // Effects: constructs a reclaim_on_write_object containing a value constructed with
+    // the constructor arguments provided.
+    // Note: args is deliberately taken by value and used twice; forwarding
+    // it into the first slot would leave moved-from arguments for the second.
+    template <typename... Args>
+    reclaim_on_write_object(Args... args)
+      : slots{{T(args...), T(args...)}}
+    {}
+
+    // reclaim_on_write_object is non-copyable and non-movable.
+    reclaim_on_write_object(reclaim_on_write_object&&) = delete;
+    reclaim_on_write_object& operator=(reclaim_on_write_object&&) = delete;
+    reclaim_on_write_object(const reclaim_on_write_object&) = delete;
+    reclaim_on_write_object& operator=(const reclaim_on_write_object&) = delete;
+
+    // Reading the value must happen through a reader class.
+    class reader;
+
+    // read_ptr provides scoped read access to the value.
+    class read_ptr
+    {
+    public:
+        read_ptr(reader& rdr) noexcept
+          : rdr(rdr)
+        {
+            assert(rdr.min_epoch == 0);
+
+            rdr.min_epoch.store(rdr.obj.current_epoch.load());
+            assert(rdr.min_epoch != 0);
+
+            read_slot = rdr.obj.current_read_slot.load();
+        }
+
+        ~read_ptr()
+        {
+            assert(rdr.min_epoch != 0);
+            rdr.min_epoch.store(0);
+        }
+
+        const T& operator*() const
+        {
+            return rdr.obj.slots[read_slot];
+        }
+
+        const T* operator->() const
+        {
+            return &rdr.obj.slots[read_slot];
+        }
+
+        read_ptr(read_ptr&&) = delete;
+        read_ptr& operator=(read_ptr&&) = delete;
+        read_ptr(const read_ptr&) = delete;
+        read_ptr& operator=(const read_ptr&) = delete;
+
+    private:
+        reader& rdr;
+        std::uint8_t read_slot;
+    };
+
+    class reader
+    {
+    public:
+        reader(reclaim_on_write_object& obj) : obj(obj)
+        {
+            obj.register_reader(this);
+        }
+
+        ~reader()
+        {
+            obj.unregister_reader(this);
+        }
+
+        // Returns: a copy of the current value.
+        // Non-blocking guarantees: wait-free if the copy constructor of
+        // T is wait-free.
+        T get_value() noexcept(std::is_nothrow_copy_constructible_v<T>)
+        {
+            return *read_lock();
+        }
+
+        // Returns: a read_ptr giving read access to the current value.
+        // Non-blocking guarantees: wait-free.
+        read_ptr read_lock() noexcept
+        {
+            return read_ptr(*this);
+        }
+
+    private:
+        friend class reclaim_on_write_object;
+        friend class read_ptr;
+        reclaim_on_write_object& obj;
+        std::atomic<std::uint64_t> min_epoch = 0;
+    };
+
+    reader get_reader()
+    {
+        return reader(*this);
+    }
+
+    // Effects: Updates the current value to a new value constructed from the
+    // provided constructor arguments.
+    // Note: Blocks until all readers accessing the old value have finished.
+    template <typename... Args>
+    void update(Args... args)
+    {
+        std::uint8_t write_slot = current_read_slot.load() ^ 1;
+        slots[write_slot] = T(std::forward<Args>(args)...);
+        swap_slot_and_wait_for_readers(write_slot);
+    }
+
+    // write_ptr provides scoped write access to the value. This is useful if
+    // you want to modify, e.g., only a single data member of a larger class.
+    // The new value will be atomically published when write_ptr goes out of scope.
+    // Note: The destructor of write_ptr will block until all readers accessing
+    // the old value have finished.
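+    //
+    // For example (illustrative; big_t is a stand-in type):
+    //
+    //   crill::reclaim_on_write_object<big_t> obj;
+    //   {
+    //       auto w = obj.write_lock();  // copies the current value
+    //       w->b = 42;
+    //   }   // published here; the destructor blocks until all readers
+    //       // still using the old slot have finished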
+    class write_ptr
+    {
+    public:
+        write_ptr(reclaim_on_write_object& obj)
+          : obj(obj), write_slot(obj.current_read_slot.load() ^ 1)
+        {
+            obj.slots[write_slot] = obj.slots[write_slot ^ 1];
+        }
+
+        ~write_ptr()
+        {
+            obj.swap_slot_and_wait_for_readers(write_slot);
+        }
+
+        T& operator*()
+        {
+            return obj.slots[write_slot];
+        }
+
+        T* operator->() noexcept
+        {
+            return &obj.slots[write_slot];
+        }
+
+        write_ptr(write_ptr&&) = delete;
+        write_ptr& operator=(write_ptr&&) = delete;
+        write_ptr(const write_ptr&) = delete;
+        write_ptr& operator=(const write_ptr&) = delete;
+
+    private:
+        reclaim_on_write_object& obj;
+        std::uint8_t write_slot;
+    };
+
+    // Returns: a write_ptr giving scoped write access to the current value.
+    write_ptr write_lock()
+    {
+        return write_ptr(*this);
+    }
+
+private:
+    void swap_slot_and_wait_for_readers(std::uint8_t write_slot)
+    {
+        current_read_slot.store(write_slot);
+        auto retired_epoch = current_epoch.fetch_add(1);
+
+        while (has_readers_using_epoch(retired_epoch))
+            std::this_thread::yield(); // TODO: progressive backoff
+
+        // TODO:
+        // Paul McKenney mentioned that in similar algorithms, it is necessary
+        // to swap the slot and wait for readers *twice* before writing again
+        // to avoid a race condition. Check if our algorithm is affected by this!
+    }
+
+    void register_reader(reader* rdr)
+    {
+        assert(rdr != nullptr);
+        std::scoped_lock lock(readers_mtx);
+        readers.push_back(rdr);
+    }
+
+    void unregister_reader(reader* rdr)
+    {
+        assert(rdr != nullptr);
+        std::scoped_lock lock(readers_mtx);
+
+        auto iter = std::find(readers.begin(), readers.end(), rdr);
+        assert(iter != readers.end());
+        readers.erase(iter);
+    }
+
+    bool has_readers_using_epoch(std::uint64_t epoch) noexcept
+    {
+        std::scoped_lock lock(readers_mtx);
+        return std::any_of(readers.begin(), readers.end(), [epoch](auto* reader){
+            assert(reader);
+            std::uint64_t reader_epoch = reader->min_epoch.load();
+            return reader_epoch != 0 && reader_epoch <= epoch;
+        });
+    }
+
+    std::array<T, 2> slots;
+    std::atomic<std::uint8_t> current_read_slot = 0;
+    std::vector<reader*> readers;
+    std::mutex readers_mtx;
+    std::atomic<std::uint64_t> current_epoch = 1;
+
+    // This algorithm requires a 64-bit lock-free atomic counter to avoid overflow.
+    static_assert(std::atomic<std::uint64_t>::is_always_lock_free);
+
+    // This algorithm does not work for non-copyable types!
+    static_assert(std::is_copy_constructible_v<T>);
+    static_assert(std::is_copy_assignable_v<T>);
+};
+
+} // namespace crill
+
+#endif // CRILL_RECLAIM_ON_WRITE_OBJECT_H
diff --git a/include/crill/utility.h b/include/crill/utility.h
new file mode 100644
index 0000000..8cd88b2
--- /dev/null
+++ b/include/crill/utility.h
@@ -0,0 +1,26 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#ifndef CRILL_UTILITY_H
+#define CRILL_UTILITY_H
+
+namespace crill
+{
+
+// Effects: Invokes f on the first call; subsequent calls with the same
+// callable type F are no-ops.
+template <typename F>
+void call_once(F&& f) noexcept(noexcept(f()))
+{
+    static bool _ = [&]{ f(); return true; }();
+}
+
+// Effects: Invokes f on the first call made by each thread; subsequent
+// calls on that thread with the same callable type F are no-ops.
+template <typename F>
+void call_once_per_thread(F&& f) noexcept(noexcept(f()))
+{
+    static thread_local bool _ = [&]{ f(); return true; }();
+}
+
+} // namespace crill
+
+#endif //CRILL_UTILITY_H
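Both helpers lean on C++ "magic statics": the initializer of a function-local static runs exactly once (once per thread for thread_local), and concurrent first callers block until the initialization has finished. Note that the static lives inside the template instantiation for the callable's type F, so each distinct lambda type gets its own once-flag. A small usage sketch (hypothetical caller code; init_dsp_tables is a stand-in for some expensive one-time setup):

    #include <crill/utility.h>

    void init_dsp_tables();   // hypothetical one-time setup

    void process_block()
    {
        // The first caller runs init_dsp_tables(); concurrent first
        // callers wait for it to finish; all later calls are no-ops.
        crill::call_once([]{ init_dsp_tables(); });
    }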
diff --git a/tests/atomic_unique_ptr_test.cpp b/tests/atomic_unique_ptr_test.cpp
new file mode 100644
index 0000000..29da17d
--- /dev/null
+++ b/tests/atomic_unique_ptr_test.cpp
@@ -0,0 +1,147 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#include <atomic>
+#include <chrono>
+#include <memory>
+#include <optional>
+#include <string>
+#include <thread>
+#include <vector>
+#include <crill/atomic_unique_ptr.h>
+#include "tests.h"
+
+TEST_CASE("Default constructor")
+{
+    crill::atomic_unique_ptr<int> auptr;
+    CHECK(auptr.load() == nullptr);
+}
+
+TEST_CASE("Pointer constructor")
+{
+    auto uptr = std::make_unique<int>();
+    int* ptr = uptr.get();
+
+    crill::atomic_unique_ptr<int> auptr(std::move(uptr));
+    CHECK(auptr.load() == ptr);
+    CHECK(uptr == nullptr);
+}
+
+TEST_CASE("Emplacement constructor")
+{
+    crill::atomic_unique_ptr<std::string> auptr(3, 'x');
+    auto* ptr = auptr.load();
+    CHECK(*ptr == "xxx");
+}
+
+TEST_CASE("Atomic exchange")
+{
+    auto uptr1 = std::make_unique<int>();
+    int* ptr1 = uptr1.get();
+
+    auto uptr2 = std::make_unique<int>();
+    int* ptr2 = uptr2.get();
+
+    crill::atomic_unique_ptr<int> auptr(std::move(uptr1));
+    auto uptr3 = auptr.exchange(std::move(uptr2));
+    CHECK(auptr.load() == ptr2);
+    CHECK(uptr3.get() == ptr1);
+}
+
+TEST_CASE("Atomic exchange from multiple threads")
+{
+    crill::atomic_unique_ptr<int> auptr(std::make_unique<int>());
+
+    std::vector<std::thread> threads;
+    const std::size_t num_threads = 20;
+    std::atomic<bool> stop = false;
+    std::atomic<int> counter = 0;
+    std::atomic<std::size_t> threads_running = 0;
+
+    for (std::size_t i = 0; i < num_threads; ++i)
+    {
+        threads.emplace_back([&] {
+            threads_running.fetch_add(1);
+            while (!stop)
+                for (int i = 0; i < 10000; ++i)
+                    counter.fetch_add(*auptr.exchange(std::make_unique<int>(i)).get());
+        });
+    }
+
+    while (threads_running < num_threads)
+        std::this_thread::yield();
+
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+    stop.store(true);
+    for (auto& thread : threads)
+        thread.join();
+}
+
+TEST_CASE("Compare-exchange")
+{
+    auto uptr1 = std::make_unique<int>();
+    int* ptr1 = uptr1.get();
+
+    auto uptr2 = std::make_unique<int>();
+    int* ptr2 = uptr2.get();
+
+    crill::atomic_unique_ptr<int> auptr(std::move(uptr1));
+
+    SUBCASE("Success - strong")
+    {
+        auto result = auptr.compare_exchange_strong(ptr1, uptr2);
+        CHECK(uptr2 == nullptr);
+        CHECK(result->get() == ptr1);
+    }
+
+    SUBCASE("Success - weak")
+    {
+        std::optional<std::unique_ptr<int>> result;
+        do
+        {
+            result = auptr.compare_exchange_weak(ptr1, uptr2);
+        } while (!result);
+
+        CHECK(uptr2 == nullptr);
+        CHECK(result->get() == ptr1);
+    }
+
+    SUBCASE("Failure - strong")
+    {
+        auto uptr3 = std::make_unique<int>();
+        auto result = auptr.compare_exchange_strong(ptr2, uptr3);
+        CHECK(!result);
+        CHECK(ptr2 == ptr1);
+    }
+
+    SUBCASE("Failure - weak")
+    {
+        auto uptr3 = std::make_unique<int>();
+        auto result = auptr.compare_exchange_weak(ptr2, uptr3);
+        CHECK(!result);
+        CHECK(ptr2 == ptr1);
+    }
+}
+
+TEST_CASE("Compare-exchange from multiple threads")
+{
+    // TODO: implement
+}
+
object") +{ + std::size_t dtor_counter = 0; + struct test_t + { + test_t(std::size_t& dtor_counter) : dtor_counter(dtor_counter) {} + ~test_t() { ++dtor_counter; } + std::size_t& dtor_counter; + }; + + auto uptr = std::make_unique(dtor_counter); + + { + crill::atomic_unique_ptr auptr(std::move(uptr)); + CHECK(dtor_counter == 0); + } + CHECK(dtor_counter == 1); +} diff --git a/tests/reclaim_object_test.cpp b/tests/reclaim_object_test.cpp new file mode 100644 index 0000000..c45ce81 --- /dev/null +++ b/tests/reclaim_object_test.cpp @@ -0,0 +1,346 @@ +// crill - the Cross-platform Real-time, I/O, and Low-Latency Library +// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) + +#include +#include +#include "tests.h" + +static_assert(!std::is_copy_constructible_v>); +static_assert(!std::is_move_constructible_v>); +static_assert(!std::is_copy_assignable_v>); +static_assert(!std::is_move_assignable_v>); + +static_assert(!std::is_copy_constructible_v>); +static_assert(!std::is_move_constructible_v>); +static_assert(!std::is_copy_assignable_v>); +static_assert(!std::is_move_assignable_v>); + +TEST_CASE("reclaim_object::reclaim_object()") +{ + struct test_t + { + test_t() : i(42) {} + int i; + }; + + crill::reclaim_object obj; + auto reader = obj.get_reader(); + CHECK(reader.get_value().i == 42); +} + +TEST_CASE("reclaim_object::reclaim_object(Args...)") +{ + crill::reclaim_object obj(3, 'x'); + auto reader = obj.get_reader(); + CHECK(reader.get_value() == "xxx"); +} + +TEST_CASE("reclaim_object::read_ptr") +{ + crill::reclaim_object obj(3, 'x'); + auto reader = obj.get_reader(); + + SUBCASE("read_ptr is not copyable or movable") + { + auto read_ptr = reader.read_lock(); + static_assert(!std::is_copy_constructible_v); + static_assert(!std::is_copy_assignable_v); + static_assert(!std::is_move_constructible_v); + static_assert(!std::is_move_assignable_v); + } + + SUBCASE("Dereference") + { + auto read_ptr = reader.read_lock(); + CHECK(*read_ptr == "xxx"); + } + + SUBCASE("Member access operator") + { + auto read_ptr = reader.read_lock(); + CHECK(read_ptr->size() == 3); + } + + SUBCASE("Access is read-only") + { + auto read_ptr = reader.read_lock(); + static_assert(std::is_same_v); + static_assert(std::is_same_v()), const std::string*>); + } + + SUBCASE("Multiple read_ptrs from same reader are OK as long as lifetimes do not overlap") + { + { + auto read_ptr = reader.read_lock(); + } + { + auto read_ptr = reader.read_lock(); + CHECK(*read_ptr == "xxx"); + } + } +} + +TEST_CASE("reclaim_object::update") +{ + crill::reclaim_object obj("hello"); + auto reader = obj.get_reader(); + + SUBCASE("read read_ptr obtained before update reads old value after update") + { + auto read_ptr = reader.read_lock(); + obj.update(3, 'x'); + CHECK(*read_ptr == "hello"); + } + + SUBCASE("read read_ptr obtained after update reads new value") + { + obj.update(3, 'x'); + auto read_ptr = reader.read_lock(); + CHECK(*read_ptr == "xxx"); + } +} + +TEST_CASE("reclaim_object::write_ptr") +{ + struct test_t + { + int i = 0, j = 0; + }; + + crill::reclaim_object obj; + auto reader = obj.get_reader(); + + SUBCASE("read_ptr is not copyable or movable") + { + auto write_ptr = obj.write_lock(); + static_assert(!std::is_copy_constructible_v); + static_assert(!std::is_copy_assignable_v); + static_assert(!std::is_move_constructible_v); + static_assert(!std::is_move_assignable_v); + } + + 
SUBCASE("Modifications do not get published while write_ptr is still alive") + { + auto write_ptr = obj.write_lock(); + write_ptr->j = 4; + CHECK(reader.get_value().j == 0); + } + + SUBCASE("Modifications get published when write_ptr goes out of scope") + { + { + auto write_ptr = obj.write_lock(); + write_ptr->j = 4; + } + CHECK(reader.get_value().j == 4); + } +} + +TEST_CASE("reclaim_object::reclaim") +{ + using namespace crill::test; + + counted_t::reset(); + crill::reclaim_object obj; + + CHECK(counted_t::instances_created == 1); + CHECK(counted_t::instances_alive == 1); + CHECK(obj.get_reader().read_lock()->index == 0); + + SUBCASE("No reclamation without call to reclaim()") + { + obj.update(); + obj.update(); + CHECK(counted_t::instances_created == 3); + CHECK(counted_t::instances_alive == 3); + CHECK(obj.get_reader().read_lock()->index == 2); + } + + SUBCASE("reclaim() reclaims retired objects") + { + obj.update(); + obj.update(); + + obj.reclaim(); + CHECK(counted_t::instances_created == 3); + CHECK(counted_t::instances_alive == 1); + CHECK(obj.get_reader().read_lock()->index == 2); + } + + SUBCASE("reclaim() reclaims retired objects if there is an old reader, as long as there is no active read_ptr") + { + auto reader = obj.get_reader(); + obj.update(); + obj.update(); + + obj.reclaim(); + CHECK(counted_t::instances_created == 3); + CHECK(counted_t::instances_alive == 1); + CHECK(obj.get_reader().read_lock()->index == 2); + } + + SUBCASE("reclaim() does not reclaim retired objects if there is an old read_ptr") + { + auto reader = obj.get_reader(); + auto read_ptr = reader.read_lock(); + obj.update(); + obj.update(); + + obj.reclaim(); + CHECK(counted_t::instances_created == 3); + CHECK(counted_t::instances_alive == 3); + CHECK(obj.get_reader().read_lock()->index == 2); + } +} + +TEST_CASE("reclaim_object reader does not block writer") +{ + crill::reclaim_object obj(42); + + std::atomic has_read_lock = false; + std::atomic start_writer = false; + std::atomic give_up_read_lock = false; + std::atomic obj_updated = false; + + std::thread reader_thread([&]{ + auto reader = obj.get_reader(); + auto read_ptr = reader.read_lock(); + + has_read_lock = true; + start_writer = true; + + while (!give_up_read_lock) + std::this_thread::yield(); + + CHECK(obj_updated); + CHECK(*read_ptr == 42); // must still read old value here! 
+ }); + + std::thread writer_thread([&]{ + while (!start_writer) + std::this_thread::yield(); + + obj.update(43); // will reach this line while read_lock is held + obj_updated = true; + }); + + while (!has_read_lock) + std::this_thread::yield(); + + while (!obj_updated) + std::this_thread::yield(); + + give_up_read_lock = true; + reader_thread.join(); + writer_thread.join(); +} + +TEST_CASE("reclaim_object readers can be created and destroyed concurrently") +{ + crill::reclaim_object obj(42); + std::vector reader_threads; + const std::size_t num_readers = 20; + std::vector read_results(num_readers); + + std::atomic stop = false; + std::atomic threads_running = 0; + + for (std::size_t i = 0; i < num_readers; ++i) + { + reader_threads.emplace_back([&]{ + auto thread_idx = threads_running.fetch_add(1); + while (!stop) + read_results[thread_idx] = obj.get_reader().get_value(); + }); + } + + while (threads_running.load() < num_readers) + std::this_thread::yield(); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + stop = true; + for (auto& thread : reader_threads) + thread.join(); + + for (auto value : read_results) + CHECK(value == 42); +} + +TEST_CASE("reclaim_object reads, writes, and reclaim can all run concurrently") +{ + crill::reclaim_object obj("0"); + std::vector reader_threads; + const std::size_t num_readers = 5; + std::vector read_results(num_readers); + + std::atomic stop = false; + std::atomic readers_started = 0; + std::atomic writers_started = 0; + std::atomic garbage_collector_started = false; + + for (std::size_t i = 0; i < num_readers; ++i) + { + reader_threads.emplace_back([&]{ + auto reader = obj.get_reader(); + std::string value; + std::size_t thread_idx = readers_started; + + while (!stop) + { + read_results[thread_idx] = *reader.read_lock(); + crill::call_once_per_thread([&] { ++readers_started; }); + } + }); + } + + std::vector writer_threads; + const std::size_t num_writers = 2; + for (std::size_t i = 0; i < num_writers; ++i) + { + reader_threads.emplace_back([&]{ + while (!stop) + { + for (int i = 0; i < 1000; ++i) + obj.update(std::to_string(i)); + + crill::call_once_per_thread([&] { ++writers_started; }); + } + }); + } + + auto garbage_collector = std::thread([&]{ + garbage_collector_started = true; + while (!stop) { + obj.reclaim(); + } + }); + + while (readers_started < num_readers) + std::this_thread::yield(); + + while (writers_started < num_writers) + std::this_thread::yield(); + + while (!garbage_collector_started) + std::this_thread::yield(); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + stop = true; + + for (auto& thread : reader_threads) + thread.join(); + + for (auto& thread : writer_threads) + thread.join(); + + garbage_collector.join(); + + // every reader read some values that were written by writers: + for (const auto& value : read_results) + CHECK(value.size() > 0); + + // value is the last value written: + CHECK(obj.get_reader().get_value() == "999"); +} diff --git a/tests/reclaim_on_write_object_test.cpp b/tests/reclaim_on_write_object_test.cpp new file mode 100644 index 0000000..70092f3 --- /dev/null +++ b/tests/reclaim_on_write_object_test.cpp @@ -0,0 +1,233 @@ +// crill - the Cross-platform Real-time, I/O, and Low-Latency Library +// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#include <atomic>
+#include <chrono>
+#include <string>
+#include <thread>
+#include <type_traits>
+#include <vector>
+#include <crill/reclaim_on_write_object.h>
+#include "tests.h"
+
+static_assert(!std::is_copy_constructible_v<crill::reclaim_on_write_object<int>>);
+static_assert(!std::is_move_constructible_v<crill::reclaim_on_write_object<int>>);
+static_assert(!std::is_copy_assignable_v<crill::reclaim_on_write_object<int>>);
+static_assert(!std::is_move_assignable_v<crill::reclaim_on_write_object<int>>);
+
+static_assert(!std::is_copy_constructible_v<crill::reclaim_on_write_object<std::string>>);
+static_assert(!std::is_move_constructible_v<crill::reclaim_on_write_object<std::string>>);
+static_assert(!std::is_copy_assignable_v<crill::reclaim_on_write_object<std::string>>);
+static_assert(!std::is_move_assignable_v<crill::reclaim_on_write_object<std::string>>);
+
+TEST_CASE("reclaim_on_write_object::reclaim_on_write_object()")
+{
+    struct test_t
+    {
+        test_t() : i(42) {}
+        int i;
+    };
+
+    crill::reclaim_on_write_object<test_t> obj;
+    auto reader = obj.get_reader();
+    CHECK(reader.get_value().i == 42);
+}
+
+TEST_CASE("reclaim_on_write_object::reclaim_on_write_object(Args...)")
+{
+    crill::reclaim_on_write_object<std::string> obj(3, 'x');
+    auto reader = obj.get_reader();
+    CHECK(reader.get_value() == "xxx");
+}
+
+TEST_CASE("reclaim_on_write_object::read_ptr")
+{
+    crill::reclaim_on_write_object<std::string> obj(3, 'x');
+    auto reader = obj.get_reader();
+
+    SUBCASE("read_ptr is not copyable or movable")
+    {
+        auto read_ptr = reader.read_lock();
+        static_assert(!std::is_copy_constructible_v<decltype(read_ptr)>);
+        static_assert(!std::is_copy_assignable_v<decltype(read_ptr)>);
+        static_assert(!std::is_move_constructible_v<decltype(read_ptr)>);
+        static_assert(!std::is_move_assignable_v<decltype(read_ptr)>);
+    }
+
+    SUBCASE("Dereference")
+    {
+        auto read_ptr = reader.read_lock();
+        CHECK(*read_ptr == "xxx");
+    }
+
+    SUBCASE("Member access operator")
+    {
+        auto read_ptr = reader.read_lock();
+        CHECK(read_ptr->size() == 3);
+    }
+
+    SUBCASE("Access is read-only")
+    {
+        auto read_ptr = reader.read_lock();
+        static_assert(std::is_same_v<decltype(*read_ptr), const std::string&>);
+        static_assert(std::is_same_v<decltype(read_ptr.operator->()), const std::string*>);
+    }
+
+    SUBCASE("Multiple read_ptrs from same reader are OK as long as lifetimes do not overlap")
+    {
+        {
+            auto read_ptr = reader.read_lock();
+        }
+        {
+            auto read_ptr = reader.read_lock();
+            CHECK(*read_ptr == "xxx");
+        }
+    }
+}
+
+TEST_CASE("reclaim_on_write_object::update")
+{
+    crill::reclaim_on_write_object<std::string> obj("hello");
+    auto reader = obj.get_reader();
+
+    SUBCASE("read_ptr obtained after update reads new value")
+    {
+        obj.update(3, 'x');
+        auto read_ptr = reader.read_lock();
+        CHECK(*read_ptr == "xxx");
+    }
+
+    SUBCASE("read_ptr obtained before update reads old value after update")
+    {
+        std::atomic<bool> has_read_lock = false;
+        std::string read_result;
+
+        std::thread reader_thread([&]{
+            auto read_ptr = reader.read_lock();
+            has_read_lock = true;
+            std::this_thread::sleep_for(std::chrono::milliseconds(100));
+            read_result = *read_ptr;
+        });
+
+        while (!has_read_lock)
+            std::this_thread::yield();
+
+        obj.update(3, 'x');
+        reader_thread.join();
+
+        CHECK(read_result == "hello");
+        CHECK(*obj.get_reader().read_lock() == "xxx");
+    }
+}
+
+TEST_CASE("reclaim_on_write_object::write_ptr")
+{
+    struct test_t
+    {
+        int i = 0, j = 0;
+    };
+
+    crill::reclaim_on_write_object<test_t> obj;
+    auto reader = obj.get_reader();
+
+    SUBCASE("write_ptr is not copyable or movable")
+    {
+        auto write_ptr = obj.write_lock();
+        static_assert(!std::is_copy_constructible_v<decltype(write_ptr)>);
+        static_assert(!std::is_copy_assignable_v<decltype(write_ptr)>);
+        static_assert(!std::is_move_constructible_v<decltype(write_ptr)>);
+        static_assert(!std::is_move_assignable_v<decltype(write_ptr)>);
+    }
+
+    SUBCASE("Modifications do not get published while write_ptr is still alive")
+    {
+        auto write_ptr = obj.write_lock();
+        write_ptr->j = 4;
+        CHECK(reader.get_value().j == 0);
+    }
+
+    SUBCASE("Modifications get published when write_ptr goes out of scope")
+    {
+        {
+            auto write_ptr = obj.write_lock();
+            write_ptr->j = 4;
+        }
+        CHECK(reader.get_value().j == 4);
+    }
+}
+
+TEST_CASE("reclaim_on_write_object readers can be created and destroyed concurrently")
+{
+    crill::reclaim_on_write_object<int> obj(42);
+    std::vector<std::thread> reader_threads;
+    const std::size_t num_readers = 20;
+    std::vector<int> read_results(num_readers);
+
+    std::atomic<bool> stop = false;
+    std::atomic<std::size_t> threads_running = 0;
+
+    for (std::size_t i = 0; i < num_readers; ++i)
+    {
+        reader_threads.emplace_back([&]{
+            auto thread_idx = threads_running.fetch_add(1);
+            while (!stop)
+                read_results[thread_idx] = obj.get_reader().get_value();
+        });
+    }
+
+    while (threads_running.load() < num_readers)
+        std::this_thread::yield();
+
+    std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    stop = true;
+    for (auto& thread : reader_threads)
+        thread.join();
+
+    for (auto value : read_results)
+        CHECK(value == 42);
+}
+
+TEST_CASE("reclaim_on_write_object writers can make progress even if there is always a reader holding a lock")
+{
+    crill::reclaim_on_write_object<int> obj(42);
+
+    std::atomic<bool> reader_1_started = false;
+    std::atomic<bool> reader_2_started = false;
+    std::atomic<bool> reader_1_should_release = false;
+    std::atomic<bool> reader_2_should_release = false;
+    std::atomic<bool> stop = false;
+
+    std::thread reader_1([&]{
+        auto reader = obj.get_reader();
+        while (true)
+        {
+            auto read_ptr = reader.read_lock();
+            reader_1_started = true;
+            reader_1_should_release = false;
+            reader_2_should_release = true;
+            while (!reader_1_should_release)
+            {
+                if (stop) return;
+                std::this_thread::yield();
+            }
+        }
+    });
+
+    std::thread reader_2([&]{
+        auto reader = obj.get_reader();
+        while (true)
+        {
+            auto read_ptr = reader.read_lock();
+            reader_2_started = true;
+            reader_2_should_release = false;
+            reader_1_should_release = true;
+            while (!reader_2_should_release)
+            {
+                if (stop) return;
+                std::this_thread::yield();
+            }
+        }
+    });
+
+    // Make sure both readers are actually up and holding locks before
+    // exercising the writer.
+    while (!reader_1_started || !reader_2_started)
+        std::this_thread::yield();
+
+    obj.update(43); // this will not deadlock if implemented correctly
+
+    stop = true;
+    reader_1.join();
+    reader_2.join();
+
+    CHECK(obj.get_reader().get_value() == 43);
+}
\ No newline at end of file
diff --git a/tests/spin_mutex_test.cpp b/tests/spin_mutex_test.cpp
index fdbd3a8..ccf675d 100644
--- a/tests/spin_mutex_test.cpp
+++ b/tests/spin_mutex_test.cpp
@@ -3,27 +3,18 @@
 // Distributed under the Boost Software License, Version 1.0.
 // (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
 
-#include <doctest.h>
 #include <crill/spin_mutex.h>
-
-// Helper for doctest static tests:
-// (see https://github.com/doctest/doctest/issues/250)
-// TODO: put this into a header common to all tests?
-#define STATIC_CHECK(...) static_assert(__VA_ARGS__); CHECK(__VA_ARGS__);
-#define STATIC_CHECK_FALSE(...) static_assert(!__VA_ARGS__); CHECK_FALSE(__VA_ARGS__);
+#include <doctest.h>
+#include <type_traits>
 
 TEST_CASE("crill::spin_mutex")
 {
-    crill::spin_mutex mtx;
+    static_assert(std::is_default_constructible_v<crill::spin_mutex>);
+    static_assert(!std::is_copy_constructible_v<crill::spin_mutex>);
+    static_assert(!std::is_copy_assignable_v<crill::spin_mutex>);
+    static_assert(!std::is_move_constructible_v<crill::spin_mutex>);
+    static_assert(!std::is_move_assignable_v<crill::spin_mutex>);
 
-    SUBCASE("Special member functions")
-    {
-        STATIC_CHECK(std::is_default_constructible_v<crill::spin_mutex>);
-        STATIC_CHECK_FALSE(std::is_copy_constructible_v<crill::spin_mutex>);
-        STATIC_CHECK_FALSE(std::is_copy_assignable_v<crill::spin_mutex>);
-        STATIC_CHECK_FALSE(std::is_move_constructible_v<crill::spin_mutex>);
-        STATIC_CHECK_FALSE(std::is_move_assignable_v<crill::spin_mutex>);
-    }
+    crill::spin_mutex mtx;
 
     SUBCASE("If mutex is not locked, try_lock succeeds")
     {
diff --git a/tests/tests.h b/tests/tests.h
new file mode 100644
index 0000000..ada293c
--- /dev/null
+++ b/tests/tests.h
@@ -0,0 +1,32 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#ifndef CRILL_TESTS_H
+#define CRILL_TESTS_H
+
+#include <cstddef>
+#include <doctest.h>
+
+namespace crill::test
+{
+    // Helper to track constructor and destructor calls
+    struct counted_t
+    {
+        static void reset() { instances_created = 0; instances_alive = 0; }
+        inline static std::size_t instances_created;
+        inline static std::size_t instances_alive;
+
+        std::size_t index;
+        counted_t() : index(instances_created++) { ++instances_alive; }
+        ~counted_t() { --instances_alive; }
+
+        counted_t(const counted_t&) = delete;
+        counted_t(counted_t&&) = delete;
+        counted_t& operator=(const counted_t&) = delete;
+        counted_t& operator=(counted_t&&) = delete;
+    };
+}
+
+#endif //CRILL_TESTS_H
diff --git a/tests/utility_test.cpp b/tests/utility_test.cpp
new file mode 100644
index 0000000..0b1d399
--- /dev/null
+++ b/tests/utility_test.cpp
@@ -0,0 +1,67 @@
+// crill - the Cross-platform Real-time, I/O, and Low-Latency Library
+// Copyright (c) 2022 - Timur Doumler and Fabian Renn-Giles
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
+
+#include <atomic>
+#include <thread>
+#include <vector>
+#include <crill/utility.h>
+#include "tests.h"
+
+TEST_CASE("call_once")
+{
+    struct test_t
+    {
+        test_t(std::atomic<std::size_t>& counter)
+        {
+            crill::call_once([&] { counter.fetch_add(1); });
+        }
+    };
+
+    std::atomic<std::size_t> counter = 0;
+
+    std::vector<std::thread> threads;
+    const std::size_t num_threads = 8;
+    for (std::size_t i = 0; i < num_threads; ++i)
+    {
+        threads.emplace_back([&]{
+            test_t t1(counter);
+            test_t t2(counter);
+            test_t t3(counter);
+        });
+    }
+
+    for (auto& thread : threads)
+        thread.join();
+
+    CHECK(counter.load() == 1);
+}
+
+TEST_CASE("call_once_per_thread")
+{
+    struct test_t
+    {
+        test_t(std::atomic<std::size_t>& counter)
+        {
+            crill::call_once_per_thread([&] { ++counter; });
+        }
+    };
+
+    std::atomic<std::size_t> counter = 0;
+
+    std::vector<std::thread> threads;
+    const std::size_t num_threads = 8;
+    for (std::size_t i = 0; i < num_threads; ++i)
+    {
+        threads.emplace_back([&]{
+            test_t t1(counter);
+            test_t t2(counter);
+            test_t t3(counter);
+        });
+    }
+
+    for (auto& thread : threads)
+        thread.join();
+
+    CHECK(counter.load() == num_threads);
+}
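Taken together, the new headers support the read-mostly pattern described in the class comments: a real-time thread reads wait-free while a non-real-time thread publishes updates and periodically reclaims retired values. A minimal end-to-end sketch (hypothetical consumer code, not part of this patch; coeffs_t is a stand-in user type):

    #include <crill/reclaim_object.h>
    #include <thread>

    struct coeffs_t { float a = 1.0f, b = 0.0f; };

    int main()
    {
        crill::reclaim_object<coeffs_t> coeffs;

        std::thread audio([&]{               // stand-in for a real-time callback
            auto reader = coeffs.get_reader();
            float sum = 0.0f;
            for (int i = 0; i < 100000; ++i)
            {
                auto c = reader.read_lock(); // wait-free, no allocation
                sum += c->a * 0.5f + c->b;
            }
            (void) sum;
        });

        std::thread ui([&]{                  // non-real-time writer
            for (int i = 0; i < 100; ++i)
            {
                {
                    auto w = coeffs.write_lock(); // copy, modify, publish
                    w->b = static_cast<float>(i);
                }
                coeffs.reclaim();                 // free retired copies
            }
        });

        audio.join();
        ui.join();
    }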