diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a1a648e..19a0b203 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -278,7 +278,7 @@ jobs: cxxstd: "11,14,17,2a" addrmd: 64 cxxflags: "cxxflags=-Wa,-mbig-obj" - supported: "Too many unknowns to get this compiler working - gcc-8.1" + supported: true os: windows-2019 needs: [runner-selection] diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..2168bd09 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-shebang-scripts-are-executable + - id: mixed-line-ending + args: [--fix=auto] + + +- repo: https://github.com/pre-commit/mirrors-clang-format + rev: v17.0.2 + hooks: + - id: clang-format + types_or: [c++, c, cuda] diff --git a/CMakeLists.txt b/CMakeLists.txt index 50c8b279..660df0aa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,9 +7,32 @@ cmake_minimum_required(VERSION 3.5...3.16) project(boost_lockfree VERSION "${BOOST_SUPERPROJECT_VERSION}" LANGUAGES CXX) +option(BOOST_LOCKFREE_BUILD_TESTS "Build boost::lockfree tests" ${BUILD_TESTING}) + + add_library(boost_lockfree INTERFACE) add_library(Boost::lockfree ALIAS boost_lockfree) +if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.23) + set(Headers + include/boost/lockfree/spsc_queue.hpp + include/boost/lockfree/policies.hpp + include/boost/lockfree/queue.hpp + include/boost/lockfree/lockfree_forward.hpp + include/boost/lockfree/detail/prefix.hpp + include/boost/lockfree/detail/copy_payload.hpp + include/boost/lockfree/detail/tagged_ptr_dcas.hpp + include/boost/lockfree/detail/tagged_ptr.hpp + include/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp + include/boost/lockfree/detail/atomic.hpp + include/boost/lockfree/detail/freelist.hpp + include/boost/lockfree/detail/parameter.hpp + 
include/boost/lockfree/stack.hpp + ) + + target_sources(boost_lockfree PUBLIC FILE_SET HEADERS FILES ${Headers} ) +endif() + target_include_directories(boost_lockfree INTERFACE include) target_link_libraries(boost_lockfree @@ -31,9 +54,6 @@ target_link_libraries(boost_lockfree Boost::utility ) -if(BUILD_TESTING AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/test/CMakeLists.txt") - - add_subdirectory(test) - +if( BOOST_LOCKFREE_BUILD_TESTS ) + add_subdirectory(test) endif() - diff --git a/_clang-format b/_clang-format new file mode 100644 index 00000000..9bb3e5a9 --- /dev/null +++ b/_clang-format @@ -0,0 +1,147 @@ +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align + +# crashes: https://github.com/llvm/llvm-project/issues/55493 +# AlignArrayOfStructures: Left + +AlignConsecutiveAssignments: AcrossComments +AlignConsecutiveBitFields: AcrossComments +AlignConsecutiveDeclarations: AcrossComments +AlignConsecutiveMacros: AcrossComments +AlignConsecutiveShortCaseStatements: + Enabled: true + AcrossEmptyLines: true + AcrossComments: true + AlignCaseColons: false +AlignEscapedNewlines: Left +AlignOperands: true +AlignTrailingComments: + Kind: Always + OverEmptyLines: 1 + +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true + +AllowShortBlocksOnASingleLine: Empty +AllowShortCaseLabelsOnASingleLine: true +AllowShortEnumsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLambdasOnASingleLine: Empty +AllowShortLoopsOnASingleLine: false + +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BitFieldColonSpacing: Both +BraceWrapping: + AfterCaseLabel: false + AfterClass: true + AfterControlStatement: Never + AfterEnum: true + AfterFunction: true + AfterNamespace: false + AfterObjCDeclaration: true + AfterStruct: true + AfterUnion: true + AfterExternBlock: false + BeforeCatch: 
false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: false + SplitEmptyRecord: false + SplitEmptyNamespace: false +BreakAfterAttributes: Leave +BreakBeforeInlineASMColon: Always +BreakBeforeBinaryOperators: All +BreakBeforeBraces: Custom +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: AfterColon +BreakInheritanceList: AfterColon +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: '^!' +CompactNamespaces: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: [ foreach, BOOST_FOREACH ] +IncludeBlocks: Preserve +IndentAccessModifiers: false +IndentCaseBlocks: false +IndentCaseLabels: false +IndentExternBlock: NoIndent +IndentGotoLabels: false +IndentPPDirectives: AfterHash +# IndentRequiresClause: true + +IndentWidth: 4 +IndentWrappedFunctionNames: false +InsertNewlineAtEOF: True +InsertTrailingCommas: Wrapped +# IntegerLiteralSeparator: +# Binary: 0 +# Decimal: 3 +# Hex: -1 +KeepEmptyLinesAtTheStartOfBlocks: false +KeepEmptyLinesAtEOF: false +LambdaBodyIndentation: OuterScope +MaxEmptyLinesToKeep: 2 +NamespaceIndentation: None +NamespaceMacros: [ MSGPACK_API_VERSION_NAMESPACE ] +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PPIndentWidth: 4 +PackConstructorInitializers: Never +PenaltyBreakAssignment: 10 +PenaltyBreakBeforeFirstCallParameter: 100 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 1200 +PenaltyBreakString: 10 +PenaltyBreakOpenParenthesis: 100 +PenaltyExcessCharacter: 20 +PenaltyReturnTypeOnItsOwnLine: 50 +PointerAlignment: Left +ReferenceAlignment: Left +ReflowComments: true +RemoveParentheses: Leave + +# NOTE: results in broken code +# RemoveSemicolon: true +# 
RequiresClausePosition: OwnLine +ShortNamespaceLines: 0 +SortIncludes: CaseSensitive +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceBeforeSquareBrackets: false +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: Always +SpacesInCStyleCastParentheses: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInParentheses: true +SpacesInSquareBrackets: true +Standard: c++20 +TabWidth: 8 +UseTab: Never +WhitespaceSensitiveMacros: ['BOOST_PP_STRINGIZE'] diff --git a/examples/queue.cpp b/examples/queue.cpp index 9e599dbb..86c4e981 100644 --- a/examples/queue.cpp +++ b/examples/queue.cpp @@ -5,58 +5,58 @@ // http://www.boost.org/LICENSE_1_0.txt) //[queue_example -#include #include +#include #include #include -boost::atomic_int producer_count(0); -boost::atomic_int consumer_count(0); +boost::atomic_int producer_count( 0 ); +boost::atomic_int consumer_count( 0 ); -boost::lockfree::queue queue(128); +boost::lockfree::queue< int > queue( 128 ); -const int iterations = 10000000; +const int iterations = 10000000; const int producer_thread_count = 4; const int consumer_thread_count = 4; -void producer(void) +void producer( void ) { - for (int i = 0; i != iterations; ++i) { + for ( int i = 0; i != iterations; ++i ) { int value = ++producer_count; - while (!queue.push(value)) + while ( !queue.push( value ) ) ; } } -boost::atomic done (false); -void consumer(void) +boost::atomic< bool > done( false ); +void consumer( void ) { int value; - while (!done) { - while (queue.pop(value)) + while ( !done ) { + while ( queue.pop( value ) ) ++consumer_count; } - while (queue.pop(value)) + while ( queue.pop( 
value ) ) ++consumer_count; } -int main(int argc, char* argv[]) +int main( int argc, char* argv[] ) { using namespace std; cout << "boost::lockfree::queue is "; - if (!queue.is_lock_free()) + if ( !queue.is_lock_free() ) cout << "not "; cout << "lockfree" << endl; boost::thread_group producer_threads, consumer_threads; - for (int i = 0; i != producer_thread_count; ++i) - producer_threads.create_thread(producer); + for ( int i = 0; i != producer_thread_count; ++i ) + producer_threads.create_thread( producer ); - for (int i = 0; i != consumer_thread_count; ++i) - consumer_threads.create_thread(consumer); + for ( int i = 0; i != consumer_thread_count; ++i ) + consumer_threads.create_thread( consumer ); producer_threads.join_all(); done = true; diff --git a/examples/spsc_queue.cpp b/examples/spsc_queue.cpp index 6c6adc17..b9c50872 100644 --- a/examples/spsc_queue.cpp +++ b/examples/spsc_queue.cpp @@ -5,52 +5,52 @@ // http://www.boost.org/LICENSE_1_0.txt) //[spsc_queue_example -#include #include +#include #include #include -int producer_count = 0; -boost::atomic_int consumer_count (0); +int producer_count = 0; +boost::atomic_int consumer_count( 0 ); -boost::lockfree::spsc_queue > spsc_queue; +boost::lockfree::spsc_queue< int, boost::lockfree::capacity< 1024 > > spsc_queue; const int iterations = 10000000; -void producer(void) +void producer( void ) { - for (int i = 0; i != iterations; ++i) { + for ( int i = 0; i != iterations; ++i ) { int value = ++producer_count; - while (!spsc_queue.push(value)) + while ( !spsc_queue.push( value ) ) ; } } -boost::atomic done (false); +boost::atomic< bool > done( false ); -void consumer(void) +void consumer( void ) { int value; - while (!done) { - while (spsc_queue.pop(value)) + while ( !done ) { + while ( spsc_queue.pop( value ) ) ++consumer_count; } - while (spsc_queue.pop(value)) + while ( spsc_queue.pop( value ) ) ++consumer_count; } -int main(int argc, char* argv[]) +int main( int argc, char* argv[] ) { using namespace std; cout 
<< "boost::lockfree::queue is "; - if (!spsc_queue.is_lock_free()) + if ( !spsc_queue.is_lock_free() ) cout << "not "; cout << "lockfree" << endl; - boost::thread producer_thread(producer); - boost::thread consumer_thread(consumer); + boost::thread producer_thread( producer ); + boost::thread consumer_thread( consumer ); producer_thread.join(); done = true; diff --git a/examples/stack.cpp b/examples/stack.cpp index ba123026..cd9e6a79 100644 --- a/examples/stack.cpp +++ b/examples/stack.cpp @@ -5,59 +5,59 @@ // http://www.boost.org/LICENSE_1_0.txt) //[stack_example -#include #include +#include #include #include -boost::atomic_int producer_count(0); -boost::atomic_int consumer_count(0); +boost::atomic_int producer_count( 0 ); +boost::atomic_int consumer_count( 0 ); -boost::lockfree::stack stack(128); +boost::lockfree::stack< int > stack( 128 ); -const int iterations = 1000000; +const int iterations = 1000000; const int producer_thread_count = 4; const int consumer_thread_count = 4; -void producer(void) +void producer( void ) { - for (int i = 0; i != iterations; ++i) { + for ( int i = 0; i != iterations; ++i ) { int value = ++producer_count; - while (!stack.push(value)) + while ( !stack.push( value ) ) ; } } -boost::atomic done (false); +boost::atomic< bool > done( false ); -void consumer(void) +void consumer( void ) { int value; - while (!done) { - while (stack.pop(value)) + while ( !done ) { + while ( stack.pop( value ) ) ++consumer_count; } - while (stack.pop(value)) + while ( stack.pop( value ) ) ++consumer_count; } -int main(int argc, char* argv[]) +int main( int argc, char* argv[] ) { using namespace std; cout << "boost::lockfree::stack is "; - if (!stack.is_lock_free()) + if ( !stack.is_lock_free() ) cout << "not "; cout << "lockfree" << endl; boost::thread_group producer_threads, consumer_threads; - for (int i = 0; i != producer_thread_count; ++i) - producer_threads.create_thread(producer); + for ( int i = 0; i != producer_thread_count; ++i ) + 
producer_threads.create_thread( producer ); - for (int i = 0; i != consumer_thread_count; ++i) - consumer_threads.create_thread(consumer); + for ( int i = 0; i != consumer_thread_count; ++i ) + consumer_threads.create_thread( consumer ); producer_threads.join_all(); done = true; diff --git a/include/boost/lockfree/detail/atomic.hpp b/include/boost/lockfree/detail/atomic.hpp index 6083800f..24726003 100644 --- a/include/boost/lockfree/detail/atomic.hpp +++ b/include/boost/lockfree/detail/atomic.hpp @@ -11,57 +11,56 @@ #ifndef BOOST_LOCKFREE_FORCE_STD_ATOMIC -#define BOOST_LOCKFREE_NO_HDR_ATOMIC +# define BOOST_LOCKFREE_NO_HDR_ATOMIC // MSVC supports atomic<> from version 2012 onwards. -#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700) -#undef BOOST_LOCKFREE_NO_HDR_ATOMIC -#endif +# if defined( BOOST_MSVC ) && ( BOOST_MSVC >= 1700 ) +# undef BOOST_LOCKFREE_NO_HDR_ATOMIC +# endif // GCC supports atomic<> from version 4.8 onwards. -#if (BOOST_GCC >= 40800) && (__cplusplus >= 201103L) -#undef BOOST_LOCKFREE_NO_HDR_ATOMIC -#endif +# if ( BOOST_GCC >= 40800 ) && ( __cplusplus >= 201103L ) +# undef BOOST_LOCKFREE_NO_HDR_ATOMIC +# endif // Apple clang is 2 mayor versions ahead, but in fact 1 minor version behind -#ifdef BOOST_CLANG +# ifdef BOOST_CLANG -#define BOOST_ATOMIC_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) +# define BOOST_ATOMIC_CLANG_VERSION ( __clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__ ) -#if defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 60100) && (__cplusplus >= 201103L) -#undef BOOST_LOCKFREE_NO_HDR_ATOMIC -#endif +# if defined( __apple_build_version__ ) && ( BOOST_ATOMIC_CLANG_VERSION >= 60100 ) && ( __cplusplus >= 201103L ) +# undef BOOST_LOCKFREE_NO_HDR_ATOMIC +# endif -#if !defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 30600) && (__cplusplus >= 201103L) -#undef BOOST_LOCKFREE_NO_HDR_ATOMIC -#endif +# if !defined( __apple_build_version__ ) && ( 
BOOST_ATOMIC_CLANG_VERSION >= 30600 ) && ( __cplusplus >= 201103L ) +# undef BOOST_LOCKFREE_NO_HDR_ATOMIC +# endif -#undef BOOST_ATOMIC_CLANG_VERSION +# undef BOOST_ATOMIC_CLANG_VERSION -#endif // BOOST_CLANG +# endif // BOOST_CLANG // Stdlib should also be checked -#include -#if defined(BOOST_NO_CXX11_HDR_ATOMIC) && !defined(BOOST_LOCKFREE_NO_HDR_ATOMIC) -# define BOOST_LOCKFREE_NO_HDR_ATOMIC -#endif +# include +# if defined( BOOST_NO_CXX11_HDR_ATOMIC ) && !defined( BOOST_LOCKFREE_NO_HDR_ATOMIC ) +# define BOOST_LOCKFREE_NO_HDR_ATOMIC +# endif #endif // BOOST_LOCKFREE_FORCE_STD_ATOMIC -#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC) || defined(BOOST_LOCKFREE_FORCE_BOOST_ATOMIC) -#include +#if defined( BOOST_LOCKFREE_NO_HDR_ATOMIC ) || defined( BOOST_LOCKFREE_FORCE_BOOST_ATOMIC ) +# include #else -#include +# include #endif -namespace boost { -namespace lockfree { +namespace boost { namespace lockfree { namespace detail { -#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC) || defined(BOOST_LOCKFREE_FORCE_BOOST_ATOMIC) +#if defined( BOOST_LOCKFREE_NO_HDR_ATOMIC ) || defined( BOOST_LOCKFREE_FORCE_BOOST_ATOMIC ) using boost::atomic; using boost::memory_order_acquire; using boost::memory_order_consume; @@ -75,13 +74,13 @@ using std::memory_order_relaxed; using std::memory_order_release; #endif -} +} // namespace detail using detail::atomic; using detail::memory_order_acquire; using detail::memory_order_consume; using detail::memory_order_relaxed; using detail::memory_order_release; -}} +}} // namespace boost::lockfree #endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */ diff --git a/include/boost/lockfree/detail/copy_payload.hpp b/include/boost/lockfree/detail/copy_payload.hpp index 5972da7c..9acd086f 100644 --- a/include/boost/lockfree/detail/copy_payload.hpp +++ b/include/boost/lockfree/detail/copy_payload.hpp @@ -12,19 +12,17 @@ #include #include -#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable: 4512) // assignment operator could not be generated +#if defined( 
_MSC_VER ) +# pragma warning( push ) +# pragma warning( disable : 4512 ) // assignment operator could not be generated #endif -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { namespace detail { struct copy_convertible { - template - static void copy(T & t, U & u) + template < typename T, typename U > + static void copy( T& t, U& u ) { u = t; } @@ -32,52 +30,50 @@ struct copy_convertible struct copy_constructible_and_copyable { - template - static void copy(T & t, U & u) + template < typename T, typename U > + static void copy( T& t, U& u ) { - u = U(t); + u = U( t ); } }; -template -void copy_payload(T & t, U & u) +template < typename T, typename U > +void copy_payload( T& t, U& u ) { - typedef typename boost::mpl::if_::type, - copy_convertible, - copy_constructible_and_copyable - >::type copy_type; - copy_type::copy(t, u); + typedef typename boost::mpl::if_< typename boost::is_convertible< T, U >::type, + copy_convertible, + copy_constructible_and_copyable >::type copy_type; + copy_type::copy( t, u ); } -template +template < typename T > struct consume_via_copy { - consume_via_copy(T & out): - out_(out) + consume_via_copy( T& out ) : + out_( out ) {} - template - void operator()(U & element) + template < typename U > + void operator()( U& element ) { - copy_payload(element, out_); + copy_payload( element, out_ ); } - T & out_; + T& out_; }; struct consume_noop { - template - void operator()(const U &) - { - } + template < typename U > + void operator()( const U& ) + {} }; -}}} +}}} // namespace boost::lockfree::detail -#if defined(_MSC_VER) -#pragma warning(pop) +#if defined( _MSC_VER ) +# pragma warning( pop ) #endif -#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */ +#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */ diff --git a/include/boost/lockfree/detail/freelist.hpp b/include/boost/lockfree/detail/freelist.hpp index 250b26bb..0ce72f6d 100644 --- 
a/include/boost/lockfree/detail/freelist.hpp +++ b/include/boost/lockfree/detail/freelist.hpp @@ -26,267 +26,261 @@ #include #include -#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable: 4100) // unreferenced formal parameter -#pragma warning(disable: 4127) // conditional expression is constant +#if defined( _MSC_VER ) +# pragma warning( push ) +# pragma warning( disable : 4100 ) // unreferenced formal parameter +# pragma warning( disable : 4127 ) // conditional expression is constant #endif -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { namespace detail { -template - > -class freelist_stack: - Alloc +template < typename T, typename Alloc = std::allocator< T > > +class freelist_stack : Alloc { struct freelist_node { - tagged_ptr next; + tagged_ptr< freelist_node > next; }; - typedef tagged_ptr tagged_node_ptr; + typedef tagged_ptr< freelist_node > tagged_node_ptr; public: - typedef T * index_t; - typedef tagged_ptr tagged_node_handle; + typedef T* index_t; + typedef tagged_ptr< T > tagged_node_handle; - template - freelist_stack (Allocator const & alloc, std::size_t n = 0): - Alloc(alloc), - pool_(tagged_node_ptr(NULL)) + template < typename Allocator > + freelist_stack( Allocator const& alloc, std::size_t n = 0 ) : + Alloc( alloc ), + pool_( tagged_node_ptr( NULL ) ) { - for (std::size_t i = 0; i != n; ++i) { - T * node = Alloc::allocate(1); - std::memset((void*)node, 0, sizeof(T)); + for ( std::size_t i = 0; i != n; ++i ) { + T* node = Alloc::allocate( 1 ); + std::memset( (void*)node, 0, sizeof( T ) ); #ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR - destruct(node); + destruct< false >( node ); #else - deallocate(node); + deallocate< false >( node ); #endif } } - template - void reserve (std::size_t count) + template < bool ThreadSafe > + void reserve( std::size_t count ) { - for (std::size_t i = 0; i != count; ++i) { - T * node = Alloc::allocate(1); - std::memset((void*)node, 0, sizeof(T)); 
- deallocate(node); + for ( std::size_t i = 0; i != count; ++i ) { + T* node = Alloc::allocate( 1 ); + std::memset( (void*)node, 0, sizeof( T ) ); + deallocate< ThreadSafe >( node ); } } - template - T * construct (void) + template < bool ThreadSafe, bool Bounded > + T* construct( void ) { - T * node = allocate(); - if (node) - new(node) T(); + T* node = allocate< ThreadSafe, Bounded >(); + if ( node ) + new ( node ) T(); return node; } - template - T * construct (ArgumentType const & arg) + template < bool ThreadSafe, bool Bounded, typename ArgumentType > + T* construct( ArgumentType const& arg ) { - T * node = allocate(); - if (node) - new(node) T(arg); + T* node = allocate< ThreadSafe, Bounded >(); + if ( node ) + new ( node ) T( arg ); return node; } - template - T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2) + template < bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2 > + T* construct( ArgumentType1 const& arg1, ArgumentType2 const& arg2 ) { - T * node = allocate(); - if (node) - new(node) T(arg1, arg2); + T* node = allocate< ThreadSafe, Bounded >(); + if ( node ) + new ( node ) T( arg1, arg2 ); return node; } - template - void destruct (tagged_node_handle const & tagged_ptr) + template < bool ThreadSafe > + void destruct( tagged_node_handle const& tagged_ptr ) { - T * n = tagged_ptr.get_ptr(); + T* n = tagged_ptr.get_ptr(); n->~T(); - deallocate(n); + deallocate< ThreadSafe >( n ); } - template - void destruct (T * n) + template < bool ThreadSafe > + void destruct( T* n ) { n->~T(); - deallocate(n); + deallocate< ThreadSafe >( n ); } - ~freelist_stack(void) + ~freelist_stack( void ) { tagged_node_ptr current = pool_.load(); - while (current) { - freelist_node * current_ptr = current.get_ptr(); - if (current_ptr) + while ( current ) { + freelist_node* current_ptr = current.get_ptr(); + if ( current_ptr ) current = current_ptr->next; - Alloc::deallocate((T*)current_ptr, 1); + Alloc::deallocate( 
(T*)current_ptr, 1 ); } } - bool is_lock_free(void) const + bool is_lock_free( void ) const { return pool_.is_lock_free(); } - T * get_handle(T * pointer) const + T* get_handle( T* pointer ) const { return pointer; } - T * get_handle(tagged_node_handle const & handle) const + T* get_handle( tagged_node_handle const& handle ) const { - return get_pointer(handle); + return get_pointer( handle ); } - T * get_pointer(tagged_node_handle const & tptr) const + T* get_pointer( tagged_node_handle const& tptr ) const { return tptr.get_ptr(); } - T * get_pointer(T * pointer) const + T* get_pointer( T* pointer ) const { return pointer; } - T * null_handle(void) const + T* null_handle( void ) const { return NULL; } protected: // allow use from subclasses - template - T * allocate (void) + template < bool ThreadSafe, bool Bounded > + T* allocate( void ) { - if (ThreadSafe) - return allocate_impl(); + if ( ThreadSafe ) + return allocate_impl< Bounded >(); else - return allocate_impl_unsafe(); + return allocate_impl_unsafe< Bounded >(); } private: - template - T * allocate_impl (void) + template < bool Bounded > + T* allocate_impl( void ) { - tagged_node_ptr old_pool = pool_.load(memory_order_consume); + tagged_node_ptr old_pool = pool_.load( memory_order_consume ); - for(;;) { - if (!old_pool.get_ptr()) { - if (!Bounded) { - T *ptr = Alloc::allocate(1); - std::memset((void*)ptr, 0, sizeof(T)); + for ( ;; ) { + if ( !old_pool.get_ptr() ) { + if ( !Bounded ) { + T* ptr = Alloc::allocate( 1 ); + std::memset( (void*)ptr, 0, sizeof( T ) ); return ptr; - } - else + } else return 0; } - freelist_node * new_pool_ptr = old_pool->next.get_ptr(); - tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag()); + freelist_node* new_pool_ptr = old_pool->next.get_ptr(); + tagged_node_ptr new_pool( new_pool_ptr, old_pool.get_next_tag() ); - if (pool_.compare_exchange_weak(old_pool, new_pool)) { - void * ptr = old_pool.get_ptr(); - return reinterpret_cast(ptr); + if ( 
pool_.compare_exchange_weak( old_pool, new_pool ) ) { + void* ptr = old_pool.get_ptr(); + return reinterpret_cast< T* >( ptr ); } } } - template - T * allocate_impl_unsafe (void) + template < bool Bounded > + T* allocate_impl_unsafe( void ) { - tagged_node_ptr old_pool = pool_.load(memory_order_relaxed); + tagged_node_ptr old_pool = pool_.load( memory_order_relaxed ); - if (!old_pool.get_ptr()) { - if (!Bounded) { - T *ptr = Alloc::allocate(1); - std::memset((void*)ptr, 0, sizeof(T)); + if ( !old_pool.get_ptr() ) { + if ( !Bounded ) { + T* ptr = Alloc::allocate( 1 ); + std::memset( (void*)ptr, 0, sizeof( T ) ); return ptr; - } - else + } else return 0; } - freelist_node * new_pool_ptr = old_pool->next.get_ptr(); - tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag()); + freelist_node* new_pool_ptr = old_pool->next.get_ptr(); + tagged_node_ptr new_pool( new_pool_ptr, old_pool.get_next_tag() ); - pool_.store(new_pool, memory_order_relaxed); - void * ptr = old_pool.get_ptr(); - return reinterpret_cast(ptr); + pool_.store( new_pool, memory_order_relaxed ); + void* ptr = old_pool.get_ptr(); + return reinterpret_cast< T* >( ptr ); } protected: - template - void deallocate (T * n) + template < bool ThreadSafe > + void deallocate( T* n ) { - if (ThreadSafe) - deallocate_impl(n); + if ( ThreadSafe ) + deallocate_impl( n ); else - deallocate_impl_unsafe(n); + deallocate_impl_unsafe( n ); } private: - void deallocate_impl (T * n) + void deallocate_impl( T* n ) { - void * node = n; - tagged_node_ptr old_pool = pool_.load(memory_order_consume); - freelist_node * new_pool_ptr = reinterpret_cast(node); + void* node = n; + tagged_node_ptr old_pool = pool_.load( memory_order_consume ); + freelist_node* new_pool_ptr = reinterpret_cast< freelist_node* >( node ); - for(;;) { - tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag()); - new_pool->next.set_ptr(old_pool.get_ptr()); + for ( ;; ) { + tagged_node_ptr new_pool( new_pool_ptr, old_pool.get_tag() ); + 
new_pool->next.set_ptr( old_pool.get_ptr() ); - if (pool_.compare_exchange_weak(old_pool, new_pool)) + if ( pool_.compare_exchange_weak( old_pool, new_pool ) ) return; } } - void deallocate_impl_unsafe (T * n) + void deallocate_impl_unsafe( T* n ) { - void * node = n; - tagged_node_ptr old_pool = pool_.load(memory_order_relaxed); - freelist_node * new_pool_ptr = reinterpret_cast(node); + void* node = n; + tagged_node_ptr old_pool = pool_.load( memory_order_relaxed ); + freelist_node* new_pool_ptr = reinterpret_cast< freelist_node* >( node ); - tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag()); - new_pool->next.set_ptr(old_pool.get_ptr()); + tagged_node_ptr new_pool( new_pool_ptr, old_pool.get_tag() ); + new_pool->next.set_ptr( old_pool.get_ptr() ); - pool_.store(new_pool, memory_order_relaxed); + pool_.store( new_pool, memory_order_relaxed ); } - atomic pool_; + atomic< tagged_node_ptr > pool_; }; -class -BOOST_ALIGNMENT( 4 ) // workaround for bugs in MSVC -tagged_index +class BOOST_ALIGNMENT( 4 ) // workaround for bugs in MSVC + tagged_index { public: typedef boost::uint16_t tag_t; typedef boost::uint16_t index_t; /** uninitialized constructor */ - tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0) + tagged_index( void ) BOOST_NOEXCEPT //: index(0), tag(0) {} /** copy constructor */ #ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS - tagged_index(tagged_index const & rhs): - index(rhs.index), tag(rhs.tag) + tagged_index( tagged_index const& rhs ) : + index( rhs.index ), + tag( rhs.tag ) {} #else - tagged_index(tagged_index const & rhs) = default; + tagged_index( tagged_index const& rhs ) = default; #endif - explicit tagged_index(index_t i, tag_t t = 0): - index(i), tag(t) + explicit tagged_index( index_t i, tag_t t = 0 ) : + index( i ), + tag( t ) {} /** index access */ @@ -296,7 +290,7 @@ tagged_index return index; } - void set_index(index_t i) + void set_index( index_t i ) { index = i; } @@ -311,357 +305,339 @@ tagged_index tag_t get_next_tag() const { - 
tag_t next = (get_tag() + 1u) & (std::numeric_limits::max)(); + tag_t next = ( get_tag() + 1u ) & ( std::numeric_limits< tag_t >::max )(); return next; } - void set_tag(tag_t t) + void set_tag( tag_t t ) { tag = t; } /* @} */ - bool operator==(tagged_index const & rhs) const + bool operator==( tagged_index const& rhs ) const { - return (index == rhs.index) && (tag == rhs.tag); + return ( index == rhs.index ) && ( tag == rhs.tag ); } - bool operator!=(tagged_index const & rhs) const + bool operator!=( tagged_index const& rhs ) const { - return !operator==(rhs); + return !operator==( rhs ); } protected: index_t index; - tag_t tag; + tag_t tag; }; -template -struct BOOST_ALIGNMENT(BOOST_LOCKFREE_CACHELINE_BYTES) compiletime_sized_freelist_storage +template < typename T, std::size_t size > +struct BOOST_ALIGNMENT( BOOST_LOCKFREE_CACHELINE_BYTES ) compiletime_sized_freelist_storage { // array-based freelists only support a 16bit address space. - BOOST_STATIC_ASSERT(size < 65536); + BOOST_STATIC_ASSERT( size < 65536 ); - boost::array data; + boost::array< char, size * sizeof( T ) + 64 > data; // unused ... 
only for API purposes - template - compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */) + template < typename Allocator > + compiletime_sized_freelist_storage( Allocator const& /* alloc */, std::size_t /* count */ ) { - data.fill(0); + data.fill( 0 ); } - T * nodes(void) const + T* nodes( void ) const { - char * data_pointer = const_cast(data.data()); - return reinterpret_cast( boost::alignment::align_up( data_pointer, BOOST_LOCKFREE_CACHELINE_BYTES ) ); + char* data_pointer = const_cast< char* >( data.data() ); + return reinterpret_cast< T* >( boost::alignment::align_up( data_pointer, BOOST_LOCKFREE_CACHELINE_BYTES ) ); } - std::size_t node_count(void) const + std::size_t node_count( void ) const { return size; } }; -template > -struct runtime_sized_freelist_storage: - boost::alignment::aligned_allocator_adaptor +template < typename T, typename Alloc = std::allocator< T > > +struct runtime_sized_freelist_storage : + boost::alignment::aligned_allocator_adaptor< Alloc, BOOST_LOCKFREE_CACHELINE_BYTES > { - typedef boost::alignment::aligned_allocator_adaptor allocator_type; - T * nodes_; - std::size_t node_count_; + typedef boost::alignment::aligned_allocator_adaptor< Alloc, BOOST_LOCKFREE_CACHELINE_BYTES > allocator_type; + T* nodes_; + std::size_t node_count_; - template - runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count): - allocator_type(alloc), node_count_(count) + template < typename Allocator > + runtime_sized_freelist_storage( Allocator const& alloc, std::size_t count ) : + allocator_type( alloc ), + node_count_( count ) { - if (count > 65535) - boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects")); - nodes_ = allocator_type::allocate(count); - std::memset((void*)nodes_, 0, sizeof(T) * count); + if ( count > 65535 ) + boost::throw_exception( std::runtime_error( "boost.lockfree: freelist size is limited to a maximum of " + "65535 
objects" ) ); + nodes_ = allocator_type::allocate( count ); + std::memset( (void*)nodes_, 0, sizeof( T ) * count ); } - ~runtime_sized_freelist_storage(void) + ~runtime_sized_freelist_storage( void ) { - allocator_type::deallocate(nodes_, node_count_); + allocator_type::deallocate( nodes_, node_count_ ); } - T * nodes(void) const + T* nodes( void ) const { return nodes_; } - std::size_t node_count(void) const + std::size_t node_count( void ) const { return node_count_; } }; -template - > -class fixed_size_freelist: - NodeStorage +template < typename T, typename NodeStorage = runtime_sized_freelist_storage< T > > +class fixed_size_freelist : NodeStorage { struct freelist_node { tagged_index next; }; - void initialize(void) + void initialize( void ) { - T * nodes = NodeStorage::nodes(); - for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) { - tagged_index * next_index = reinterpret_cast(nodes + i); - next_index->set_index(null_handle()); + T* nodes = NodeStorage::nodes(); + for ( std::size_t i = 0; i != NodeStorage::node_count(); ++i ) { + tagged_index* next_index = reinterpret_cast< tagged_index* >( nodes + i ); + next_index->set_index( null_handle() ); #ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR - destruct(nodes + i); + destruct< false >( nodes + i ); #else - deallocate(static_cast(i)); + deallocate< false >( static_cast< index_t >( i ) ); #endif } } public: - typedef tagged_index tagged_node_handle; + typedef tagged_index tagged_node_handle; typedef tagged_index::index_t index_t; - template - fixed_size_freelist (Allocator const & alloc, std::size_t count): - NodeStorage(alloc, count), - pool_(tagged_index(static_cast(count), 0)) + template < typename Allocator > + fixed_size_freelist( Allocator const& alloc, std::size_t count ) : + NodeStorage( alloc, count ), + pool_( tagged_index( static_cast< index_t >( count ), 0 ) ) { initialize(); } - fixed_size_freelist (void): - pool_(tagged_index(NodeStorage::node_count(), 0)) + fixed_size_freelist( void ) : 
+ pool_( tagged_index( NodeStorage::node_count(), 0 ) ) { initialize(); } - template - T * construct (void) + template < bool ThreadSafe, bool Bounded > + T* construct( void ) { - index_t node_index = allocate(); - if (node_index == null_handle()) + index_t node_index = allocate< ThreadSafe >(); + if ( node_index == null_handle() ) return NULL; - T * node = NodeStorage::nodes() + node_index; - new(node) T(); + T* node = NodeStorage::nodes() + node_index; + new ( node ) T(); return node; } - template - T * construct (ArgumentType const & arg) + template < bool ThreadSafe, bool Bounded, typename ArgumentType > + T* construct( ArgumentType const& arg ) { - index_t node_index = allocate(); - if (node_index == null_handle()) + index_t node_index = allocate< ThreadSafe >(); + if ( node_index == null_handle() ) return NULL; - T * node = NodeStorage::nodes() + node_index; - new(node) T(arg); + T* node = NodeStorage::nodes() + node_index; + new ( node ) T( arg ); return node; } - template - T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2) + template < bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2 > + T* construct( ArgumentType1 const& arg1, ArgumentType2 const& arg2 ) { - index_t node_index = allocate(); - if (node_index == null_handle()) + index_t node_index = allocate< ThreadSafe >(); + if ( node_index == null_handle() ) return NULL; - T * node = NodeStorage::nodes() + node_index; - new(node) T(arg1, arg2); + T* node = NodeStorage::nodes() + node_index; + new ( node ) T( arg1, arg2 ); return node; } - template - void destruct (tagged_node_handle tagged_index) + template < bool ThreadSafe > + void destruct( tagged_node_handle tagged_index ) { index_t index = tagged_index.get_index(); - T * n = NodeStorage::nodes() + index; + T* n = NodeStorage::nodes() + index; (void)n; // silence msvc warning n->~T(); - deallocate(index); + deallocate< ThreadSafe >( index ); } - template - void destruct (T * n) + template < bool 
ThreadSafe > + void destruct( T* n ) { n->~T(); - deallocate(static_cast(n - NodeStorage::nodes())); + deallocate< ThreadSafe >( static_cast< index_t >( n - NodeStorage::nodes() ) ); } - bool is_lock_free(void) const + bool is_lock_free( void ) const { return pool_.is_lock_free(); } - index_t null_handle(void) const + index_t null_handle( void ) const { - return static_cast(NodeStorage::node_count()); + return static_cast< index_t >( NodeStorage::node_count() ); } - index_t get_handle(T * pointer) const + index_t get_handle( T* pointer ) const { - if (pointer == NULL) + if ( pointer == NULL ) return null_handle(); else - return static_cast(pointer - NodeStorage::nodes()); + return static_cast< index_t >( pointer - NodeStorage::nodes() ); } - index_t get_handle(tagged_node_handle const & handle) const + index_t get_handle( tagged_node_handle const& handle ) const { return handle.get_index(); } - T * get_pointer(tagged_node_handle const & tptr) const + T* get_pointer( tagged_node_handle const& tptr ) const { - return get_pointer(tptr.get_index()); + return get_pointer( tptr.get_index() ); } - T * get_pointer(index_t index) const + T* get_pointer( index_t index ) const { - if (index == null_handle()) + if ( index == null_handle() ) return 0; else return NodeStorage::nodes() + index; } - T * get_pointer(T * ptr) const + T* get_pointer( T* ptr ) const { return ptr; } protected: // allow use from subclasses - template - index_t allocate (void) + template < bool ThreadSafe > + index_t allocate( void ) { - if (ThreadSafe) + if ( ThreadSafe ) return allocate_impl(); else return allocate_impl_unsafe(); } private: - index_t allocate_impl (void) + index_t allocate_impl( void ) { - tagged_index old_pool = pool_.load(memory_order_consume); + tagged_index old_pool = pool_.load( memory_order_consume ); - for(;;) { + for ( ;; ) { index_t index = old_pool.get_index(); - if (index == null_handle()) + if ( index == null_handle() ) return index; - T * old_node = NodeStorage::nodes() + 
index; - tagged_index * next_index = reinterpret_cast(old_node); + T* old_node = NodeStorage::nodes() + index; + tagged_index* next_index = reinterpret_cast< tagged_index* >( old_node ); - tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag()); + tagged_index new_pool( next_index->get_index(), old_pool.get_next_tag() ); - if (pool_.compare_exchange_weak(old_pool, new_pool)) + if ( pool_.compare_exchange_weak( old_pool, new_pool ) ) return old_pool.get_index(); } } - index_t allocate_impl_unsafe (void) + index_t allocate_impl_unsafe( void ) { - tagged_index old_pool = pool_.load(memory_order_consume); + tagged_index old_pool = pool_.load( memory_order_consume ); index_t index = old_pool.get_index(); - if (index == null_handle()) + if ( index == null_handle() ) return index; - T * old_node = NodeStorage::nodes() + index; - tagged_index * next_index = reinterpret_cast(old_node); + T* old_node = NodeStorage::nodes() + index; + tagged_index* next_index = reinterpret_cast< tagged_index* >( old_node ); - tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag()); + tagged_index new_pool( next_index->get_index(), old_pool.get_next_tag() ); - pool_.store(new_pool, memory_order_relaxed); + pool_.store( new_pool, memory_order_relaxed ); return old_pool.get_index(); } - template - void deallocate (index_t index) + template < bool ThreadSafe > + void deallocate( index_t index ) { - if (ThreadSafe) - deallocate_impl(index); + if ( ThreadSafe ) + deallocate_impl( index ); else - deallocate_impl_unsafe(index); + deallocate_impl_unsafe( index ); } - void deallocate_impl (index_t index) + void deallocate_impl( index_t index ) { - freelist_node * new_pool_node = reinterpret_cast(NodeStorage::nodes() + index); - tagged_index old_pool = pool_.load(memory_order_consume); + freelist_node* new_pool_node = reinterpret_cast< freelist_node* >( NodeStorage::nodes() + index ); + tagged_index old_pool = pool_.load( memory_order_consume ); - for(;;) { - tagged_index 
new_pool (index, old_pool.get_tag()); - new_pool_node->next.set_index(old_pool.get_index()); + for ( ;; ) { + tagged_index new_pool( index, old_pool.get_tag() ); + new_pool_node->next.set_index( old_pool.get_index() ); - if (pool_.compare_exchange_weak(old_pool, new_pool)) + if ( pool_.compare_exchange_weak( old_pool, new_pool ) ) return; } } - void deallocate_impl_unsafe (index_t index) + void deallocate_impl_unsafe( index_t index ) { - freelist_node * new_pool_node = reinterpret_cast(NodeStorage::nodes() + index); - tagged_index old_pool = pool_.load(memory_order_consume); + freelist_node* new_pool_node = reinterpret_cast< freelist_node* >( NodeStorage::nodes() + index ); + tagged_index old_pool = pool_.load( memory_order_consume ); - tagged_index new_pool (index, old_pool.get_tag()); - new_pool_node->next.set_index(old_pool.get_index()); + tagged_index new_pool( index, old_pool.get_tag() ); + new_pool_node->next.set_index( old_pool.get_index() ); - pool_.store(new_pool); + pool_.store( new_pool ); } - atomic pool_; + atomic< tagged_index > pool_; }; -template +template < typename T, typename Alloc, bool IsCompileTimeSized, bool IsFixedSize, std::size_t Capacity > struct select_freelist { - typedef typename mpl::if_c, - runtime_sized_freelist_storage - >::type fixed_sized_storage_type; - - typedef typename mpl::if_c, - freelist_stack - >::type type; + typedef typename mpl::if_c< IsCompileTimeSized, + compiletime_sized_freelist_storage< T, Capacity >, + runtime_sized_freelist_storage< T, Alloc > >::type fixed_sized_storage_type; + + typedef typename mpl::if_c< IsCompileTimeSized || IsFixedSize, + fixed_size_freelist< T, fixed_sized_storage_type >, + freelist_stack< T, Alloc > >::type type; }; -template +template < typename T, bool IsNodeBased > struct select_tagged_handle { - typedef typename mpl::if_c, - tagged_index - >::type tagged_handle_type; - - typedef typename mpl::if_c::type handle_type; + typedef typename mpl::if_c< IsNodeBased, tagged_ptr< T >, 
tagged_index >::type tagged_handle_type; + + typedef typename mpl::if_c< IsNodeBased, T*, typename tagged_index::index_t >::type handle_type; }; -} /* namespace detail */ -} /* namespace lockfree */ -} /* namespace boost */ +}}} // namespace boost::lockfree::detail -#if defined(_MSC_VER) -#pragma warning(pop) +#if defined( _MSC_VER ) +# pragma warning( pop ) #endif diff --git a/include/boost/lockfree/detail/parameter.hpp b/include/boost/lockfree/detail/parameter.hpp index 79bde9a1..27364358 100644 --- a/include/boost/lockfree/detail/parameter.hpp +++ b/include/boost/lockfree/detail/parameter.hpp @@ -19,64 +19,57 @@ #include - -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { namespace detail { namespace mpl = boost::mpl; -template +template < typename bound_args, typename tag_type > struct has_arg { - typedef typename parameter::binding::type type; - static const bool value = mpl::is_not_void_::type::value; + typedef typename parameter::binding< bound_args, tag_type, mpl::void_ >::type type; + static const bool value = mpl::is_not_void_< type >::type::value; }; -template +template < typename bound_args > struct extract_capacity { - static const bool has_capacity = has_arg::value; + static const bool has_capacity = has_arg< bound_args, tag::capacity >::value; - typedef typename mpl::if_c::type, - mpl::size_t< 0 > - >::type capacity_t; + typedef + typename mpl::if_c< has_capacity, typename has_arg< bound_args, tag::capacity >::type, mpl::size_t< 0 > >::type + capacity_t; static const std::size_t capacity = capacity_t::value; }; -template +template < typename bound_args, typename T > struct extract_allocator { - static const bool has_allocator = has_arg::value; + static const bool has_allocator = has_arg< bound_args, tag::allocator >::value; - typedef typename mpl::if_c::type, - boost::alignment::aligned_allocator - >::type allocator_arg; + typedef + typename mpl::if_c< has_allocator, + typename has_arg< bound_args, 
tag::allocator >::type, + boost::alignment::aligned_allocator< T, BOOST_LOCKFREE_CACHELINE_BYTES > >::type allocator_arg; - typedef typename boost::allocator_rebind::type type; + typedef typename boost::allocator_rebind< allocator_arg, T >::type type; }; -template +template < typename bound_args, bool default_ = false > struct extract_fixed_sized { - static const bool has_fixed_sized = has_arg::value; + static const bool has_fixed_sized = has_arg< bound_args, tag::fixed_sized >::value; - typedef typename mpl::if_c::type, - mpl::bool_ - >::type type; + typedef typename mpl::if_c< has_fixed_sized, + typename has_arg< bound_args, tag::fixed_sized >::type, + mpl::bool_< default_ > >::type type; static const bool value = type::value; }; -} /* namespace detail */ -} /* namespace lockfree */ -} /* namespace boost */ +}}} // namespace boost::lockfree::detail #endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */ diff --git a/include/boost/lockfree/detail/prefix.hpp b/include/boost/lockfree/detail/prefix.hpp index 600c037c..88bcbdf8 100644 --- a/include/boost/lockfree/detail/prefix.hpp +++ b/include/boost/lockfree/detail/prefix.hpp @@ -13,18 +13,18 @@ of the virtual address space as tag (at least 16bit) */ -#if defined(__s390__) || defined(__s390x__) - #define BOOST_LOCKFREE_CACHELINE_BYTES 256 -#elif defined(powerpc) || defined(__powerpc__) || defined(__ppc__) - #define BOOST_LOCKFREE_CACHELINE_BYTES 128 +#if defined( __s390__ ) || defined( __s390x__ ) +# define BOOST_LOCKFREE_CACHELINE_BYTES 256 +#elif defined( powerpc ) || defined( __powerpc__ ) || defined( __ppc__ ) +# define BOOST_LOCKFREE_CACHELINE_BYTES 128 #else - #define BOOST_LOCKFREE_CACHELINE_BYTES 64 +# define BOOST_LOCKFREE_CACHELINE_BYTES 64 #endif #include -#if BOOST_ARCH_X86_64 || ( (BOOST_ARCH_ARM >= BOOST_VERSION_NUMBER(8,0,0)) && !BOOST_PLAT_ANDROID ) -#define BOOST_LOCKFREE_PTR_COMPRESSION 1 +#if BOOST_ARCH_X86_64 || ( ( BOOST_ARCH_ARM >= BOOST_VERSION_NUMBER( 8, 0, 0 ) ) && !BOOST_PLAT_ANDROID ) +# define 
BOOST_LOCKFREE_PTR_COMPRESSION 1 #endif #endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */ diff --git a/include/boost/lockfree/detail/tagged_ptr.hpp b/include/boost/lockfree/detail/tagged_ptr.hpp index 10ab2e91..2e702175 100644 --- a/include/boost/lockfree/detail/tagged_ptr.hpp +++ b/include/boost/lockfree/detail/tagged_ptr.hpp @@ -13,9 +13,9 @@ #include #ifndef BOOST_LOCKFREE_PTR_COMPRESSION -#include +# include #else -#include +# include #endif #endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */ diff --git a/include/boost/lockfree/detail/tagged_ptr_dcas.hpp b/include/boost/lockfree/detail/tagged_ptr_dcas.hpp index f9223506..237866e5 100644 --- a/include/boost/lockfree/detail/tagged_ptr_dcas.hpp +++ b/include/boost/lockfree/detail/tagged_ptr_dcas.hpp @@ -9,60 +9,59 @@ #ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED #define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED -#include /* for std::size_t */ +#include /* for std::size_t */ #include #include -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { namespace detail { - -template +template < class T > class #if BOOST_COMP_MSVC && BOOST_ARCH_X86_64 -BOOST_ALIGNMENT(16) + BOOST_ALIGNMENT( 16 ) #elif BOOST_COMP_MSVC && BOOST_ARCH_X86_32 -BOOST_ALIGNMENT(8) + BOOST_ALIGNMENT( 8 ) #else -BOOST_ALIGNMENT(2 * sizeof(void*)) + BOOST_ALIGNMENT( 2 * sizeof( void* ) ) #endif - tagged_ptr + tagged_ptr { public: typedef std::size_t tag_t; /** uninitialized constructor */ - tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0) + tagged_ptr( void ) BOOST_NOEXCEPT //: ptr(0), tag(0) {} #ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS - tagged_ptr(tagged_ptr const & p): - ptr(p.ptr), tag(p.tag) + tagged_ptr( tagged_ptr const& p ) : + ptr( p.ptr ), + tag( p.tag ) {} #else - tagged_ptr(tagged_ptr const & p) = default; + tagged_ptr( tagged_ptr const& p ) = default; #endif - explicit tagged_ptr(T * p, tag_t t = 0): - ptr(p), tag(t) + explicit tagged_ptr( T* p, tag_t t = 0 ) : + ptr( p ), + tag( t ) 
{} /** unsafe set operation */ /* @{ */ #ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS - tagged_ptr & operator= (tagged_ptr const & p) + tagged_ptr& operator=( tagged_ptr const& p ) { - set(p.ptr, p.tag); + set( p.ptr, p.tag ); return *this; } #else - tagged_ptr & operator= (tagged_ptr const & p) = default; + tagged_ptr& operator=( tagged_ptr const& p ) = default; #endif - void set(T * p, tag_t t) + void set( T* p, tag_t t ) { ptr = p; tag = t; @@ -71,25 +70,25 @@ BOOST_ALIGNMENT(2 * sizeof(void*)) /** comparing semantics */ /* @{ */ - bool operator== (volatile tagged_ptr const & p) const + bool operator==( volatile tagged_ptr const& p ) const { - return (ptr == p.ptr) && (tag == p.tag); + return ( ptr == p.ptr ) && ( tag == p.tag ); } - bool operator!= (volatile tagged_ptr const & p) const + bool operator!=( volatile tagged_ptr const& p ) const { - return !operator==(p); + return !operator==( p ); } /* @} */ /** pointer access */ /* @{ */ - T * get_ptr(void) const + T* get_ptr( void ) const { return ptr; } - void set_ptr(T * p) + void set_ptr( T* p ) { ptr = p; } @@ -104,11 +103,11 @@ BOOST_ALIGNMENT(2 * sizeof(void*)) tag_t get_next_tag() const { - tag_t next = (get_tag() + 1) & (std::numeric_limits::max)(); + tag_t next = ( get_tag() + 1 ) & ( std::numeric_limits< tag_t >::max )(); return next; } - void set_tag(tag_t t) + void set_tag( tag_t t ) { tag = t; } @@ -116,29 +115,27 @@ BOOST_ALIGNMENT(2 * sizeof(void*)) /** smart pointer support */ /* @{ */ - T & operator*() const + T& operator*() const { return *ptr; } - T * operator->() const + T* operator->() const { return ptr; } - operator bool(void) const + operator bool( void ) const { return ptr != 0; } /* @} */ protected: - T * ptr; + T* ptr; tag_t tag; }; -} /* namespace detail */ -} /* namespace lockfree */ -} /* namespace boost */ +}}} // namespace boost::lockfree::detail #endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */ diff --git a/include/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp 
b/include/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp index 63f952eb..c2cf6ff3 100644 --- a/include/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp +++ b/include/boost/lockfree/detail/tagged_ptr_ptrcompression.hpp @@ -9,19 +9,17 @@ #ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED #define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED -#include /* for std::size_t */ +#include /* for std::size_t */ #include #include #include -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { namespace detail { #ifdef BOOST_LOCKFREE_PTR_COMPRESSION -template +template < class T > class tagged_ptr { typedef boost::uint64_t compressed_ptr_t; @@ -33,92 +31,92 @@ class tagged_ptr union cast_unit { compressed_ptr_t value; - tag_t tag[4]; + tag_t tag[ 4 ]; }; - static const int tag_index = 3; - static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1; + static const int tag_index = 3; + static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1; - static T* extract_ptr(volatile compressed_ptr_t const & i) + static T* extract_ptr( volatile compressed_ptr_t const& i ) { - return (T*)(i & ptr_mask); + return (T*)( i & ptr_mask ); } - static tag_t extract_tag(volatile compressed_ptr_t const & i) + static tag_t extract_tag( volatile compressed_ptr_t const& i ) { cast_unit cu; cu.value = i; - return cu.tag[tag_index]; + return cu.tag[ tag_index ]; } - static compressed_ptr_t pack_ptr(T * ptr, tag_t tag) + static compressed_ptr_t pack_ptr( T* ptr, tag_t tag ) { cast_unit ret; - ret.value = compressed_ptr_t(ptr); - ret.tag[tag_index] = tag; + ret.value = compressed_ptr_t( ptr ); + ret.tag[ tag_index ] = tag; return ret.value; } public: /** uninitialized constructor */ - tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0) + tagged_ptr( void ) BOOST_NOEXCEPT //: ptr(0), tag(0) {} /** copy constructor */ -#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS - tagged_ptr(tagged_ptr const & p): - 
ptr(p.ptr) +# ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS + tagged_ptr( tagged_ptr const& p ) : + ptr( p.ptr ) {} -#else - tagged_ptr(tagged_ptr const & p) = default; -#endif +# else + tagged_ptr( tagged_ptr const& p ) = default; +# endif - explicit tagged_ptr(T * p, tag_t t = 0): - ptr(pack_ptr(p, t)) + explicit tagged_ptr( T* p, tag_t t = 0 ) : + ptr( pack_ptr( p, t ) ) {} /** unsafe set operation */ /* @{ */ -#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS - tagged_ptr & operator= (tagged_ptr const & p) +# ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS + tagged_ptr& operator=( tagged_ptr const& p ) { - ptr = p.ptr; - return *this; + ptr = p.ptr; + return *this; } -#else - tagged_ptr & operator= (tagged_ptr const & p) = default; -#endif +# else + tagged_ptr& operator=( tagged_ptr const& p ) = default; +# endif - void set(T * p, tag_t t) + void set( T* p, tag_t t ) { - ptr = pack_ptr(p, t); + ptr = pack_ptr( p, t ); } /* @} */ /** comparing semantics */ /* @{ */ - bool operator== (volatile tagged_ptr const & p) volatile const + bool operator==( volatile tagged_ptr const& p ) volatile const { - return (ptr == p.ptr); + return ( ptr == p.ptr ); } - bool operator!= (volatile tagged_ptr const & p) volatile const + bool operator!=( volatile tagged_ptr const& p ) volatile const { - return !operator==(p); + return !operator==( p ); } /* @} */ /** pointer access */ /* @{ */ - T * get_ptr() const + T* get_ptr() const { - return extract_ptr(ptr); + return extract_ptr( ptr ); } - void set_ptr(T * p) + void set_ptr( T* p ) { tag_t tag = get_tag(); - ptr = pack_ptr(p, tag); + ptr = pack_ptr( p, tag ); } /* @} */ @@ -126,35 +124,35 @@ class tagged_ptr /* @{ */ tag_t get_tag() const { - return extract_tag(ptr); + return extract_tag( ptr ); } tag_t get_next_tag() const { - tag_t next = (get_tag() + 1u) & (std::numeric_limits::max)(); + tag_t next = ( get_tag() + 1u ) & ( std::numeric_limits< tag_t >::max )(); return next; } - void set_tag(tag_t t) + void set_tag( tag_t t ) { - T * p = 
get_ptr(); - ptr = pack_ptr(p, t); + T* p = get_ptr(); + ptr = pack_ptr( p, t ); } /* @} */ /** smart pointer support */ /* @{ */ - T & operator*() const + T& operator*() const { return *get_ptr(); } - T * operator->() const + T* operator->() const { return get_ptr(); } - operator bool(void) const + operator bool( void ) const { return get_ptr() != 0; } @@ -164,11 +162,9 @@ class tagged_ptr compressed_ptr_t ptr; }; #else -#error unsupported platform +# error unsupported platform #endif -} /* namespace detail */ -} /* namespace lockfree */ -} /* namespace boost */ +}}} // namespace boost::lockfree::detail #endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */ diff --git a/include/boost/lockfree/lockfree_forward.hpp b/include/boost/lockfree/lockfree_forward.hpp index b1d72b32..81d5f9ce 100644 --- a/include/boost/lockfree/lockfree_forward.hpp +++ b/include/boost/lockfree/lockfree_forward.hpp @@ -11,61 +11,51 @@ #ifndef BOOST_DOXYGEN_INVOKED -#include // size_t +# include // size_t -#include +# include -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -#include -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES +# include +# endif -namespace boost { -namespace lockfree { +namespace boost { namespace lockfree { // policies -template +template < bool IsFixedSized > struct fixed_sized; -template +template < size_t Size > struct capacity; -template +template < class Alloc > struct allocator; // data structures -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template -#else -template -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES +template < typename T, class A0 = boost::parameter::void_, class A1 = boost::parameter::void_, class A2 = boost::parameter::void_ > +# else +template < typename T, typename... 
Options > +# endif class queue; -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template -#else -template -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES +template < typename T, class A0 = boost::parameter::void_, class A1 = boost::parameter::void_, class A2 = boost::parameter::void_ > +# else +template < typename T, typename... Options > +# endif class stack; -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template -#else -template -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES +template < typename T, class A0 = boost::parameter::void_, class A1 = boost::parameter::void_ > +# else +template < typename T, typename... Options > +# endif class spsc_queue; -} -} +}} // namespace boost::lockfree #endif // BOOST_DOXYGEN_INVOKED diff --git a/include/boost/lockfree/policies.hpp b/include/boost/lockfree/policies.hpp index 479411ea..e1acc6fc 100644 --- a/include/boost/lockfree/policies.hpp +++ b/include/boost/lockfree/policies.hpp @@ -9,49 +9,50 @@ #ifndef BOOST_LOCKFREE_POLICIES_HPP_INCLUDED #define BOOST_LOCKFREE_POLICIES_HPP_INCLUDED -#include #include #include +#include -namespace boost { -namespace lockfree { +namespace boost { namespace lockfree { #ifndef BOOST_DOXYGEN_INVOKED -namespace tag { struct allocator ; } -namespace tag { struct fixed_sized; } -namespace tag { struct capacity; } +namespace tag { +struct allocator; +} // namespace tag +namespace tag { +struct fixed_sized; +} // namespace tag +namespace tag { +struct capacity; +} // namespace tag #endif /** Configures a data structure as \b fixed-sized. * - * The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible size of the - * queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on platforms that lack - * double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom. - * This implies that a data structure is bounded. 
+ * The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible + * size of the queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on + * platforms that lack double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom. + * This implies that a data structure is bounded. * */ -template -struct fixed_sized: - boost::parameter::template_keyword > +template < bool IsFixedSized > +struct fixed_sized : boost::parameter::template_keyword< tag::fixed_sized, boost::mpl::bool_< IsFixedSized > > {}; /** Sets the \b capacity of a data structure at compile-time. * * This implies that a data structure is bounded and fixed-sized. * */ -template -struct capacity: - boost::parameter::template_keyword > +template < size_t Size > +struct capacity : boost::parameter::template_keyword< tag::capacity, boost::mpl::size_t< Size > > {}; /** Defines the \b allocator type of a data structure. 
 * */ -template -struct allocator: - boost::parameter::template_keyword +template < class Alloc > +struct allocator : boost::parameter::template_keyword< tag::allocator, Alloc > {}; -} -} +}} // namespace boost::lockfree #endif /* BOOST_LOCKFREE_POLICIES_HPP_INCLUDED */ diff --git a/include/boost/lockfree/queue.hpp b/include/boost/lockfree/queue.hpp index 7f0b70ff..eb9cd277 100644 --- a/include/boost/lockfree/queue.hpp +++ b/include/boost/lockfree/queue.hpp @@ -12,11 +12,11 @@ #define BOOST_LOCKFREE_FIFO_HPP_INCLUDED #include -#include +#include // for BOOST_LIKELY & BOOST_ALIGNMENT #include +#include #include #include -#include // for BOOST_LIKELY & BOOST_ALIGNMENT #include #include @@ -27,31 +27,28 @@ #include #ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once +# pragma once #endif -#if defined(_MSC_VER) -#pragma warning(push) -#pragma warning(disable: 4324) // structure was padded due to __declspec(align()) +#if defined( _MSC_VER ) +# pragma warning( push ) +# pragma warning( disable : 4324 ) // structure was padded due to __declspec(align()) #endif -#if defined(BOOST_INTEL) && (BOOST_INTEL_CXX_VERSION > 1000) -#pragma warning(push) -#pragma warning(disable:488) // template parameter unused in declaring parameter types, - // gets erronously triggered the queue constructor which - // takes an allocator of another type and rebinds it +#if defined( BOOST_INTEL ) && ( BOOST_INTEL_CXX_VERSION > 1000 ) +# pragma warning( push ) +# pragma warning( disable : 488 ) // template parameter unused in declaring parameter types, + // gets erroneously triggered by the queue constructor which + // takes an allocator of another type and rebinds it #endif +namespace boost { namespace lockfree { +namespace detail { -namespace boost { -namespace lockfree { -namespace detail { - -typedef parameter::parameters, - boost::parameter::optional - > queue_signature; +typedef parameter::parameters< boost::parameter::optional< tag::allocator >, boost::parameter::optional< tag::capacity > > + 
queue_signature; } /* namespace detail */ @@ -63,10 +60,10 @@ typedef parameter::parameters, * \b Policies: * - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized \n * Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n - * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed - * by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index - * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way - * to achieve lock-freedom. + * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are + * addressed by array indexing. This limits the possible size of the queue to the number of elements that can be + * addressed by the index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange + * instructions, this is the best way to achieve lock-freedom. * * - \ref boost::lockfree::capacity, optional \n * If this template argument is passed to the options, the size of the queue is set at compile-time.\n @@ -82,86 +79,87 @@ typedef parameter::parameters, * * */ #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template +template < typename T, class A0, class A1, class A2 > #else -template +template < typename T, typename... 
Options > #endif class queue { private: #ifndef BOOST_DOXYGEN_INVOKED -#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR - BOOST_STATIC_ASSERT((boost::has_trivial_destructor::value)); -#endif +# ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR + BOOST_STATIC_ASSERT( ( boost::has_trivial_destructor< T >::value ) ); +# endif -#ifdef BOOST_HAS_TRIVIAL_ASSIGN - BOOST_STATIC_ASSERT((boost::has_trivial_assign::value)); -#endif +# ifdef BOOST_HAS_TRIVIAL_ASSIGN + BOOST_STATIC_ASSERT( ( boost::has_trivial_assign< T >::value ) ); +# endif -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES - typedef typename detail::queue_signature::bind::type bound_args; -#else - typedef typename detail::queue_signature::bind::type bound_args; -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES + typedef typename detail::queue_signature::bind< A0, A1, A2 >::type bound_args; +# else + typedef typename detail::queue_signature::bind< Options... >::type bound_args; +# endif - static const bool has_capacity = detail::extract_capacity::has_capacity; - static const size_t capacity = detail::extract_capacity::capacity + 1; // the queue uses one dummy node - static const bool fixed_sized = detail::extract_fixed_sized::value; - static const bool node_based = !(has_capacity || fixed_sized); + static const bool has_capacity = detail::extract_capacity< bound_args >::has_capacity; + static const size_t capacity = detail::extract_capacity< bound_args >::capacity + 1; // the queue uses one dummy node + static const bool fixed_sized = detail::extract_fixed_sized< bound_args >::value; + static const bool node_based = !( has_capacity || fixed_sized ); static const bool compile_time_sized = has_capacity; - struct BOOST_ALIGNMENT(BOOST_LOCKFREE_CACHELINE_BYTES) node + struct BOOST_ALIGNMENT( BOOST_LOCKFREE_CACHELINE_BYTES ) node { - typedef typename detail::select_tagged_handle::tagged_handle_type tagged_node_handle; - typedef typename detail::select_tagged_handle::handle_type handle_type; + typedef typename detail::select_tagged_handle< node, 
node_based >::tagged_handle_type tagged_node_handle; + typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_type; - node(T const & v, handle_type null_handle): - data(v) + node( T const& v, handle_type null_handle ) : + data( v ) { /* increment tag to avoid ABA problem */ - tagged_node_handle old_next = next.load(memory_order_relaxed); - tagged_node_handle new_next (null_handle, old_next.get_next_tag()); - next.store(new_next, memory_order_release); + tagged_node_handle old_next = next.load( memory_order_relaxed ); + tagged_node_handle new_next( null_handle, old_next.get_next_tag() ); + next.store( new_next, memory_order_release ); } - node (handle_type null_handle): - next(tagged_node_handle(null_handle, 0)) + node( handle_type null_handle ) : + next( tagged_node_handle( null_handle, 0 ) ) {} - node(void) + node( void ) {} - atomic next; - T data; + atomic< tagged_node_handle > next; + T data; }; - typedef typename detail::extract_allocator::type node_allocator; - typedef typename detail::select_freelist::type pool_t; - typedef typename pool_t::tagged_node_handle tagged_node_handle; - typedef typename detail::select_tagged_handle::handle_type handle_type; + typedef typename detail::extract_allocator< bound_args, node >::type node_allocator; + typedef + typename detail::select_freelist< node, node_allocator, compile_time_sized, fixed_sized, capacity >::type pool_t; + typedef typename pool_t::tagged_node_handle tagged_node_handle; + typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_type; - void initialize(void) + void initialize( void ) { - node * n = pool.template construct(pool.null_handle()); - tagged_node_handle dummy_node(pool.get_handle(n), 0); - head_.store(dummy_node, memory_order_relaxed); - tail_.store(dummy_node, memory_order_release); + node* n = pool.template construct< true, false >( pool.null_handle() ); + tagged_node_handle dummy_node( pool.get_handle( n ), 0 ); + head_.store( 
dummy_node, memory_order_relaxed ); + tail_.store( dummy_node, memory_order_release ); } struct implementation_defined { typedef node_allocator allocator; - typedef std::size_t size_type; + typedef std::size_t size_type; }; #endif - BOOST_DELETED_FUNCTION(queue(queue const&)) - BOOST_DELETED_FUNCTION(queue& operator= (queue const&)) + BOOST_DELETED_FUNCTION( queue( queue const& ) ) + BOOST_DELETED_FUNCTION( queue& operator=( queue const& ) ) public: - typedef T value_type; + typedef T value_type; typedef typename implementation_defined::allocator allocator; typedef typename implementation_defined::size_type size_type; @@ -169,11 +167,11 @@ class queue * \return true, if implementation is lock-free. * * \warning It only checks, if the queue head and tail nodes and the freelist can be modified in a lock-free manner. - * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, there is - * no possibility to provide a completely accurate implementation, because one would need to test every internal + * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, there + * is no possibility to provide a completely accurate implementation, because one would need to test every internal * node, which is impossible if further nodes will be allocated from the operating system. 
* */ - bool is_lock_free (void) const + bool is_lock_free( void ) const { return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free(); } @@ -182,14 +180,14 @@ class queue * * \pre Must specify a capacity<> argument * */ - queue(void): - head_(tagged_node_handle(0, 0)), - tail_(tagged_node_handle(0, 0)), - pool(node_allocator(), capacity) + queue( void ) : + head_( tagged_node_handle( 0, 0 ) ), + tail_( tagged_node_handle( 0, 0 ) ), + pool( node_allocator(), capacity ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(has_capacity); + BOOST_ASSERT( has_capacity ); initialize(); } @@ -197,13 +195,13 @@ class queue * * \pre Must specify a capacity<> argument * */ - template - explicit queue(typename boost::allocator_rebind::type const & alloc): - head_(tagged_node_handle(0, 0)), - tail_(tagged_node_handle(0, 0)), - pool(alloc, capacity) + template < typename U > + explicit queue( typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) : + head_( tagged_node_handle( 0, 0 ) ), + tail_( tagged_node_handle( 0, 0 ) ), + pool( alloc, capacity ) { - BOOST_STATIC_ASSERT(has_capacity); + BOOST_STATIC_ASSERT( has_capacity ); initialize(); } @@ -211,14 +209,14 @@ class queue * * \pre Must specify a capacity<> argument * */ - explicit queue(allocator const & alloc): - head_(tagged_node_handle(0, 0)), - tail_(tagged_node_handle(0, 0)), - pool(alloc, capacity) + explicit queue( allocator const& alloc ) : + head_( tagged_node_handle( 0, 0 ) ), + tail_( tagged_node_handle( 0, 0 ) ), + pool( alloc, capacity ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. 
- BOOST_ASSERT(has_capacity); + BOOST_ASSERT( has_capacity ); initialize(); } @@ -228,14 +226,14 @@ class queue * * \pre Must \b not specify a capacity<> argument * */ - explicit queue(size_type n): - head_(tagged_node_handle(0, 0)), - tail_(tagged_node_handle(0, 0)), - pool(node_allocator(), n + 1) + explicit queue( size_type n ) : + head_( tagged_node_handle( 0, 0 ) ), + tail_( tagged_node_handle( 0, 0 ) ), + pool( node_allocator(), n + 1 ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(!has_capacity); + BOOST_ASSERT( !has_capacity ); initialize(); } @@ -245,50 +243,49 @@ class queue * * \pre Must \b not specify a capacity<> argument * */ - template - queue(size_type n, typename boost::allocator_rebind::type const & alloc): - head_(tagged_node_handle(0, 0)), - tail_(tagged_node_handle(0, 0)), - pool(alloc, n + 1) + template < typename U > + queue( size_type n, typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) : + head_( tagged_node_handle( 0, 0 ) ), + tail_( tagged_node_handle( 0, 0 ) ), + pool( alloc, n + 1 ) { - BOOST_STATIC_ASSERT(!has_capacity); + BOOST_STATIC_ASSERT( !has_capacity ); initialize(); } /** \copydoc boost::lockfree::stack::reserve * */ - void reserve(size_type n) + void reserve( size_type n ) { - pool.template reserve(n); + pool.template reserve< true >( n ); } /** \copydoc boost::lockfree::stack::reserve_unsafe * */ - void reserve_unsafe(size_type n) + void reserve_unsafe( size_type n ) { - pool.template reserve(n); + pool.template reserve< false >( n ); } /** Destroys queue, free all nodes from freelist. 
* */ - ~queue(void) + ~queue( void ) { T dummy; - while(unsynchronized_pop(dummy)) - {} + while ( unsynchronized_pop( dummy ) ) {} - pool.template destruct(head_.load(memory_order_relaxed)); + pool.template destruct< false >( head_.load( memory_order_relaxed ) ); } /** Check if the queue is empty * * \return true, if the queue is empty, false otherwise - * \note The result is only accurate, if no other thread modifies the queue. Therefore it is rarely practical to use this - * value in program logic. + * \note The result is only accurate, if no other thread modifies the queue. Therefore it is rarely practical to use + * this value in program logic. * */ - bool empty(void) const + bool empty( void ) const { - return pool.get_handle(head_.load()) == pool.get_handle(tail_.load()); + return pool.get_handle( head_.load() ) == pool.get_handle( tail_.load() ); } /** Pushes object t to the queue. @@ -296,12 +293,12 @@ class queue * \post object will be pushed to the queue, if internal node can be allocated * \returns true, if the push operation is successful. * - * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. + * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will + * be allocated from the OS. This may not be lock-free. * */ - bool push(T const & t) + bool push( T const& t ) { - return do_push(t); + return do_push< false >( t ); } /** Pushes object t to the queue. @@ -312,42 +309,41 @@ class queue * \note Thread-safe and non-blocking. 
If internal memory pool is exhausted, operation will fail * \throws if memory allocator throws * */ - bool bounded_push(T const & t) + bool bounded_push( T const& t ) { - return do_push(t); + return do_push< true >( t ); } private: #ifndef BOOST_DOXYGEN_INVOKED - template - bool do_push(T const & t) + template < bool Bounded > + bool do_push( T const& t ) { - node * n = pool.template construct(t, pool.null_handle()); - handle_type node_handle = pool.get_handle(n); + node* n = pool.template construct< true, Bounded >( t, pool.null_handle() ); + handle_type node_handle = pool.get_handle( n ); - if (n == NULL) + if ( n == NULL ) return false; - for (;;) { - tagged_node_handle tail = tail_.load(memory_order_acquire); - node * tail_node = pool.get_pointer(tail); - tagged_node_handle next = tail_node->next.load(memory_order_acquire); - node * next_ptr = pool.get_pointer(next); - - tagged_node_handle tail2 = tail_.load(memory_order_acquire); - if (BOOST_LIKELY(tail == tail2)) { - if (next_ptr == 0) { - tagged_node_handle new_tail_next(node_handle, next.get_next_tag()); - if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) { - tagged_node_handle new_tail(node_handle, tail.get_next_tag()); - tail_.compare_exchange_strong(tail, new_tail); + for ( ;; ) { + tagged_node_handle tail = tail_.load( memory_order_acquire ); + node* tail_node = pool.get_pointer( tail ); + tagged_node_handle next = tail_node->next.load( memory_order_acquire ); + node* next_ptr = pool.get_pointer( next ); + + tagged_node_handle tail2 = tail_.load( memory_order_acquire ); + if ( BOOST_LIKELY( tail == tail2 ) ) { + if ( next_ptr == 0 ) { + tagged_node_handle new_tail_next( node_handle, next.get_next_tag() ); + if ( tail_node->next.compare_exchange_weak( next, new_tail_next ) ) { + tagged_node_handle new_tail( node_handle, tail.get_next_tag() ); + tail_.compare_exchange_strong( tail, new_tail ); return true; } - } - else { - tagged_node_handle new_tail(pool.get_handle(next_ptr), 
tail.get_next_tag()); - tail_.compare_exchange_strong(tail, new_tail); + } else { + tagged_node_handle new_tail( pool.get_handle( next_ptr ), tail.get_next_tag() ); + tail_.compare_exchange_strong( tail, new_tail ); } } } @@ -355,35 +351,32 @@ class queue #endif public: - /** Pushes object t to the queue. * * \post object will be pushed to the queue, if internal node can be allocated * \returns true, if the push operation is successful. * - * \note Not Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. - * \throws if memory allocator throws + * \note Not Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node + * will be allocated from the OS. This may not be lock-free. \throws if memory allocator throws * */ - bool unsynchronized_push(T const & t) + bool unsynchronized_push( T const& t ) { - node * n = pool.template construct(t, pool.null_handle()); + node* n = pool.template construct< false, false >( t, pool.null_handle() ); - if (n == NULL) + if ( n == NULL ) return false; - for (;;) { - tagged_node_handle tail = tail_.load(memory_order_relaxed); - tagged_node_handle next = tail->next.load(memory_order_relaxed); - node * next_ptr = next.get_ptr(); + for ( ;; ) { + tagged_node_handle tail = tail_.load( memory_order_relaxed ); + tagged_node_handle next = tail->next.load( memory_order_relaxed ); + node* next_ptr = next.get_ptr(); - if (next_ptr == 0) { - tail->next.store(tagged_node_handle(n, next.get_next_tag()), memory_order_relaxed); - tail_.store(tagged_node_handle(n, tail.get_next_tag()), memory_order_relaxed); + if ( next_ptr == 0 ) { + tail->next.store( tagged_node_handle( n, next.get_next_tag() ), memory_order_relaxed ); + tail_.store( tagged_node_handle( n, tail.get_next_tag() ), memory_order_relaxed ); return true; - } - else - tail_.store(tagged_node_handle(next_ptr, tail.get_next_tag()), 
memory_order_relaxed); + } else + tail_.store( tagged_node_handle( next_ptr, tail.get_next_tag() ), memory_order_relaxed ); } } @@ -394,9 +387,9 @@ class queue * * \note Thread-safe and non-blocking. Might modify return argument even if operation fails. * */ - bool pop (T & ret) + bool pop( T& ret ) { - return pop(ret); + return pop< T >( ret ); } /** Pops object from queue. @@ -407,39 +400,39 @@ class queue * * \note Thread-safe and non-blocking. Might modify return argument even if operation fails. * */ - template - bool pop (U & ret) + template < typename U > + bool pop( U& ret ) { - for (;;) { - tagged_node_handle head = head_.load(memory_order_acquire); - node * head_ptr = pool.get_pointer(head); - - tagged_node_handle tail = tail_.load(memory_order_acquire); - tagged_node_handle next = head_ptr->next.load(memory_order_acquire); - node * next_ptr = pool.get_pointer(next); - - tagged_node_handle head2 = head_.load(memory_order_acquire); - if (BOOST_LIKELY(head == head2)) { - if (pool.get_handle(head) == pool.get_handle(tail)) { - if (next_ptr == 0) + for ( ;; ) { + tagged_node_handle head = head_.load( memory_order_acquire ); + node* head_ptr = pool.get_pointer( head ); + + tagged_node_handle tail = tail_.load( memory_order_acquire ); + tagged_node_handle next = head_ptr->next.load( memory_order_acquire ); + node* next_ptr = pool.get_pointer( next ); + + tagged_node_handle head2 = head_.load( memory_order_acquire ); + if ( BOOST_LIKELY( head == head2 ) ) { + if ( pool.get_handle( head ) == pool.get_handle( tail ) ) { + if ( next_ptr == 0 ) return false; - tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag()); - tail_.compare_exchange_strong(tail, new_tail); + tagged_node_handle new_tail( pool.get_handle( next ), tail.get_next_tag() ); + tail_.compare_exchange_strong( tail, new_tail ); } else { - if (next_ptr == 0) + if ( next_ptr == 0 ) /* this check is not part of the original algorithm as published by michael and scott * * however we reuse 
the tagged_ptr part for the freelist and clear the next part during node * allocation. we can observe a null-pointer here. * */ continue; - detail::copy_payload(next_ptr->data, ret); + detail::copy_payload( next_ptr->data, ret ); - tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag()); - if (head_.compare_exchange_weak(head, new_head)) { - pool.template destruct(head); + tagged_node_handle new_head( pool.get_handle( next ), head.get_next_tag() ); + if ( head_.compare_exchange_weak( head, new_head ) ) { + pool.template destruct< true >( head ); return true; } } @@ -455,9 +448,9 @@ class queue * \note Not thread-safe, but non-blocking. Might modify return argument even if operation fails. * * */ - bool unsynchronized_pop (T & ret) + bool unsynchronized_pop( T& ret ) { - return unsynchronized_pop(ret); + return unsynchronized_pop< T >( ret ); } /** Pops object from queue. @@ -470,34 +463,34 @@ class queue * \note Not thread-safe, but non-blocking. Might modify return argument even if operation fails. 
* * */ - template - bool unsynchronized_pop (U & ret) + template < typename U > + bool unsynchronized_pop( U& ret ) { - for (;;) { - tagged_node_handle head = head_.load(memory_order_relaxed); - node * head_ptr = pool.get_pointer(head); - tagged_node_handle tail = tail_.load(memory_order_relaxed); - tagged_node_handle next = head_ptr->next.load(memory_order_relaxed); - node * next_ptr = pool.get_pointer(next); - - if (pool.get_handle(head) == pool.get_handle(tail)) { - if (next_ptr == 0) + for ( ;; ) { + tagged_node_handle head = head_.load( memory_order_relaxed ); + node* head_ptr = pool.get_pointer( head ); + tagged_node_handle tail = tail_.load( memory_order_relaxed ); + tagged_node_handle next = head_ptr->next.load( memory_order_relaxed ); + node* next_ptr = pool.get_pointer( next ); + + if ( pool.get_handle( head ) == pool.get_handle( tail ) ) { + if ( next_ptr == 0 ) return false; - tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag()); - tail_.store(new_tail); + tagged_node_handle new_tail( pool.get_handle( next ), tail.get_next_tag() ); + tail_.store( new_tail ); } else { - if (next_ptr == 0) + if ( next_ptr == 0 ) /* this check is not part of the original algorithm as published by michael and scott * * however we reuse the tagged_ptr part for the freelist and clear the next part during node * allocation. we can observe a null-pointer here. 
* */ continue; - detail::copy_payload(next_ptr->data, ret); - tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag()); - head_.store(new_head); - pool.template destruct(head); + detail::copy_payload( next_ptr->data, ret ); + tagged_node_handle new_head( pool.get_handle( next ), head.get_next_tag() ); + head_.store( new_head ); + pool.template destruct< false >( head ); return true; } } @@ -511,25 +504,25 @@ class queue * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - bool consume_one(Functor & f) + template < typename Functor > + bool consume_one( Functor& f ) { - T element; - bool success = pop(element); - if (success) - f(element); + T element; + bool success = pop( element ); + if ( success ) + f( element ); return success; } /// \copydoc boost::lockfree::queue::consume_one(Functor & rhs) - template - bool consume_one(Functor const & f) + template < typename Functor > + bool consume_one( Functor const& f ) { - T element; - bool success = pop(element); - if (success) - f(element); + T element; + bool success = pop( element ); + if ( success ) + f( element ); return success; } @@ -542,22 +535,22 @@ class queue * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - size_t consume_all(Functor & f) + template < typename Functor > + size_t consume_all( Functor& f ) { size_t element_count = 0; - while (consume_one(f)) + while ( consume_one( f ) ) element_count += 1; return element_count; } /// \copydoc boost::lockfree::queue::consume_all(Functor & rhs) - template - size_t consume_all(Functor const & f) + template < typename Functor > + size_t consume_all( Functor const& f ) { size_t element_count = 0; - while (consume_one(f)) + while ( consume_one( f ) ) element_count += 1; return element_count; @@ -565,25 +558,24 @@ class queue private: #ifndef BOOST_DOXYGEN_INVOKED - atomic head_; - static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - 
sizeof(tagged_node_handle); - char padding1[padding_size]; - atomic tail_; - char padding2[padding_size]; + atomic< tagged_node_handle > head_; + static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof( tagged_node_handle ); + char padding1[ padding_size ]; + atomic< tagged_node_handle > tail_; + char padding2[ padding_size ]; pool_t pool; #endif }; -} /* namespace lockfree */ -} /* namespace boost */ +}} // namespace boost::lockfree -#if defined(BOOST_INTEL) && (BOOST_INTEL_CXX_VERSION > 1000) -#pragma warning(pop) +#if defined( BOOST_INTEL ) && ( BOOST_INTEL_CXX_VERSION > 1000 ) +# pragma warning( pop ) #endif -#if defined(_MSC_VER) -#pragma warning(pop) +#if defined( _MSC_VER ) +# pragma warning( pop ) #endif #endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */ diff --git a/include/boost/lockfree/spsc_queue.hpp b/include/boost/lockfree/spsc_queue.hpp index 11d07e4f..30a1e00b 100644 --- a/include/boost/lockfree/spsc_queue.hpp +++ b/include/boost/lockfree/spsc_queue.hpp @@ -15,12 +15,12 @@ #include #include -#include +#include // for BOOST_LIKELY #include -#include #include +#include +#include #include -#include // for BOOST_LIKELY #include #include @@ -33,311 +33,310 @@ #include #ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once +# pragma once #endif -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { +namespace detail { -typedef parameter::parameters, - boost::parameter::optional - > ringbuffer_signature; +typedef parameter::parameters< boost::parameter::optional< tag::capacity >, boost::parameter::optional< tag::allocator > > + ringbuffer_signature; -template +template < typename T > class ringbuffer_base { #ifndef BOOST_DOXYGEN_INVOKED protected: typedef std::size_t size_t; - static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(size_t); - atomic write_index_; - char padding1[padding_size]; /* force read_index and write_index to different cache lines */ - atomic read_index_; + static const int 
padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof( size_t ); + atomic< size_t > write_index_; + char padding1[ padding_size ]; /* force read_index and write_index to different cache lines */ + atomic< size_t > read_index_; - BOOST_DELETED_FUNCTION(ringbuffer_base(ringbuffer_base const&)) - BOOST_DELETED_FUNCTION(ringbuffer_base& operator= (ringbuffer_base const&)) + BOOST_DELETED_FUNCTION( ringbuffer_base( ringbuffer_base const& ) ) + BOOST_DELETED_FUNCTION( ringbuffer_base& operator=( ringbuffer_base const& ) ) protected: - ringbuffer_base(void): - write_index_(0), read_index_(0) + ringbuffer_base( void ) : + write_index_( 0 ), + read_index_( 0 ) {} - static size_t next_index(size_t arg, size_t max_size) + static size_t next_index( size_t arg, size_t max_size ) { size_t ret = arg + 1; - while (BOOST_UNLIKELY(ret >= max_size)) + while ( BOOST_UNLIKELY( ret >= max_size ) ) ret -= max_size; return ret; } - static size_t read_available(size_t write_index, size_t read_index, size_t max_size) + static size_t read_available( size_t write_index, size_t read_index, size_t max_size ) { - if (write_index >= read_index) + if ( write_index >= read_index ) return write_index - read_index; const size_t ret = write_index + max_size - read_index; return ret; } - static size_t write_available(size_t write_index, size_t read_index, size_t max_size) + static size_t write_available( size_t write_index, size_t read_index, size_t max_size ) { size_t ret = read_index - write_index - 1; - if (write_index >= read_index) + if ( write_index >= read_index ) ret += max_size; return ret; } - size_t read_available(size_t max_size) const + size_t read_available( size_t max_size ) const { - size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); - return read_available(write_index, read_index, max_size); + size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( 
memory_order_relaxed ); + return read_available( write_index, read_index, max_size ); } - size_t write_available(size_t max_size) const + size_t write_available( size_t max_size ) const { - size_t write_index = write_index_.load(memory_order_relaxed); - const size_t read_index = read_index_.load(memory_order_acquire); - return write_available(write_index, read_index, max_size); + size_t write_index = write_index_.load( memory_order_relaxed ); + const size_t read_index = read_index_.load( memory_order_acquire ); + return write_available( write_index, read_index, max_size ); } - bool push(T const & t, T * buffer, size_t max_size) + bool push( T const& t, T* buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread - const size_t next = next_index(write_index, max_size); + const size_t write_index = write_index_.load( memory_order_relaxed ); // only written from push thread + const size_t next = next_index( write_index, max_size ); - if (next == read_index_.load(memory_order_acquire)) - return false; /* ringbuffer is full */ + if ( next == read_index_.load( memory_order_acquire ) ) + return false; /* ringbuffer is full */ - new (buffer + write_index) T(t); // copy-construct + new ( buffer + write_index ) T( t ); // copy-construct - write_index_.store(next, memory_order_release); + write_index_.store( next, memory_order_release ); return true; } - size_t push(const T * input_buffer, size_t input_count, T * internal_buffer, size_t max_size) + size_t push( const T* input_buffer, size_t input_count, T* internal_buffer, size_t max_size ) { - return push(input_buffer, input_buffer + input_count, internal_buffer, max_size) - input_buffer; + return push( input_buffer, input_buffer + input_count, internal_buffer, max_size ) - input_buffer; } - template - ConstIterator push(ConstIterator begin, ConstIterator end, T * internal_buffer, size_t max_size) + template < typename ConstIterator > + ConstIterator 
push( ConstIterator begin, ConstIterator end, T* internal_buffer, size_t max_size ) { // FIXME: avoid std::distance - const size_t write_index = write_index_.load(memory_order_relaxed); // only written from push thread - const size_t read_index = read_index_.load(memory_order_acquire); - const size_t avail = write_available(write_index, read_index, max_size); + const size_t write_index = write_index_.load( memory_order_relaxed ); // only written from push thread + const size_t read_index = read_index_.load( memory_order_acquire ); + const size_t avail = write_available( write_index, read_index, max_size ); - if (avail == 0) + if ( avail == 0 ) return begin; - size_t input_count = std::distance(begin, end); - input_count = (std::min)(input_count, avail); + size_t input_count = std::distance( begin, end ); + input_count = ( std::min )( input_count, avail ); size_t new_write_index = write_index + input_count; - const ConstIterator last = boost::next(begin, input_count); + const ConstIterator last = boost::next( begin, input_count ); - if (write_index + input_count > max_size) { + if ( write_index + input_count > max_size ) { /* copy data in two sections */ - const size_t count0 = max_size - write_index; - const ConstIterator midpoint = boost::next(begin, count0); + const size_t count0 = max_size - write_index; + const ConstIterator midpoint = boost::next( begin, count0 ); - std::uninitialized_copy(begin, midpoint, internal_buffer + write_index); - std::uninitialized_copy(midpoint, last, internal_buffer); + std::uninitialized_copy( begin, midpoint, internal_buffer + write_index ); + std::uninitialized_copy( midpoint, last, internal_buffer ); new_write_index -= max_size; } else { - std::uninitialized_copy(begin, last, internal_buffer + write_index); + std::uninitialized_copy( begin, last, internal_buffer + write_index ); - if (new_write_index == max_size) + if ( new_write_index == max_size ) new_write_index = 0; } - write_index_.store(new_write_index, 
memory_order_release); + write_index_.store( new_write_index, memory_order_release ); return last; } - template - bool consume_one(Functor & functor, T * buffer, size_t max_size) + template < typename Functor > + bool consume_one( Functor& functor, T* buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread - if ( empty(write_index, read_index) ) + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread + if ( empty( write_index, read_index ) ) return false; - T & object_to_consume = buffer[read_index]; + T& object_to_consume = buffer[ read_index ]; functor( object_to_consume ); object_to_consume.~T(); - size_t next = next_index(read_index, max_size); - read_index_.store(next, memory_order_release); + size_t next = next_index( read_index, max_size ); + read_index_.store( next, memory_order_release ); return true; } - template - bool consume_one(Functor const & functor, T * buffer, size_t max_size) + template < typename Functor > + bool consume_one( Functor const& functor, T* buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread - if ( empty(write_index, read_index) ) + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread + if ( empty( write_index, read_index ) ) return false; - T & object_to_consume = buffer[read_index]; + T& object_to_consume = buffer[ read_index ]; functor( object_to_consume ); object_to_consume.~T(); - size_t next = next_index(read_index, max_size); - read_index_.store(next, memory_order_release); + size_t next = next_index( 
read_index, max_size ); + read_index_.store( next, memory_order_release ); return true; } - template - size_t consume_all (Functor const & functor, T * internal_buffer, size_t max_size) + template < typename Functor > + size_t consume_all( Functor const& functor, T* internal_buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread - const size_t avail = read_available(write_index, read_index, max_size); + const size_t avail = read_available( write_index, read_index, max_size ); - if (avail == 0) + if ( avail == 0 ) return 0; const size_t output_count = avail; size_t new_read_index = read_index + output_count; - if (read_index + output_count > max_size) { + if ( read_index + output_count > max_size ) { /* copy data in two sections */ const size_t count0 = max_size - read_index; const size_t count1 = output_count - count0; - run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor); - run_functor_and_delete(internal_buffer, internal_buffer + count1, functor); + run_functor_and_delete( internal_buffer + read_index, internal_buffer + max_size, functor ); + run_functor_and_delete( internal_buffer, internal_buffer + count1, functor ); new_read_index -= max_size; } else { - run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor); + run_functor_and_delete( internal_buffer + read_index, internal_buffer + read_index + output_count, functor ); - if (new_read_index == max_size) + if ( new_read_index == max_size ) new_read_index = 0; } - read_index_.store(new_read_index, memory_order_release); + read_index_.store( new_read_index, memory_order_release ); return output_count; } - template 
- size_t consume_all (Functor & functor, T * internal_buffer, size_t max_size) + template < typename Functor > + size_t consume_all( Functor& functor, T* internal_buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread - const size_t avail = read_available(write_index, read_index, max_size); + const size_t avail = read_available( write_index, read_index, max_size ); - if (avail == 0) + if ( avail == 0 ) return 0; const size_t output_count = avail; size_t new_read_index = read_index + output_count; - if (read_index + output_count > max_size) { + if ( read_index + output_count > max_size ) { /* copy data in two sections */ const size_t count0 = max_size - read_index; const size_t count1 = output_count - count0; - run_functor_and_delete(internal_buffer + read_index, internal_buffer + max_size, functor); - run_functor_and_delete(internal_buffer, internal_buffer + count1, functor); + run_functor_and_delete( internal_buffer + read_index, internal_buffer + max_size, functor ); + run_functor_and_delete( internal_buffer, internal_buffer + count1, functor ); new_read_index -= max_size; } else { - run_functor_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, functor); + run_functor_and_delete( internal_buffer + read_index, internal_buffer + read_index + output_count, functor ); - if (new_read_index == max_size) + if ( new_read_index == max_size ) new_read_index = 0; } - read_index_.store(new_read_index, memory_order_release); + read_index_.store( new_read_index, memory_order_release ); return output_count; } - size_t pop (T * output_buffer, size_t output_count, T * internal_buffer, size_t max_size) + size_t pop( T* output_buffer, 
size_t output_count, T* internal_buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread - const size_t avail = read_available(write_index, read_index, max_size); + const size_t avail = read_available( write_index, read_index, max_size ); - if (avail == 0) + if ( avail == 0 ) return 0; - output_count = (std::min)(output_count, avail); + output_count = ( std::min )( output_count, avail ); size_t new_read_index = read_index + output_count; - if (read_index + output_count > max_size) { + if ( read_index + output_count > max_size ) { /* copy data in two sections */ const size_t count0 = max_size - read_index; const size_t count1 = output_count - count0; - copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, output_buffer); - copy_and_delete(internal_buffer, internal_buffer + count1, output_buffer + count0); + copy_and_delete( internal_buffer + read_index, internal_buffer + max_size, output_buffer ); + copy_and_delete( internal_buffer, internal_buffer + count1, output_buffer + count0 ); new_read_index -= max_size; } else { - copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer); - if (new_read_index == max_size) + copy_and_delete( internal_buffer + read_index, internal_buffer + read_index + output_count, output_buffer ); + if ( new_read_index == max_size ) new_read_index = 0; } - read_index_.store(new_read_index, memory_order_release); + read_index_.store( new_read_index, memory_order_release ); return output_count; } - template - size_t pop_to_output_iterator (OutputIterator it, T * internal_buffer, size_t max_size) + template < typename OutputIterator > + size_t pop_to_output_iterator( 
OutputIterator it, T* internal_buffer, size_t max_size ) { - const size_t write_index = write_index_.load(memory_order_acquire); - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread + const size_t write_index = write_index_.load( memory_order_acquire ); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread - const size_t avail = read_available(write_index, read_index, max_size); - if (avail == 0) + const size_t avail = read_available( write_index, read_index, max_size ); + if ( avail == 0 ) return 0; size_t new_read_index = read_index + avail; - if (read_index + avail > max_size) { + if ( read_index + avail > max_size ) { /* copy data in two sections */ const size_t count0 = max_size - read_index; const size_t count1 = avail - count0; - it = copy_and_delete(internal_buffer + read_index, internal_buffer + max_size, it); - copy_and_delete(internal_buffer, internal_buffer + count1, it); + it = copy_and_delete( internal_buffer + read_index, internal_buffer + max_size, it ); + copy_and_delete( internal_buffer, internal_buffer + count1, it ); new_read_index -= max_size; } else { - copy_and_delete(internal_buffer + read_index, internal_buffer + read_index + avail, it); - if (new_read_index == max_size) + copy_and_delete( internal_buffer + read_index, internal_buffer + read_index + avail, it ); + if ( new_read_index == max_size ) new_read_index = 0; } - read_index_.store(new_read_index, memory_order_release); + read_index_.store( new_read_index, memory_order_release ); return avail; } - const T& front(const T * internal_buffer) const + const T& front( const T* internal_buffer ) const { - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread - return *(internal_buffer + read_index); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread + return *( internal_buffer + read_index ); } - T& 
front(T * internal_buffer) + T& front( T* internal_buffer ) { - const size_t read_index = read_index_.load(memory_order_relaxed); // only written from pop thread - return *(internal_buffer + read_index); + const size_t read_index = read_index_.load( memory_order_relaxed ); // only written from pop thread + return *( internal_buffer + read_index ); } #endif @@ -347,16 +346,16 @@ class ringbuffer_base * * \note Not thread-safe * */ - void reset(void) + void reset( void ) { - if ( !boost::has_trivial_destructor::value ) { + if ( !boost::has_trivial_destructor< T >::value ) { // make sure to call all destructors! detail::consume_noop consume_functor; (void)consume_all( consume_functor ); } else { - write_index_.store(0, memory_order_relaxed); - read_index_.store(0, memory_order_release); + write_index_.store( 0, memory_order_relaxed ); + read_index_.store( 0, memory_order_release ); } } @@ -365,33 +364,33 @@ class ringbuffer_base * \return true, if the ringbuffer is empty, false otherwise * \note Due to the concurrent nature of the ringbuffer the result may be inaccurate. * */ - bool empty(void) + bool empty( void ) { - return empty(write_index_.load(memory_order_relaxed), read_index_.load(memory_order_relaxed)); + return empty( write_index_.load( memory_order_relaxed ), read_index_.load( memory_order_relaxed ) ); } /** * \return true, if implementation is lock-free. 
* * */ - bool is_lock_free(void) const + bool is_lock_free( void ) const { return write_index_.is_lock_free() && read_index_.is_lock_free(); } private: - bool empty(size_t write_index, size_t read_index) + bool empty( size_t write_index, size_t read_index ) { return write_index == read_index; } - template< class OutputIterator > - OutputIterator copy_and_delete( T * first, T * last, OutputIterator out ) + template < class OutputIterator > + OutputIterator copy_and_delete( T* first, T* last, OutputIterator out ) { - if (boost::has_trivial_destructor::value) { - return std::copy(first, last, out); // will use memcpy if possible + if ( boost::has_trivial_destructor< T >::value ) { + return std::copy( first, last, out ); // will use memcpy if possible } else { - for (; first != last; ++first, ++out) { + for ( ; first != last; ++first, ++out ) { *out = *first; first->~T(); } @@ -399,46 +398,44 @@ class ringbuffer_base } } - template< class Functor > - void run_functor_and_delete( T * first, T * last, Functor & functor ) + template < class Functor > + void run_functor_and_delete( T* first, T* last, Functor& functor ) { - for (; first != last; ++first) { - functor(*first); + for ( ; first != last; ++first ) { + functor( *first ); first->~T(); } } - template< class Functor > - void run_functor_and_delete( T * first, T * last, Functor const & functor ) + template < class Functor > + void run_functor_and_delete( T* first, T* last, Functor const& functor ) { - for (; first != last; ++first) { - functor(*first); + for ( ; first != last; ++first ) { + functor( *first ); first->~T(); } } }; -template -class compile_time_sized_ringbuffer: - public ringbuffer_base +template < typename T, std::size_t MaxSize > +class compile_time_sized_ringbuffer : public ringbuffer_base< T > { - typedef std::size_t size_type; + typedef std::size_t size_type; static const std::size_t max_size = MaxSize + 1; - typedef typename boost::aligned_storage::value - >::type storage_type; + typedef + 
typename boost::aligned_storage< max_size * sizeof( T ), boost::alignment_of< T >::value >::type storage_type; storage_type storage_; - T * data() + T* data() { - return static_cast(storage_.address()); + return static_cast< T* >( storage_.address() ); } - const T * data() const + const T* data() const { - return static_cast(storage_.address()); + return static_cast< const T* >( storage_.address() ); } protected: @@ -447,93 +444,91 @@ class compile_time_sized_ringbuffer: return max_size; } - ~compile_time_sized_ringbuffer(void) + ~compile_time_sized_ringbuffer( void ) { // destroy all remaining items detail::consume_noop consume_functor; - (void)consume_all(consume_functor); + (void)consume_all( consume_functor ); } public: - bool push(T const & t) + bool push( T const& t ) { - return ringbuffer_base::push(t, data(), max_size); + return ringbuffer_base< T >::push( t, data(), max_size ); } - template - bool consume_one(Functor & f) + template < typename Functor > + bool consume_one( Functor& f ) { - return ringbuffer_base::consume_one(f, data(), max_size); + return ringbuffer_base< T >::consume_one( f, data(), max_size ); } - template - bool consume_one(Functor const & f) + template < typename Functor > + bool consume_one( Functor const& f ) { - return ringbuffer_base::consume_one(f, data(), max_size); + return ringbuffer_base< T >::consume_one( f, data(), max_size ); } - template - size_type consume_all(Functor & f) + template < typename Functor > + size_type consume_all( Functor& f ) { - return ringbuffer_base::consume_all(f, data(), max_size); + return ringbuffer_base< T >::consume_all( f, data(), max_size ); } - template - size_type consume_all(Functor const & f) + template < typename Functor > + size_type consume_all( Functor const& f ) { - return ringbuffer_base::consume_all(f, data(), max_size); + return ringbuffer_base< T >::consume_all( f, data(), max_size ); } - size_type push(T const * t, size_type size) + size_type push( T const* t, size_type size ) { - 
return ringbuffer_base::push(t, size, data(), max_size); + return ringbuffer_base< T >::push( t, size, data(), max_size ); } - template - size_type push(T const (&t)[size]) + template < size_type size > + size_type push( T const ( &t )[ size ] ) { - return push(t, size); + return push( t, size ); } - template - ConstIterator push(ConstIterator begin, ConstIterator end) + template < typename ConstIterator > + ConstIterator push( ConstIterator begin, ConstIterator end ) { - return ringbuffer_base::push(begin, end, data(), max_size); + return ringbuffer_base< T >::push( begin, end, data(), max_size ); } - size_type pop(T * ret, size_type size) + size_type pop( T* ret, size_type size ) { - return ringbuffer_base::pop(ret, size, data(), max_size); + return ringbuffer_base< T >::pop( ret, size, data(), max_size ); } - template - size_type pop_to_output_iterator(OutputIterator it) + template < typename OutputIterator > + size_type pop_to_output_iterator( OutputIterator it ) { - return ringbuffer_base::pop_to_output_iterator(it, data(), max_size); + return ringbuffer_base< T >::pop_to_output_iterator( it, data(), max_size ); } - const T& front(void) const + const T& front( void ) const { - return ringbuffer_base::front(data()); + return ringbuffer_base< T >::front( data() ); } - T& front(void) + T& front( void ) { - return ringbuffer_base::front(data()); + return ringbuffer_base< T >::front( data() ); } }; -template -class runtime_sized_ringbuffer: - public ringbuffer_base, - private Alloc +template < typename T, typename Alloc > +class runtime_sized_ringbuffer : public ringbuffer_base< T >, private Alloc { typedef std::size_t size_type; - size_type max_elements_; + size_type max_elements_; #ifdef BOOST_NO_CXX11_ALLOCATOR typedef typename Alloc::pointer pointer; #else - typedef std::allocator_traits allocator_traits; + typedef std::allocator_traits< Alloc > allocator_traits; typedef typename allocator_traits::pointer pointer; #endif pointer array_; @@ -545,153 +540,153 @@ 
class runtime_sized_ringbuffer: } public: - explicit runtime_sized_ringbuffer(size_type max_elements): - max_elements_(max_elements + 1) + explicit runtime_sized_ringbuffer( size_type max_elements ) : + max_elements_( max_elements + 1 ) { #ifdef BOOST_NO_CXX11_ALLOCATOR - array_ = Alloc::allocate(max_elements_); + array_ = Alloc::allocate( max_elements_ ); #else Alloc& alloc = *this; - array_ = allocator_traits::allocate(alloc, max_elements_); + array_ = allocator_traits::allocate( alloc, max_elements_ ); #endif } - template - runtime_sized_ringbuffer(typename boost::allocator_rebind::type const & alloc, size_type max_elements): - Alloc(alloc), max_elements_(max_elements + 1) + template < typename U > + runtime_sized_ringbuffer( typename boost::allocator_rebind< Alloc, U >::type const& alloc, size_type max_elements ) : + Alloc( alloc ), + max_elements_( max_elements + 1 ) { #ifdef BOOST_NO_CXX11_ALLOCATOR - array_ = Alloc::allocate(max_elements_); + array_ = Alloc::allocate( max_elements_ ); #else Alloc& allocator = *this; - array_ = allocator_traits::allocate(allocator, max_elements_); + array_ = allocator_traits::allocate( allocator, max_elements_ ); #endif } - runtime_sized_ringbuffer(Alloc const & alloc, size_type max_elements): - Alloc(alloc), max_elements_(max_elements + 1) + runtime_sized_ringbuffer( Alloc const& alloc, size_type max_elements ) : + Alloc( alloc ), + max_elements_( max_elements + 1 ) { #ifdef BOOST_NO_CXX11_ALLOCATOR - array_ = Alloc::allocate(max_elements_); + array_ = Alloc::allocate( max_elements_ ); #else Alloc& allocator = *this; - array_ = allocator_traits::allocate(allocator, max_elements_); + array_ = allocator_traits::allocate( allocator, max_elements_ ); #endif } - ~runtime_sized_ringbuffer(void) + ~runtime_sized_ringbuffer( void ) { // destroy all remaining items detail::consume_noop consume_functor; - (void)consume_all(consume_functor); + (void)consume_all( consume_functor ); #ifdef BOOST_NO_CXX11_ALLOCATOR - 
Alloc::deallocate(array_, max_elements_); + Alloc::deallocate( array_, max_elements_ ); #else Alloc& allocator = *this; - allocator_traits::deallocate(allocator, array_, max_elements_); + allocator_traits::deallocate( allocator, array_, max_elements_ ); #endif } - bool push(T const & t) + bool push( T const& t ) { - return ringbuffer_base::push(t, &*array_, max_elements_); + return ringbuffer_base< T >::push( t, &*array_, max_elements_ ); } - template - bool consume_one(Functor & f) + template < typename Functor > + bool consume_one( Functor& f ) { - return ringbuffer_base::consume_one(f, &*array_, max_elements_); + return ringbuffer_base< T >::consume_one( f, &*array_, max_elements_ ); } - template - bool consume_one(Functor const & f) + template < typename Functor > + bool consume_one( Functor const& f ) { - return ringbuffer_base::consume_one(f, &*array_, max_elements_); + return ringbuffer_base< T >::consume_one( f, &*array_, max_elements_ ); } - template - size_type consume_all(Functor & f) + template < typename Functor > + size_type consume_all( Functor& f ) { - return ringbuffer_base::consume_all(f, &*array_, max_elements_); + return ringbuffer_base< T >::consume_all( f, &*array_, max_elements_ ); } - template - size_type consume_all(Functor const & f) + template < typename Functor > + size_type consume_all( Functor const& f ) { - return ringbuffer_base::consume_all(f, &*array_, max_elements_); + return ringbuffer_base< T >::consume_all( f, &*array_, max_elements_ ); } - size_type push(T const * t, size_type size) + size_type push( T const* t, size_type size ) { - return ringbuffer_base::push(t, size, &*array_, max_elements_); + return ringbuffer_base< T >::push( t, size, &*array_, max_elements_ ); } - template - size_type push(T const (&t)[size]) + template < size_type size > + size_type push( T const ( &t )[ size ] ) { - return push(t, size); + return push( t, size ); } - template - ConstIterator push(ConstIterator begin, ConstIterator end) + template < 
typename ConstIterator > + ConstIterator push( ConstIterator begin, ConstIterator end ) { - return ringbuffer_base::push(begin, end, &*array_, max_elements_); + return ringbuffer_base< T >::push( begin, end, &*array_, max_elements_ ); } - size_type pop(T * ret, size_type size) + size_type pop( T* ret, size_type size ) { - return ringbuffer_base::pop(ret, size, &*array_, max_elements_); + return ringbuffer_base< T >::pop( ret, size, &*array_, max_elements_ ); } - template - size_type pop_to_output_iterator(OutputIterator it) + template < typename OutputIterator > + size_type pop_to_output_iterator( OutputIterator it ) { - return ringbuffer_base::pop_to_output_iterator(it, &*array_, max_elements_); + return ringbuffer_base< T >::pop_to_output_iterator( it, &*array_, max_elements_ ); } - const T& front(void) const + const T& front( void ) const { - return ringbuffer_base::front(&*array_); + return ringbuffer_base< T >::front( &*array_ ); } - T& front(void) + T& front( void ) { - return ringbuffer_base::front(&*array_); + return ringbuffer_base< T >::front( &*array_ ); } }; #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template +template < typename T, typename A0, typename A1 > #else -template +template < typename T, typename... Options > #endif struct make_ringbuffer { #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES - typedef typename ringbuffer_signature::bind::type bound_args; + typedef typename ringbuffer_signature::bind< A0, A1 >::type bound_args; #else - typedef typename ringbuffer_signature::bind::type bound_args; + typedef typename ringbuffer_signature::bind< Options... 
>::type bound_args; #endif - typedef extract_capacity extract_capacity_t; + typedef extract_capacity< bound_args > extract_capacity_t; - static const bool runtime_sized = !extract_capacity_t::has_capacity; - static const size_t capacity = extract_capacity_t::capacity; + static const bool runtime_sized = !extract_capacity_t::has_capacity; + static const size_t capacity = extract_capacity_t::capacity; - typedef extract_allocator extract_allocator_t; + typedef extract_allocator< bound_args, T > extract_allocator_t; typedef typename extract_allocator_t::type allocator; // allocator argument is only sane, for run-time sized ringbuffers - BOOST_STATIC_ASSERT((mpl::if_, - mpl::bool_, - mpl::true_ - >::type::value)); - - typedef typename mpl::if_c, - compile_time_sized_ringbuffer - >::type ringbuffer_type; + BOOST_STATIC_ASSERT( ( mpl::if_< mpl::bool_< !runtime_sized >, + mpl::bool_< !extract_allocator_t::has_allocator >, + mpl::true_ >::type::value ) ); + + typedef typename mpl::if_c< runtime_sized, + runtime_sized_ringbuffer< T, allocator >, + compile_time_sized_ringbuffer< T, capacity > >::type ringbuffer_type; }; @@ -705,49 +700,48 @@ struct make_ringbuffer * If this template argument is passed to the options, the size of the ringbuffer is set at compile-time. * * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator>
- * Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is configured - * to be sized at run-time + * Specifies the allocator that is used to allocate the ringbuffer. This option is only valid, if the ringbuffer is + * configured to be sized at run-time * * \b Requirements: * - T must have a default constructor * - T must be copyable * */ #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template +template < typename T, class A0, class A1 > #else -template +template < typename T, typename... Options > #endif -class spsc_queue: +class spsc_queue : #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES - public detail::make_ringbuffer::ringbuffer_type + public detail::make_ringbuffer< T, A0, A1 >::ringbuffer_type #else - public detail::make_ringbuffer::ringbuffer_type + public detail::make_ringbuffer< T, Options... >::ringbuffer_type #endif { private: - #ifndef BOOST_DOXYGEN_INVOKED -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES - typedef typename detail::make_ringbuffer::ringbuffer_type base_type; - static const bool runtime_sized = detail::make_ringbuffer::runtime_sized; - typedef typename detail::make_ringbuffer::allocator allocator_arg; -#else - typedef typename detail::make_ringbuffer::ringbuffer_type base_type; - static const bool runtime_sized = detail::make_ringbuffer::runtime_sized; - typedef typename detail::make_ringbuffer::allocator allocator_arg; -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES + typedef typename detail::make_ringbuffer< T, A0, A1 >::ringbuffer_type base_type; + static const bool runtime_sized = detail::make_ringbuffer< T, A0, A1 >::runtime_sized; + typedef typename detail::make_ringbuffer< T, A0, A1 >::allocator allocator_arg; +# else + typedef typename detail::make_ringbuffer< T, Options... >::ringbuffer_type base_type; + static const bool runtime_sized = detail::make_ringbuffer< T, Options... >::runtime_sized; + typedef typename detail::make_ringbuffer< T, Options... 
>::allocator allocator_arg; +# endif struct implementation_defined { typedef allocator_arg allocator; - typedef std::size_t size_type; + typedef std::size_t size_type; }; #endif public: - typedef T value_type; + typedef T value_type; typedef typename implementation_defined::allocator allocator; typedef typename implementation_defined::size_type size_type; @@ -755,11 +749,11 @@ class spsc_queue: * * \pre spsc_queue must be configured to be sized at compile-time */ - spsc_queue(void) + spsc_queue( void ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(!runtime_sized); + BOOST_ASSERT( !runtime_sized ); } /** Constructs a spsc_queue with a custom allocator @@ -768,10 +762,10 @@ class spsc_queue: * * \note This is just for API compatibility: an allocator isn't actually needed */ - template - explicit spsc_queue(typename boost::allocator_rebind::type const &) + template < typename U > + explicit spsc_queue( typename boost::allocator_rebind< allocator, U >::type const& ) { - BOOST_STATIC_ASSERT(!runtime_sized); + BOOST_STATIC_ASSERT( !runtime_sized ); } /** Constructs a spsc_queue with a custom allocator @@ -780,46 +774,46 @@ class spsc_queue: * * \note This is just for API compatibility: an allocator isn't actually needed */ - explicit spsc_queue(allocator const &) + explicit spsc_queue( allocator const& ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. 
- BOOST_ASSERT(!runtime_sized); + BOOST_ASSERT( !runtime_sized ); } /** Constructs a spsc_queue for element_count elements * * \pre spsc_queue must be configured to be sized at run-time */ - explicit spsc_queue(size_type element_count): - base_type(element_count) + explicit spsc_queue( size_type element_count ) : + base_type( element_count ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(runtime_sized); + BOOST_ASSERT( runtime_sized ); } /** Constructs a spsc_queue for element_count elements with a custom allocator * * \pre spsc_queue must be configured to be sized at run-time */ - template - spsc_queue(size_type element_count, typename boost::allocator_rebind::type const & alloc): - base_type(alloc, element_count) + template < typename U > + spsc_queue( size_type element_count, typename boost::allocator_rebind< allocator, U >::type const& alloc ) : + base_type( alloc, element_count ) { - BOOST_STATIC_ASSERT(runtime_sized); + BOOST_STATIC_ASSERT( runtime_sized ); } /** Constructs a spsc_queue for element_count elements with a custom allocator * * \pre spsc_queue must be configured to be sized at run-time */ - spsc_queue(size_type element_count, allocator_arg const & alloc): - base_type(alloc, element_count) + spsc_queue( size_type element_count, allocator_arg const& alloc ) : + base_type( alloc, element_count ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(runtime_sized); + BOOST_ASSERT( runtime_sized ); } /** Pushes object t to the ringbuffer. @@ -830,9 +824,9 @@ class spsc_queue: * * \note Thread-safe and wait-free * */ - bool push(T const & t) + bool push( T const& t ) { - return base_type::push(t); + return base_type::push( t ); } /** Pops one object from ringbuffer. 
@@ -843,7 +837,7 @@ class spsc_queue: * * \note Thread-safe and wait-free */ - bool pop () + bool pop() { detail::consume_noop consume_functor; return consume_one( consume_functor ); @@ -857,11 +851,10 @@ class spsc_queue: * * \note Thread-safe and wait-free */ - template - typename boost::enable_if::type, bool>::type - pop (U & ret) + template < typename U > + typename boost::enable_if< typename is_convertible< T, U >::type, bool >::type pop( U& ret ) { - detail::consume_via_copy consume_functor(ret); + detail::consume_via_copy< U > consume_functor( ret ); return consume_one( consume_functor ); } @@ -872,9 +865,9 @@ class spsc_queue: * * \note Thread-safe and wait-free */ - size_type push(T const * t, size_type size) + size_type push( T const* t, size_type size ) { - return base_type::push(t, size); + return base_type::push( t, size ); } /** Pushes as many objects from the array t as there is space available. @@ -884,10 +877,10 @@ class spsc_queue: * * \note Thread-safe and wait-free */ - template - size_type push(T const (&t)[size]) + template < size_type size > + size_type push( T const ( &t )[ size ] ) { - return push(t, size); + return push( t, size ); } /** Pushes as many objects from the range [begin, end) as there is space . @@ -897,10 +890,10 @@ class spsc_queue: * * \note Thread-safe and wait-free */ - template - ConstIterator push(ConstIterator begin, ConstIterator end) + template < typename ConstIterator > + ConstIterator push( ConstIterator begin, ConstIterator end ) { - return base_type::push(begin, end); + return base_type::push( begin, end ); } /** Pops a maximum of size objects from ringbuffer. @@ -910,9 +903,9 @@ class spsc_queue: * * \note Thread-safe and wait-free * */ - size_type pop(T * ret, size_type size) + size_type pop( T* ret, size_type size ) { - return base_type::pop(ret, size); + return base_type::pop( ret, size ); } /** Pops a maximum of size objects from spsc_queue. 
@@ -922,10 +915,10 @@ class spsc_queue: * * \note Thread-safe and wait-free * */ - template - size_type pop(T (&ret)[size]) + template < size_type size > + size_type pop( T ( &ret )[ size ] ) { - return pop(ret, size); + return pop( ret, size ); } /** Pops objects to the output iterator it @@ -935,11 +928,11 @@ class spsc_queue: * * \note Thread-safe and wait-free * */ - template - typename boost::disable_if::type, size_type>::type - pop(OutputIterator it) + template < typename OutputIterator > + typename boost::disable_if< typename is_convertible< T, OutputIterator >::type, size_type >::type + pop( OutputIterator it ) { - return base_type::pop_to_output_iterator(it); + return base_type::pop_to_output_iterator( it ); } /** consumes one element via a functor @@ -950,17 +943,17 @@ class spsc_queue: * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - bool consume_one(Functor & f) + template < typename Functor > + bool consume_one( Functor& f ) { - return base_type::consume_one(f); + return base_type::consume_one( f ); } /// \copydoc boost::lockfree::spsc_queue::consume_one(Functor & rhs) - template - bool consume_one(Functor const & f) + template < typename Functor > + bool consume_one( Functor const& f ) { - return base_type::consume_one(f); + return base_type::consume_one( f ); } /** consumes all elements via a functor @@ -971,17 +964,17 @@ class spsc_queue: * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - size_type consume_all(Functor & f) + template < typename Functor > + size_type consume_all( Functor& f ) { - return base_type::consume_all(f); + return base_type::consume_all( f ); } /// \copydoc boost::lockfree::spsc_queue::consume_all(Functor & rhs) - template - size_type consume_all(Functor const & f) + template < typename Functor > + size_type consume_all( Functor const& f ) { - return base_type::consume_all(f); + return base_type::consume_all( f ); } /** get 
number of elements that are available for read @@ -992,7 +985,7 @@ class spsc_queue: * */ size_type read_available() const { - return base_type::read_available(base_type::max_number_of_elements()); + return base_type::read_available( base_type::max_number_of_elements() ); } /** get write space to write elements @@ -1003,7 +996,7 @@ class spsc_queue: * */ size_type write_available() const { - return base_type::write_available(base_type::max_number_of_elements()); + return base_type::write_available( base_type::max_number_of_elements() ); } /** get reference to element in the front of the queue @@ -1018,14 +1011,14 @@ class spsc_queue: */ const T& front() const { - BOOST_ASSERT(read_available() > 0); + BOOST_ASSERT( read_available() > 0 ); return base_type::front(); } /// \copydoc boost::lockfree::spsc_queue::front() const T& front() { - BOOST_ASSERT(read_available() > 0); + BOOST_ASSERT( read_available() > 0 ); return base_type::front(); } @@ -1033,22 +1026,21 @@ class spsc_queue: * * \note Not thread-safe * */ - void reset(void) + void reset( void ) { - if ( !boost::has_trivial_destructor::value ) { + if ( !boost::has_trivial_destructor< T >::value ) { // make sure to call all destructors! 
detail::consume_noop consume_functor; - (void)consume_all(consume_functor); + (void)consume_all( consume_functor ); } else { - base_type::write_index_.store(0, memory_order_relaxed); - base_type::read_index_.store(0, memory_order_release); + base_type::write_index_.store( 0, memory_order_relaxed ); + base_type::read_index_.store( 0, memory_order_release ); } - } + } }; -} /* namespace lockfree */ -} /* namespace boost */ +}} // namespace boost::lockfree #endif /* BOOST_LOCKFREE_SPSC_QUEUE_HPP_INCLUDED */ diff --git a/include/boost/lockfree/stack.hpp b/include/boost/lockfree/stack.hpp index d6d4ccd4..c78eea05 100644 --- a/include/boost/lockfree/stack.hpp +++ b/include/boost/lockfree/stack.hpp @@ -25,18 +25,16 @@ #include #ifdef BOOST_HAS_PRAGMA_ONCE -#pragma once +# pragma once #endif -namespace boost { -namespace lockfree { -namespace detail { +namespace boost { namespace lockfree { +namespace detail { -typedef parameter::parameters, - boost::parameter::optional - > stack_signature; +typedef parameter::parameters< boost::parameter::optional< tag::allocator >, boost::parameter::optional< tag::capacity > > + stack_signature; -} +} // namespace detail /** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free, * construction/destruction has to be synchronized. It uses a freelist for memory management, @@ -46,10 +44,10 @@ typedef parameter::parameters, * * - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized
* Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.
- * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed - * by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index - * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way - * to achieve lock-freedom. + * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are + * addressed by array indexing. This limits the possible size of the stack to the number of elements that can be + * addressed by the index type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange + * instructions, this is the best way to achieve lock-freedom. * * - \c boost::lockfree::capacity<>, optional
* If this template argument is passed to the options, the size of the stack is set at compile-time.
@@ -62,62 +60,62 @@ typedef parameter::parameters, * - T must have a copy constructor * */ #ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES -template +template < typename T, class A0, class A1, class A2 > #else -template +template < typename T, typename... Options > #endif class stack { private: #ifndef BOOST_DOXYGEN_INVOKED - BOOST_STATIC_ASSERT(boost::is_copy_constructible::value); + BOOST_STATIC_ASSERT( boost::is_copy_constructible< T >::value ); -#ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES - typedef typename detail::stack_signature::bind::type bound_args; -#else - typedef typename detail::stack_signature::bind::type bound_args; -#endif +# ifdef BOOST_NO_CXX11_VARIADIC_TEMPLATES + typedef typename detail::stack_signature::bind< A0, A1, A2 >::type bound_args; +# else + typedef typename detail::stack_signature::bind< Options... >::type bound_args; +# endif - static const bool has_capacity = detail::extract_capacity::has_capacity; - static const size_t capacity = detail::extract_capacity::capacity; - static const bool fixed_sized = detail::extract_fixed_sized::value; - static const bool node_based = !(has_capacity || fixed_sized); - static const bool compile_time_sized = has_capacity; + static const bool has_capacity = detail::extract_capacity< bound_args >::has_capacity; + static const size_t capacity = detail::extract_capacity< bound_args >::capacity; + static const bool fixed_sized = detail::extract_fixed_sized< bound_args >::value; + static const bool node_based = !( has_capacity || fixed_sized ); + static const bool compile_time_sized = has_capacity; struct node { - node(T const & val): - v(val) + node( T const& val ) : + v( val ) {} - typedef typename detail::select_tagged_handle::handle_type handle_t; - handle_t next; - const T v; + typedef typename detail::select_tagged_handle< node, node_based >::handle_type handle_t; + handle_t next; + const T v; }; - typedef typename detail::extract_allocator::type node_allocator; - typedef typename detail::select_freelist::type 
pool_t; + typedef typename detail::extract_allocator< bound_args, node >::type node_allocator; + typedef + typename detail::select_freelist< node, node_allocator, compile_time_sized, fixed_sized, capacity >::type pool_t; typedef typename pool_t::tagged_node_handle tagged_node_handle; // check compile-time capacity - BOOST_STATIC_ASSERT((mpl::if_c::const_max>, - mpl::true_ - >::type::value)); + BOOST_STATIC_ASSERT( ( mpl::if_c< has_capacity, + mpl::bool_< capacity - 1 < boost::integer_traits< boost::uint16_t >::const_max >, + mpl::true_ >::type::value ) ); struct implementation_defined { typedef node_allocator allocator; - typedef std::size_t size_type; + typedef std::size_t size_type; }; #endif - BOOST_DELETED_FUNCTION(stack(stack const&)) - BOOST_DELETED_FUNCTION(stack& operator= (stack const&)) + BOOST_DELETED_FUNCTION( stack( stack const& ) ) + BOOST_DELETED_FUNCTION( stack& operator=( stack const& ) ) public: - typedef T value_type; + typedef T value_type; typedef typename implementation_defined::allocator allocator; typedef typename implementation_defined::size_type size_type; @@ -130,7 +128,7 @@ class stack * every internal node, which is impossible if further nodes will be allocated from the operating system. * * */ - bool is_lock_free (void) const + bool is_lock_free( void ) const { return tos.is_lock_free() && pool.is_lock_free(); } @@ -139,12 +137,12 @@ class stack * * \pre Must specify a capacity<> argument * */ - stack(void): - pool(node_allocator(), capacity) + stack( void ) : + pool( node_allocator(), capacity ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. 
- BOOST_ASSERT(has_capacity); + BOOST_ASSERT( has_capacity ); initialize(); } @@ -152,11 +150,11 @@ class stack * * \pre Must specify a capacity<> argument * */ - template - explicit stack(typename boost::allocator_rebind::type const & alloc): - pool(alloc, capacity) + template < typename U > + explicit stack( typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) : + pool( alloc, capacity ) { - BOOST_STATIC_ASSERT(has_capacity); + BOOST_STATIC_ASSERT( has_capacity ); initialize(); } @@ -164,12 +162,12 @@ class stack * * \pre Must specify a capacity<> argument * */ - explicit stack(allocator const & alloc): - pool(alloc, capacity) + explicit stack( allocator const& alloc ) : + pool( alloc, capacity ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(has_capacity); + BOOST_ASSERT( has_capacity ); initialize(); } @@ -179,12 +177,12 @@ class stack * * \pre Must \b not specify a capacity<> argument * */ - explicit stack(size_type n): - pool(node_allocator(), n) + explicit stack( size_type n ) : + pool( node_allocator(), n ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. 
- BOOST_ASSERT(!has_capacity); + BOOST_ASSERT( !has_capacity ); initialize(); } @@ -194,11 +192,11 @@ class stack * * \pre Must \b not specify a capacity<> argument * */ - template - stack(size_type n, typename boost::allocator_rebind::type const & alloc): - pool(alloc, n) + template < typename U > + stack( size_type n, typename boost::allocator_rebind< node_allocator, U >::type const& alloc ) : + pool( alloc, n ) { - BOOST_STATIC_ASSERT(!has_capacity); + BOOST_STATIC_ASSERT( !has_capacity ); initialize(); } @@ -208,12 +206,12 @@ class stack * \note thread-safe, may block if memory allocator blocks * * */ - void reserve(size_type n) + void reserve( size_type n ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(!has_capacity); - pool.template reserve(n); + BOOST_ASSERT( !has_capacity ); + pool.template reserve< true >( n ); } /** Allocate n nodes for freelist @@ -222,12 +220,12 @@ class stack * \note not thread-safe, may block if memory allocator blocks * * */ - void reserve_unsafe(size_type n) + void reserve_unsafe( size_type n ) { // Don't use BOOST_STATIC_ASSERT() here since it will be evaluated when compiling // this function and this function may be compiled even when it isn't being used. - BOOST_ASSERT(!has_capacity); - pool.template reserve(n); + BOOST_ASSERT( !has_capacity ); + pool.template reserve< false >( n ); } /** Destroys stack, free all nodes from freelist. 
@@ -235,67 +233,70 @@ class stack * \note not thread-safe * * */ - ~stack(void) + ~stack( void ) { detail::consume_noop consume_functor; - (void)consume_all(consume_functor); + (void)consume_all( consume_functor ); } private: #ifndef BOOST_DOXYGEN_INVOKED - void initialize(void) + void initialize( void ) { - tos.store(tagged_node_handle(pool.null_handle(), 0)); + tos.store( tagged_node_handle( pool.null_handle(), 0 ) ); } - void link_nodes_atomic(node * new_top_node, node * end_node) + void link_nodes_atomic( node* new_top_node, node* end_node ) { - tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); - for (;;) { - tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag()); - end_node->next = pool.get_handle(old_tos); + tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed ); + for ( ;; ) { + tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() ); + end_node->next = pool.get_handle( old_tos ); - if (tos.compare_exchange_weak(old_tos, new_tos)) + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) break; } } - void link_nodes_unsafe(node * new_top_node, node * end_node) + void link_nodes_unsafe( node* new_top_node, node* end_node ) { - tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); + tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed ); - tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag()); - end_node->next = pool.get_handle(old_tos); + tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() ); + end_node->next = pool.get_handle( old_tos ); - tos.store(new_tos, memory_order_relaxed); + tos.store( new_tos, memory_order_relaxed ); } - template - tuple prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret) + template < bool Threadsafe, bool Bounded, typename ConstIterator > + tuple< node*, node* > prepare_node_list( ConstIterator begin, ConstIterator end, ConstIterator& ret ) { - 
ConstIterator it = begin; - node * end_node = pool.template construct(*it++); - if (end_node == NULL) { + ConstIterator it = begin; + node* end_node = pool.template construct< Threadsafe, Bounded >( *it++ ); + if ( end_node == NULL ) { ret = begin; - return make_tuple(NULL, NULL); + return make_tuple< node*, node* >( NULL, NULL ); } - node * new_top_node = end_node; - end_node->next = NULL; + node* new_top_node = end_node; + end_node->next = NULL; - BOOST_TRY { + BOOST_TRY + { /* link nodes */ - for (; it != end; ++it) { - node * newnode = pool.template construct(*it); - if (newnode == NULL) + for ( ; it != end; ++it ) { + node* newnode = pool.template construct< Threadsafe, Bounded >( *it ); + if ( newnode == NULL ) break; newnode->next = new_top_node; - new_top_node = newnode; + new_top_node = newnode; } - } BOOST_CATCH (...) { - for (node * current_node = new_top_node; current_node != NULL;) { - node * next = current_node->next; - pool.template destruct(current_node); + } + BOOST_CATCH( ... ) + { + for ( node* current_node = new_top_node; current_node != NULL; ) { + node* next = current_node->next; + pool.template destruct< Threadsafe >( current_node ); current_node = next; } BOOST_RETHROW; @@ -303,7 +304,7 @@ class stack BOOST_CATCH_END ret = it; - return make_tuple(new_top_node, end_node); + return make_tuple( new_top_node, end_node ); } #endif @@ -313,13 +314,12 @@ class stack * \post object will be pushed to the stack, if internal node can be allocated * \returns true, if the push operation is successful. * - * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. - * \throws if memory allocator throws + * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will + * be allocated from the OS. This may not be lock-free. 
\throws if memory allocator throws * */ - bool push(T const & v) + bool push( T const& v ) { - return do_push(v); + return do_push< false >( v ); } /** Pushes object t to the stack. @@ -329,34 +329,34 @@ class stack * * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail * */ - bool bounded_push(T const & v) + bool bounded_push( T const& v ) { - return do_push(v); + return do_push< true >( v ); } #ifndef BOOST_DOXYGEN_INVOKED private: - template - bool do_push(T const & v) + template < bool Bounded > + bool do_push( T const& v ) { - node * newnode = pool.template construct(v); - if (newnode == 0) + node* newnode = pool.template construct< true, Bounded >( v ); + if ( newnode == 0 ) return false; - link_nodes_atomic(newnode, newnode); + link_nodes_atomic( newnode, newnode ); return true; } - template - ConstIterator do_push(ConstIterator begin, ConstIterator end) + template < bool Bounded, typename ConstIterator > + ConstIterator do_push( ConstIterator begin, ConstIterator end ) { - node * new_top_node; - node * end_node; + node* new_top_node; + node* end_node; ConstIterator ret; - tie(new_top_node, end_node) = prepare_node_list(begin, end, ret); - if (new_top_node) - link_nodes_atomic(new_top_node, end_node); + tie( new_top_node, end_node ) = prepare_node_list< true, Bounded >( begin, end, ret ); + if ( new_top_node ) + link_nodes_atomic( new_top_node, end_node ); return ret; } @@ -369,14 +369,13 @@ class stack * \return iterator to the first element, which has not been pushed * * \note Operation is applied atomically - * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. - * \throws if memory allocator throws + * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will + * be allocated from the OS. This may not be lock-free. 
\throws if memory allocator throws */ - template - ConstIterator push(ConstIterator begin, ConstIterator end) + template < typename ConstIterator > + ConstIterator push( ConstIterator begin, ConstIterator end ) { - return do_push(begin, end); + return do_push< false, ConstIterator >( begin, end ); } /** Pushes as many objects from the range [begin, end) as freelist node can be allocated. @@ -387,10 +386,10 @@ class stack * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail * \throws if memory allocator throws */ - template - ConstIterator bounded_push(ConstIterator begin, ConstIterator end) + template < typename ConstIterator > + ConstIterator bounded_push( ConstIterator begin, ConstIterator end ) { - return do_push(begin, end); + return do_push< true, ConstIterator >( begin, end ); } @@ -399,17 +398,16 @@ class stack * \post object will be pushed to the stack, if internal node can be allocated * \returns true, if the push operation is successful. * - * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. - * \throws if memory allocator throws + * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node + * will be allocated from the OS. This may not be lock-free. \throws if memory allocator throws * */ - bool unsynchronized_push(T const & v) + bool unsynchronized_push( T const& v ) { - node * newnode = pool.template construct(v); - if (newnode == 0) + node* newnode = pool.template construct< false, false >( v ); + if ( newnode == 0 ) return false; - link_nodes_unsafe(newnode, newnode); + link_nodes_unsafe( newnode, newnode ); return true; } @@ -417,20 +415,19 @@ class stack * * \return iterator to the first element, which has not been pushed * - * \note Not thread-safe. 
If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated - * from the OS. This may not be lock-free. - * \throws if memory allocator throws + * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node + * will be allocated from the OS. This may not be lock-free. \throws if memory allocator throws */ - template - ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end) + template < typename ConstIterator > + ConstIterator unsynchronized_push( ConstIterator begin, ConstIterator end ) { - node * new_top_node; - node * end_node; + node* new_top_node; + node* end_node; ConstIterator ret; - tie(new_top_node, end_node) = prepare_node_list(begin, end, ret); - if (new_top_node) - link_nodes_unsafe(new_top_node, end_node); + tie( new_top_node, end_node ) = prepare_node_list< false, false >( begin, end, ret ); + if ( new_top_node ) + link_nodes_unsafe( new_top_node, end_node ); return ret; } @@ -444,9 +441,9 @@ class stack * \note Thread-safe and non-blocking * * */ - bool pop(T & ret) + bool pop( T& ret ) { - return pop(ret); + return pop< T >( ret ); } /** Pops object from stack. @@ -458,13 +455,13 @@ class stack * \note Thread-safe and non-blocking * * */ - template - bool pop(U & ret) + template < typename U > + bool pop( U& ret ) { - BOOST_STATIC_ASSERT((boost::is_convertible::value)); - detail::consume_via_copy consumer(ret); + BOOST_STATIC_ASSERT( ( boost::is_convertible< T, U >::value ) ); + detail::consume_via_copy< U > consumer( ret ); - return consume_one(consumer); + return consume_one( consumer ); } @@ -476,9 +473,9 @@ class stack * \note Not thread-safe, but non-blocking * * */ - bool unsynchronized_pop(T & ret) + bool unsynchronized_pop( T& ret ) { - return unsynchronized_pop(ret); + return unsynchronized_pop< T >( ret ); } /** Pops object from stack. 
@@ -490,22 +487,22 @@ class stack * \note Not thread-safe, but non-blocking * * */ - template - bool unsynchronized_pop(U & ret) + template < typename U > + bool unsynchronized_pop( U& ret ) { - BOOST_STATIC_ASSERT((boost::is_convertible::value)); - tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); - node * old_tos_pointer = pool.get_pointer(old_tos); + BOOST_STATIC_ASSERT( ( boost::is_convertible< T, U >::value ) ); + tagged_node_handle old_tos = tos.load( detail::memory_order_relaxed ); + node* old_tos_pointer = pool.get_pointer( old_tos ); - if (!pool.get_pointer(old_tos)) + if ( !pool.get_pointer( old_tos ) ) return false; - node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next); - tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag()); + node* new_tos_ptr = pool.get_pointer( old_tos_pointer->next ); + tagged_node_handle new_tos( pool.get_handle( new_tos_ptr ), old_tos.get_next_tag() ); - tos.store(new_tos, memory_order_relaxed); - detail::copy_payload(old_tos_pointer->v, ret); - pool.template destruct(old_tos); + tos.store( new_tos, memory_order_relaxed ); + detail::copy_payload( old_tos_pointer->v, ret ); + pool.template destruct< false >( old_tos ); return true; } @@ -517,42 +514,42 @@ class stack * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - bool consume_one(Functor & f) + template < typename Functor > + bool consume_one( Functor& f ) { - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + if ( !old_tos_pointer ) return false; - tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag()); + tagged_node_handle new_tos( old_tos_pointer->next, old_tos.get_next_tag() ); - if 
(tos.compare_exchange_weak(old_tos, new_tos)) { - f(old_tos_pointer->v); - pool.template destruct(old_tos); + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) { + f( old_tos_pointer->v ); + pool.template destruct< true >( old_tos ); return true; } } } /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs) - template - bool consume_one(Functor const & f) + template < typename Functor > + bool consume_one( Functor const& f ) { - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + if ( !old_tos_pointer ) return false; - tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag()); + tagged_node_handle new_tos( old_tos_pointer->next, old_tos.get_next_tag() ); - if (tos.compare_exchange_weak(old_tos, new_tos)) { - f(old_tos_pointer->v); - pool.template destruct(old_tos); + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) { + f( old_tos_pointer->v ); + pool.template destruct< true >( old_tos ); return true; } } @@ -566,22 +563,22 @@ class stack * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - size_t consume_all(Functor & f) + template < typename Functor > + size_t consume_all( Functor& f ) { size_t element_count = 0; - while (consume_one(f)) + while ( consume_one( f ) ) element_count += 1; return element_count; } /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs) - template - size_t consume_all(Functor const & f) + template < typename Functor > + size_t consume_all( Functor const& f ) { size_t element_count = 0; - while (consume_one(f)) + while ( consume_one( f ) ) element_count += 1; return element_count; @@ -595,39 +592,39 @@ class stack * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - 
template - size_t consume_all_atomic(Functor & f) + template < typename Functor > + size_t consume_all_atomic( Functor& f ) { - size_t element_count = 0; - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + size_t element_count = 0; + tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + if ( !old_tos_pointer ) return 0; - tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag()); + tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() ); - if (tos.compare_exchange_weak(old_tos, new_tos)) + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) break; } tagged_node_handle nodes_to_consume = old_tos; - for(;;) { - node * node_pointer = pool.get_pointer(nodes_to_consume); - f(node_pointer->v); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_to_consume ); + f( node_pointer->v ); element_count += 1; - node * next_node = pool.get_pointer(node_pointer->next); + node* next_node = pool.get_pointer( node_pointer->next ); - if (!next_node) { - pool.template destruct(nodes_to_consume); + if ( !next_node ) { + pool.template destruct< true >( nodes_to_consume ); break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag()); - pool.template destruct(nodes_to_consume); + tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() ); + pool.template destruct< true >( nodes_to_consume ); nodes_to_consume = next; } @@ -635,39 +632,39 @@ class stack } /// \copydoc boost::lockfree::stack::consume_all_atomic(Functor & rhs) - template - size_t consume_all_atomic(Functor const & f) + template < typename Functor > + size_t consume_all_atomic( Functor const& f ) { - size_t element_count = 0; - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + size_t element_count = 0; + 
tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + if ( !old_tos_pointer ) return 0; - tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag()); + tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() ); - if (tos.compare_exchange_weak(old_tos, new_tos)) + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) break; } tagged_node_handle nodes_to_consume = old_tos; - for(;;) { - node * node_pointer = pool.get_pointer(nodes_to_consume); - f(node_pointer->v); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_to_consume ); + f( node_pointer->v ); element_count += 1; - node * next_node = pool.get_pointer(node_pointer->next); + node* next_node = pool.get_pointer( node_pointer->next ); - if (!next_node) { - pool.template destruct(nodes_to_consume); + if ( !next_node ) { + pool.template destruct< true >( nodes_to_consume ); break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag()); - pool.template destruct(nodes_to_consume); + tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() ); + pool.template destruct< true >( nodes_to_consume ); nodes_to_consume = next; } @@ -682,57 +679,57 @@ class stack * * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking * */ - template - size_t consume_all_atomic_reversed(Functor & f) + template < typename Functor > + size_t consume_all_atomic_reversed( Functor& f ) { - size_t element_count = 0; - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + size_t element_count = 0; + tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + 
if ( !old_tos_pointer ) return 0; - tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag()); + tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() ); - if (tos.compare_exchange_weak(old_tos, new_tos)) + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) break; } tagged_node_handle nodes_to_consume = old_tos; - node * last_node_pointer = NULL; + node* last_node_pointer = NULL; tagged_node_handle nodes_in_reversed_order; - for(;;) { - node * node_pointer = pool.get_pointer(nodes_to_consume); - node * next_node = pool.get_pointer(node_pointer->next); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_to_consume ); + node* next_node = pool.get_pointer( node_pointer->next ); - node_pointer->next = pool.get_handle(last_node_pointer); - last_node_pointer = node_pointer; + node_pointer->next = pool.get_handle( last_node_pointer ); + last_node_pointer = node_pointer; - if (!next_node) { + if ( !next_node ) { nodes_in_reversed_order = nodes_to_consume; break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag()); + tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() ); nodes_to_consume = next; } - for(;;) { - node * node_pointer = pool.get_pointer(nodes_in_reversed_order); - f(node_pointer->v); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_in_reversed_order ); + f( node_pointer->v ); element_count += 1; - node * next_node = pool.get_pointer(node_pointer->next); + node* next_node = pool.get_pointer( node_pointer->next ); - if (!next_node) { - pool.template destruct(nodes_in_reversed_order); + if ( !next_node ) { + pool.template destruct< true >( nodes_in_reversed_order ); break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag()); - pool.template destruct(nodes_in_reversed_order); + tagged_node_handle next( pool.get_handle( next_node ), nodes_in_reversed_order.get_next_tag() ); + pool.template 
destruct< true >( nodes_in_reversed_order ); nodes_in_reversed_order = next; } @@ -740,57 +737,57 @@ class stack } /// \copydoc boost::lockfree::stack::consume_all_atomic_reversed(Functor & rhs) - template - size_t consume_all_atomic_reversed(Functor const & f) + template < typename Functor > + size_t consume_all_atomic_reversed( Functor const& f ) { - size_t element_count = 0; - tagged_node_handle old_tos = tos.load(detail::memory_order_consume); + size_t element_count = 0; + tagged_node_handle old_tos = tos.load( detail::memory_order_consume ); - for (;;) { - node * old_tos_pointer = pool.get_pointer(old_tos); - if (!old_tos_pointer) + for ( ;; ) { + node* old_tos_pointer = pool.get_pointer( old_tos ); + if ( !old_tos_pointer ) return 0; - tagged_node_handle new_tos(pool.null_handle(), old_tos.get_next_tag()); + tagged_node_handle new_tos( pool.null_handle(), old_tos.get_next_tag() ); - if (tos.compare_exchange_weak(old_tos, new_tos)) + if ( tos.compare_exchange_weak( old_tos, new_tos ) ) break; } tagged_node_handle nodes_to_consume = old_tos; - node * last_node_pointer = NULL; + node* last_node_pointer = NULL; tagged_node_handle nodes_in_reversed_order; - for(;;) { - node * node_pointer = pool.get_pointer(nodes_to_consume); - node * next_node = pool.get_pointer(node_pointer->next); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_to_consume ); + node* next_node = pool.get_pointer( node_pointer->next ); - node_pointer->next = pool.get_handle(last_node_pointer); - last_node_pointer = node_pointer; + node_pointer->next = pool.get_handle( last_node_pointer ); + last_node_pointer = node_pointer; - if (!next_node) { + if ( !next_node ) { nodes_in_reversed_order = nodes_to_consume; break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_to_consume.get_next_tag()); + tagged_node_handle next( pool.get_handle( next_node ), nodes_to_consume.get_next_tag() ); nodes_to_consume = next; } - for(;;) { - node * node_pointer = 
pool.get_pointer(nodes_in_reversed_order); - f(node_pointer->v); + for ( ;; ) { + node* node_pointer = pool.get_pointer( nodes_in_reversed_order ); + f( node_pointer->v ); element_count += 1; - node * next_node = pool.get_pointer(node_pointer->next); + node* next_node = pool.get_pointer( node_pointer->next ); - if (!next_node) { - pool.template destruct(nodes_in_reversed_order); + if ( !next_node ) { + pool.template destruct< true >( nodes_in_reversed_order ); break; } - tagged_node_handle next(pool.get_handle(next_node), nodes_in_reversed_order.get_next_tag()); - pool.template destruct(nodes_in_reversed_order); + tagged_node_handle next( pool.get_handle( next_node ), nodes_in_reversed_order.get_next_tag() ); + pool.template destruct< true >( nodes_in_reversed_order ); nodes_in_reversed_order = next; } @@ -802,23 +799,22 @@ class stack * \note It only guarantees that at some point during the execution of the function the stack has been empty. * It is rarely practical to use this value in program logic, because the stack can be modified by other threads. * */ - bool empty(void) const + bool empty( void ) const { - return pool.get_pointer(tos.load()) == NULL; + return pool.get_pointer( tos.load() ) == NULL; } private: #ifndef BOOST_DOXYGEN_INVOKED - detail::atomic tos; + detail::atomic< tagged_node_handle > tos; - static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle); - char padding[padding_size]; + static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof( tagged_node_handle ); + char padding[ padding_size ]; pool_t pool; #endif }; -} /* namespace lockfree */ -} /* namespace boost */ +}} // namespace boost::lockfree #endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */ diff --git a/index.html b/index.html index ccee53f7..31e91a2f 100644 --- a/index.html +++ b/index.html @@ -6,8 +6,8 @@ Automatic redirection failed, please go to ../../doc/html/lockfree.html  

© Copyright Beman Dawes, 2001

-

Distributed under the Boost Software License, Version 1.0. (See accompanying -file LICENSE_1_0.txt or copy +

Distributed under the Boost Software License, Version 1.0. (See accompanying +file LICENSE_1_0.txt or copy at www.boost.org/LICENSE_1_0.txt)

diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 00000000..d51b0fe0 --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,45 @@ +if(NOT TARGET boost_lockfree_all_tests) + add_custom_target(boost_lockfree_all_tests) +endif() + + +set(Tests + destructor_test + freelist_test + queue_bounded_stress_test + queue_fixedsize_stress_test + queue_interprocess_test + queue_test + queue_unbounded_stress_test + spsc_queue_stress_test + spsc_queue_test + stack_bounded_stress_test + stack_fixedsize_stress_test + stack_interprocess_test + stack_test + stack_unbounded_stress_test + tagged_ptr_test +) + +foreach(Test ${Tests}) + # Test target + add_executable(boost_lockfree_${Test} ${Test}.cpp test_common.hpp test_helpers.hpp ) + source_group( TREE "${CMAKE_CURRENT_SOURCE_DIR}" FILES ${Test}.cpp test_common.hpp test_helpers.hpp) + + target_include_directories(boost_lockfree_${Test} PRIVATE . ) + target_link_libraries(boost_lockfree_${Test} PRIVATE + Boost::lockfree + Boost::unit_test_framework + Boost::thread + Boost::foreach + ) + + target_compile_definitions(boost_lockfree_${Test} PRIVATE BOOST_TEST_NO_OLD_TOOLS ) + + # CTest Target + add_test(NAME boost_lockfree_${Test} COMMAND boost_lockfree_${Test}) + add_dependencies(boost_lockfree_all_tests boost_lockfree_${Test} ) +endforeach() + +target_link_libraries(boost_lockfree_stack_interprocess_test PRIVATE Boost::interprocess) +target_link_libraries(boost_lockfree_queue_interprocess_test PRIVATE Boost::interprocess) diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 3b7dd27f..7e698434 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -36,6 +36,7 @@ rule test_all ../../thread/build//boost_thread/ multi static + BOOST_TEST_NO_OLD_TOOLS ] ; } diff --git a/test/destructor_test.cpp b/test/destructor_test.cpp index d757e815..61d81f33 100644 --- a/test/destructor_test.cpp +++ b/test/destructor_test.cpp @@ -4,14 +4,14 @@ // accompanying file LICENSE_1_0.txt or copy at // 
http://www.boost.org/LICENSE_1_0.txt) -#include #include +#include #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif @@ -24,7 +24,7 @@ struct tester ++g_instance_counter; } - tester(tester const&) + tester( tester const& ) { ++g_instance_counter; } @@ -38,59 +38,60 @@ struct tester BOOST_AUTO_TEST_CASE( stack_instance_deleter_test ) { { - boost::lockfree::stack q(128); - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); + boost::lockfree::stack< tester > q( 128 ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } BOOST_AUTO_TEST_CASE( spsc_queue_instance_deleter_test ) { { - boost::lockfree::spsc_queue q(128); - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); + boost::lockfree::spsc_queue< tester > q( 128 ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } BOOST_AUTO_TEST_CASE( spsc_queue_fixed_sized_instance_deleter_test ) { { - boost::lockfree::spsc_queue > q; - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); - q.push(tester()); + boost::lockfree::spsc_queue< tester, boost::lockfree::capacity< 128 > > q; + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); + q.push( tester() ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } struct no_default_init_tester { int value; - 
no_default_init_tester(int value) : value(value) + no_default_init_tester( int value ) : + value( value ) { ++g_instance_counter; } - no_default_init_tester(no_default_init_tester const& t) + no_default_init_tester( no_default_init_tester const& t ) { value = t.value; @@ -106,45 +107,45 @@ struct no_default_init_tester BOOST_AUTO_TEST_CASE( stack_instance_deleter_no_default_init_test ) { { - boost::lockfree::stack q(128); - q.push(no_default_init_tester(1)); - q.push(no_default_init_tester(2)); - q.push(no_default_init_tester(3)); - q.push(no_default_init_tester(4)); - q.push(no_default_init_tester(5)); + boost::lockfree::stack< no_default_init_tester > q( 128 ); + q.push( no_default_init_tester( 1 ) ); + q.push( no_default_init_tester( 2 ) ); + q.push( no_default_init_tester( 3 ) ); + q.push( no_default_init_tester( 4 ) ); + q.push( no_default_init_tester( 5 ) ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } BOOST_AUTO_TEST_CASE( spsc_queue_instance_deleter_no_default_init_test ) { { - boost::lockfree::spsc_queue q(128); - q.push(no_default_init_tester(1)); - q.push(no_default_init_tester(2)); - q.push(no_default_init_tester(3)); - q.push(no_default_init_tester(4)); - q.push(no_default_init_tester(5)); + boost::lockfree::spsc_queue< no_default_init_tester > q( 128 ); + q.push( no_default_init_tester( 1 ) ); + q.push( no_default_init_tester( 2 ) ); + q.push( no_default_init_tester( 3 ) ); + q.push( no_default_init_tester( 4 ) ); + q.push( no_default_init_tester( 5 ) ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } BOOST_AUTO_TEST_CASE( spsc_queue_fixed_sized_instance_deleter_no_default_init_test ) { { - boost::lockfree::spsc_queue > q; - q.push(no_default_init_tester(1)); - q.push(no_default_init_tester(2)); - 
q.push(no_default_init_tester(3)); - q.push(no_default_init_tester(4)); - q.push(no_default_init_tester(5)); + boost::lockfree::spsc_queue< no_default_init_tester, boost::lockfree::capacity< 128 > > q; + q.push( no_default_init_tester( 1 ) ); + q.push( no_default_init_tester( 2 ) ); + q.push( no_default_init_tester( 3 ) ); + q.push( no_default_init_tester( 4 ) ); + q.push( no_default_init_tester( 5 ) ); } - assert(g_instance_counter == 0); - BOOST_REQUIRE(g_instance_counter == 0); + assert( g_instance_counter == 0 ); + BOOST_TEST_REQUIRE( g_instance_counter == 0 ); } diff --git a/test/freelist_test.cpp b/test/freelist_test.cpp index 4eb9c5c4..02990b39 100644 --- a/test/freelist_test.cpp +++ b/test/freelist_test.cpp @@ -11,14 +11,14 @@ #include #include -#include #include +#include #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include @@ -27,104 +27,102 @@ using boost::lockfree::detail::atomic; -atomic test_running(false); +atomic< bool > test_running( false ); struct dummy { - dummy(void) + dummy( void ) { - if (test_running.load(boost::lockfree::detail::memory_order_relaxed)) - assert(allocated == 0); + if ( test_running.load( boost::lockfree::detail::memory_order_relaxed ) ) + assert( allocated == 0 ); allocated = 1; } - ~dummy(void) + ~dummy( void ) { - if (test_running.load(boost::lockfree::detail::memory_order_relaxed)) - assert(allocated == 1); + if ( test_running.load( boost::lockfree::detail::memory_order_relaxed ) ) + assert( allocated == 1 ); allocated = 0; } - size_t padding[2]; // for used for the freelist node - int allocated; + size_t padding[ 2 ]; // for used for the freelist node + int allocated; }; -template -void run_test(void) +template < typename freelist_type, bool threadsafe, bool bounded > +void run_test( void ) { - freelist_type fl(std::allocator(), 8); + freelist_type fl( std::allocator< int >(), 8 ); - std::set nodes; + std::set< dummy* > nodes; dummy d; - if (bounded) - 
test_running.store(true); + if ( bounded ) + test_running.store( true ); - for (int i = 0; i != 4; ++i) { - dummy * allocated = fl.template construct(); - BOOST_REQUIRE(nodes.find(allocated) == nodes.end()); - nodes.insert(allocated); + for ( int i = 0; i != 4; ++i ) { + dummy* allocated = fl.template construct< threadsafe, bounded >(); + BOOST_TEST_REQUIRE( ( nodes.find( allocated ) == nodes.end() ) ); + nodes.insert( allocated ); } - BOOST_FOREACH(dummy * d, nodes) - fl.template destruct(d); + BOOST_FOREACH ( dummy* d, nodes ) + fl.template destruct< threadsafe >( d ); nodes.clear(); - for (int i = 0; i != 4; ++i) - nodes.insert(fl.template construct()); + for ( int i = 0; i != 4; ++i ) + nodes.insert( fl.template construct< threadsafe, bounded >() ); - BOOST_FOREACH(dummy * d, nodes) - fl.template destruct(d); + BOOST_FOREACH ( dummy* d, nodes ) + fl.template destruct< threadsafe >( d ); - for (int i = 0; i != 4; ++i) - nodes.insert(fl.template construct()); + for ( int i = 0; i != 4; ++i ) + nodes.insert( fl.template construct< threadsafe, bounded >() ); - if (bounded) - test_running.store(false); + if ( bounded ) + test_running.store( false ); } -template -void run_tests(void) +template < bool bounded > +void run_tests( void ) { - run_test, true, bounded>(); - run_test, false, bounded>(); - run_test, true, bounded>(); + run_test< boost::lockfree::detail::freelist_stack< dummy >, true, bounded >(); + run_test< boost::lockfree::detail::freelist_stack< dummy >, false, bounded >(); + run_test< boost::lockfree::detail::fixed_size_freelist< dummy >, true, bounded >(); } BOOST_AUTO_TEST_CASE( freelist_tests ) { - run_tests(); - run_tests(); + run_tests< false >(); + run_tests< true >(); } -template -void oom_test(void) +template < typename freelist_type, bool threadsafe > +void oom_test( void ) { - const bool bounded = true; - freelist_type fl(std::allocator(), 8); + const bool bounded = true; + freelist_type fl( std::allocator< int >(), 8 ); - for (int i = 0; i != 
8; ++i) - fl.template construct(); + for ( int i = 0; i != 8; ++i ) + fl.template construct< threadsafe, bounded >(); - dummy * allocated = fl.template construct(); - BOOST_REQUIRE(allocated == NULL); + dummy* allocated = fl.template construct< threadsafe, bounded >(); + BOOST_TEST_REQUIRE( allocated == (dummy*)NULL ); } BOOST_AUTO_TEST_CASE( oom_tests ) { - oom_test, true >(); - oom_test, false >(); - oom_test, true >(); - oom_test, false >(); + oom_test< boost::lockfree::detail::freelist_stack< dummy >, true >(); + oom_test< boost::lockfree::detail::freelist_stack< dummy >, false >(); + oom_test< boost::lockfree::detail::fixed_size_freelist< dummy >, true >(); + oom_test< boost::lockfree::detail::fixed_size_freelist< dummy >, false >(); } -template +template < typename freelist_type, bool bounded > struct freelist_tester { - static const int size = 128; + static const int size = 128; static const int thread_count = 4; #ifndef BOOST_LOCKFREE_STRESS_TEST static const int operations_per_thread = 1000; @@ -132,63 +130,66 @@ struct freelist_tester static const int operations_per_thread = 100000; #endif - freelist_type fl; - boost::lockfree::queue allocated_nodes; + freelist_type fl; + boost::lockfree::queue< dummy* > allocated_nodes; - atomic running; - static_hashed_set working_set; + atomic< bool > running; + static_hashed_set< dummy*, 1 << 16 > working_set; - freelist_tester(void): - fl(std::allocator(), size), allocated_nodes(256) + freelist_tester( void ) : + fl( std::allocator< int >(), size ), + allocated_nodes( 256 ) {} void run() { running = true; - if (bounded) - test_running.store(true); + if ( bounded ) + test_running.store( true ); boost::thread_group alloc_threads; boost::thread_group dealloc_threads; - for (int i = 0; i != thread_count; ++i) - dealloc_threads.create_thread(boost::bind(&freelist_tester::deallocate, this)); + for ( int i = 0; i != thread_count; ++i ) + dealloc_threads.create_thread( boost::bind( &freelist_tester::deallocate, this ) ); - 
for (int i = 0; i != thread_count; ++i) - alloc_threads.create_thread(boost::bind(&freelist_tester::allocate, this)); + for ( int i = 0; i != thread_count; ++i ) + alloc_threads.create_thread( boost::bind( &freelist_tester::allocate, this ) ); alloc_threads.join_all(); - test_running.store(false); + test_running.store( false ); running = false; dealloc_threads.join_all(); } - void allocate(void) + void allocate( void ) { - for (long i = 0; i != operations_per_thread; ++i) { - for (;;) { - dummy * node = fl.template construct(); - if (node) { - bool success = working_set.insert(node); - assert(success); - allocated_nodes.push(node); + for ( long i = 0; i != operations_per_thread; ++i ) { + for ( ;; ) { + dummy* node = fl.template construct< true, bounded >(); + if ( node ) { + bool success = working_set.insert( node ); + (void)success; + assert( success ); + allocated_nodes.push( node ); break; } } } } - void deallocate(void) + void deallocate( void ) { - for (;;) { - dummy * node; - if (allocated_nodes.pop(node)) { - bool success = working_set.erase(node); - assert(success); - fl.template destruct(node); + for ( ;; ) { + dummy* node; + if ( allocated_nodes.pop( node ) ) { + bool success = working_set.erase( node ); + (void)success; + assert( success ); + fl.template destruct< true >( node ); } - if (running.load() == false) + if ( running.load() == false ) break; #ifdef __VXWORKS__ @@ -196,38 +197,39 @@ struct freelist_tester #endif } - dummy * node; - while (allocated_nodes.pop(node)) { - bool success = working_set.erase(node); - assert(success); - fl.template destruct(node); + dummy* node; + while ( allocated_nodes.pop( node ) ) { + bool success = working_set.erase( node ); + (void)success; + assert( success ); + fl.template destruct< true >( node ); } } }; -template +template < typename Tester > void run_tester() { - boost::scoped_ptr tester (new Tester); + boost::scoped_ptr< Tester > tester( new Tester ); tester->run(); } BOOST_AUTO_TEST_CASE( 
unbounded_freelist_test ) { - typedef freelist_tester, false > test_type; - run_tester(); + typedef freelist_tester< boost::lockfree::detail::freelist_stack< dummy >, false > test_type; + run_tester< test_type >(); } BOOST_AUTO_TEST_CASE( bounded_freelist_test ) { - typedef freelist_tester, true > test_type; - run_tester(); + typedef freelist_tester< boost::lockfree::detail::freelist_stack< dummy >, true > test_type; + run_tester< test_type >(); } BOOST_AUTO_TEST_CASE( fixed_size_freelist_test ) { - typedef freelist_tester, true > test_type; - run_tester(); + typedef freelist_tester< boost::lockfree::detail::fixed_size_freelist< dummy >, true > test_type; + run_tester< test_type >(); } diff --git a/test/queue_bounded_stress_test.cpp b/test/queue_bounded_stress_test.cpp index aa882ce1..bdd1db1e 100644 --- a/test/queue_bounded_stress_test.cpp +++ b/test/queue_bounded_stress_test.cpp @@ -10,18 +10,18 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" BOOST_AUTO_TEST_CASE( queue_test_bounded ) { - typedef queue_stress_tester tester_type; - boost::scoped_ptr tester(new tester_type(4, 4) ); + typedef queue_stress_tester< true > tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::queue q(128); - tester->run(q); + boost::lockfree::queue< long > q( 128 ); + tester->run( q ); } diff --git a/test/queue_fixedsize_stress_test.cpp b/test/queue_fixedsize_stress_test.cpp index 8ae46f27..944bf1ae 100644 --- a/test/queue_fixedsize_stress_test.cpp +++ b/test/queue_fixedsize_stress_test.cpp @@ -10,9 +10,9 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" @@ -20,9 +20,9 @@ BOOST_AUTO_TEST_CASE( queue_test_fixed_size ) { - typedef queue_stress_tester<> tester_type; - boost::scoped_ptr tester(new tester_type(4, 4) ); + typedef queue_stress_tester<> 
tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::queue > q; - tester->run(q); + boost::lockfree::queue< long, boost::lockfree::capacity< 8 > > q; + tester->run( q ); } diff --git a/test/queue_interprocess_test.cpp b/test/queue_interprocess_test.cpp index 78c0ce9f..eb227417 100644 --- a/test/queue_interprocess_test.cpp +++ b/test/queue_interprocess_test.cpp @@ -5,53 +5,57 @@ // http://www.boost.org/LICENSE_1_0.txt) #include //std::system -#include #include #include #include using namespace boost::interprocess; -typedef allocator ShmemAllocator; -typedef boost::lockfree::queue, - boost::lockfree::capacity<2048> - > queue; +typedef allocator< int, managed_shared_memory::segment_manager > ShmemAllocator; +typedef boost::lockfree::queue< int, boost::lockfree::allocator< ShmemAllocator >, boost::lockfree::capacity< 2048 > > queue; -int main (int argc, char *argv[]) +int main( int argc, char* argv[] ) { - if(argc == 1){ + if ( argc == 1 ) { struct shm_remove { - shm_remove() { shared_memory_object::remove("boost_queue_interprocess_test_shm"); } - ~shm_remove(){ shared_memory_object::remove("boost_queue_interprocess_test_shm"); } + shm_remove() + { + shared_memory_object::remove( "boost_queue_interprocess_test_shm" ); + } + ~shm_remove() + { + shared_memory_object::remove( "boost_queue_interprocess_test_shm" ); + } } remover; - managed_shared_memory segment(create_only, "boost_queue_interprocess_test_shm", 262144); - ShmemAllocator alloc_inst (segment.get_segment_manager()); + managed_shared_memory segment( create_only, "boost_queue_interprocess_test_shm", 262144 ); + ShmemAllocator alloc_inst( segment.get_segment_manager() ); - queue * q = segment.construct("queue")(alloc_inst); - for (int i = 0; i != 1024; ++i) - q->push(i); + queue* q = segment.construct< queue >( "queue" )( alloc_inst ); + for ( int i = 0; i != 1024; ++i ) + q->push( i ); - std::string s(argv[0]); s += " child "; - if(0 != std::system(s.c_str())) + 
std::string s( argv[ 0 ] ); + s += " child "; + if ( 0 != std::system( s.c_str() ) ) return 1; - while (!q->empty()) + while ( !q->empty() ) boost::thread::yield(); return 0; } else { - managed_shared_memory segment(open_only, "boost_queue_interprocess_test_shm"); - queue * q = segment.find("queue").first; + managed_shared_memory segment( open_only, "boost_queue_interprocess_test_shm" ); + queue* q = segment.find< queue >( "queue" ).first; int from_queue; - for (int i = 0; i != 1024; ++i) { - bool success = q->pop(from_queue); - assert (success); - assert (from_queue == i); + for ( int i = 0; i != 1024; ++i ) { + bool success = q->pop( from_queue ); + (void)success; + assert( success ); + assert( from_queue == i ); } - segment.destroy("queue"); + segment.destroy< queue >( "queue" ); } return 0; } diff --git a/test/queue_test.cpp b/test/queue_test.cpp index 79f6f0ed..56b77d54 100644 --- a/test/queue_test.cpp +++ b/test/queue_test.cpp @@ -11,9 +11,9 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include @@ -26,169 +26,168 @@ using namespace std; BOOST_AUTO_TEST_CASE( simple_queue_test ) { - queue f(64); + queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); + BOOST_TEST_WARN( f.is_lock_free() ); - BOOST_REQUIRE(f.empty()); - f.push(1); - f.push(2); + BOOST_TEST_REQUIRE( f.empty() ); + f.push( 1 ); + f.push( 2 ); - int i1(0), i2(0); + int i1( 0 ), i2( 0 ); - BOOST_REQUIRE(f.pop(i1)); - BOOST_REQUIRE_EQUAL(i1, 1); + BOOST_TEST_REQUIRE( f.pop( i1 ) ); + BOOST_TEST_REQUIRE( i1 == 1 ); - BOOST_REQUIRE(f.pop(i2)); - BOOST_REQUIRE_EQUAL(i2, 2); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.pop( i2 ) ); + BOOST_TEST_REQUIRE( i2 == 2 ); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( simple_queue_test_capacity ) { - queue > f; + queue< int, capacity< 64 > > f; - BOOST_WARN(f.is_lock_free()); + BOOST_TEST_WARN( f.is_lock_free() ); - BOOST_REQUIRE(f.empty()); - f.push(1); - f.push(2); + 
BOOST_TEST_REQUIRE( f.empty() ); + f.push( 1 ); + f.push( 2 ); - int i1(0), i2(0); + int i1( 0 ), i2( 0 ); - BOOST_REQUIRE(f.pop(i1)); - BOOST_REQUIRE_EQUAL(i1, 1); + BOOST_TEST_REQUIRE( f.pop( i1 ) ); + BOOST_TEST_REQUIRE( i1 == 1 ); - BOOST_REQUIRE(f.pop(i2)); - BOOST_REQUIRE_EQUAL(i2, 2); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.pop( i2 ) ); + BOOST_TEST_REQUIRE( i2 == 2 ); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( unsafe_queue_test ) { - queue f(64); + queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - int i1(0), i2(0); + int i1( 0 ), i2( 0 ); - f.unsynchronized_push(1); - f.unsynchronized_push(2); + f.unsynchronized_push( 1 ); + f.unsynchronized_push( 2 ); - BOOST_REQUIRE(f.unsynchronized_pop(i1)); - BOOST_REQUIRE_EQUAL(i1, 1); + BOOST_TEST_REQUIRE( f.unsynchronized_pop( i1 ) ); + BOOST_TEST_REQUIRE( i1 == 1 ); - BOOST_REQUIRE(f.unsynchronized_pop(i2)); - BOOST_REQUIRE_EQUAL(i2, 2); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.unsynchronized_pop( i2 ) ); + BOOST_TEST_REQUIRE( i2 == 2 ); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( queue_consume_one_test ) { - queue f(64); + queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - bool success1 = f.consume_one(test_equal(1)); - bool success2 = f.consume_one(test_equal(2)); + bool success1 = f.consume_one( test_equal( 1 ) ); + bool success2 = f.consume_one( test_equal( 2 ) ); #else - bool success1 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 1); - }); + bool success1 = f.consume_one( []( int i ) { + BOOST_TEST_REQUIRE( i == 1 ); + } ); - bool success2 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 2); - }); + bool success2 = f.consume_one( []( int i ) { + 
BOOST_TEST_REQUIRE( i == 2 ); + } ); #endif - BOOST_REQUIRE(success1); - BOOST_REQUIRE(success2); + BOOST_TEST_REQUIRE( success1 ); + BOOST_TEST_REQUIRE( success2 ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( queue_consume_all_test ) { - queue f(64); + queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - size_t consumed = f.consume_all(dummy_functor()); + size_t consumed = f.consume_all( dummy_functor() ); #else - size_t consumed = f.consume_all([] (int i) { - }); + size_t consumed = f.consume_all( []( int i ) {} ); #endif - BOOST_REQUIRE_EQUAL(consumed, 2u); + BOOST_TEST_REQUIRE( consumed == 2u ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( queue_convert_pop_test ) { - queue f(128); - BOOST_REQUIRE(f.empty()); - f.push(new int(1)); - f.push(new int(2)); - f.push(new int(3)); - f.push(new int(4)); + queue< int* > f( 128 ); + BOOST_TEST_REQUIRE( f.empty() ); + f.push( new int( 1 ) ); + f.push( new int( 2 ) ); + f.push( new int( 3 ) ); + f.push( new int( 4 ) ); { - int * i1; + int* i1; - BOOST_REQUIRE(f.pop(i1)); - BOOST_REQUIRE_EQUAL(*i1, 1); + BOOST_TEST_REQUIRE( f.pop( i1 ) ); + BOOST_TEST_REQUIRE( *i1 == 1 ); delete i1; } { - boost::shared_ptr i2; - BOOST_REQUIRE(f.pop(i2)); - BOOST_REQUIRE_EQUAL(*i2, 2); + boost::shared_ptr< int > i2; + BOOST_TEST_REQUIRE( f.pop( i2 ) ); + BOOST_TEST_REQUIRE( *i2 == 2 ); } { #ifdef BOOST_NO_AUTO_PTR - unique_ptr i3; + unique_ptr< int > i3; #else - auto_ptr i3; + auto_ptr< int > i3; #endif - BOOST_REQUIRE(f.pop(i3)); + BOOST_TEST_REQUIRE( f.pop( i3 ) ); - BOOST_REQUIRE_EQUAL(*i3, 3); + BOOST_TEST_REQUIRE( *i3 == 3 ); } { - boost::shared_ptr i4; - BOOST_REQUIRE(f.pop(i4)); + boost::shared_ptr< int > i4; + BOOST_TEST_REQUIRE( f.pop( i4 ) ); - 
BOOST_REQUIRE_EQUAL(*i4, 4); + BOOST_TEST_REQUIRE( *i4 == 4 ); } - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( reserve_test ) { typedef boost::lockfree::queue< void* > memory_queue; - memory_queue ms(1); - ms.reserve(1); - ms.reserve_unsafe(1); + memory_queue ms( 1 ); + ms.reserve( 1 ); + ms.reserve_unsafe( 1 ); } diff --git a/test/queue_unbounded_stress_test.cpp b/test/queue_unbounded_stress_test.cpp index 10b70e2b..1b00119b 100644 --- a/test/queue_unbounded_stress_test.cpp +++ b/test/queue_unbounded_stress_test.cpp @@ -10,18 +10,18 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" BOOST_AUTO_TEST_CASE( queue_test_unbounded ) { - typedef queue_stress_tester tester_type; - boost::scoped_ptr tester(new tester_type(4, 4) ); + typedef queue_stress_tester< false > tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::queue q(128); - tester->run(q); + boost::lockfree::queue< long > q( 128 ); + tester->run( q ); } diff --git a/test/spsc_queue_stress_test.cpp b/test/spsc_queue_stress_test.cpp index a63afd0e..45d15bdf 100644 --- a/test/spsc_queue_stress_test.cpp +++ b/test/spsc_queue_stress_test.cpp @@ -9,94 +9,96 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include #include -#include "test_helpers.hpp" #include "test_common.hpp" +#include "test_helpers.hpp" using namespace boost; using namespace boost::lockfree; using namespace std; #ifndef BOOST_LOCKFREE_STRESS_TEST -static const boost::uint32_t nodes_per_thread = 100000; +static const size_t nodes_per_thread = 100000; #else -static const boost::uint32_t nodes_per_thread = 100000000; +static const size_t nodes_per_thread = 100000000; #endif struct spsc_queue_tester { - spsc_queue > sf; + spsc_queue< int, capacity< 128 > > sf; - boost::lockfree::detail::atomic 
spsc_queue_cnt, received_nodes; + boost::lockfree::detail::atomic< long > spsc_queue_cnt, received_nodes; // In VxWorks one RTP just supports 65535 objects #ifndef __VXWORKS__ - static_hashed_set working_set; + static_hashed_set< int, 1 << 16 > working_set; #else - static_hashed_set working_set; + static_hashed_set< int, 1 << 15 > working_set; #endif - spsc_queue_tester(void): - spsc_queue_cnt(0), received_nodes(0) + spsc_queue_tester( void ) : + spsc_queue_cnt( 0 ), + received_nodes( 0 ) {} - void add(void) + void add( void ) { - for (boost::uint32_t i = 0; i != nodes_per_thread; ++i) { - int id = generate_id(); - working_set.insert(id); + for ( size_t i = 0; i != nodes_per_thread; ++i ) { + int id = generate_id< int >(); + working_set.insert( id ); - while (sf.push(id) == false) - {} + while ( sf.push( id ) == false ) {} ++spsc_queue_cnt; } running = false; } - bool get_element(void) + bool get_element( void ) { - int data; - bool success = sf.pop(data); + int data; + bool success = sf.pop( data ); - if (success) { + if ( success ) { ++received_nodes; --spsc_queue_cnt; - bool erased = working_set.erase(data); - assert(erased); + bool erased = working_set.erase( data ); + (void)erased; + assert( erased ); return true; } else return false; } - boost::lockfree::detail::atomic running; + boost::lockfree::detail::atomic< bool > running; - void get(void) + void get( void ) { - for(;;) { + for ( ;; ) { bool success = get_element(); - if (!running && !success) + if ( !running && !success ) break; } - while ( get_element() ); + while ( get_element() ) + ; } - void run(void) + void run( void ) { running = true; - BOOST_REQUIRE(sf.empty()); + BOOST_TEST_REQUIRE( sf.empty() ); - boost::thread reader(boost::bind(&spsc_queue_tester::get, this)); - boost::thread writer(boost::bind(&spsc_queue_tester::add, this)); + boost::thread reader( boost::bind( &spsc_queue_tester::get, this ) ); + boost::thread writer( boost::bind( &spsc_queue_tester::add, this ) ); cout << "reader and 
writer threads created" << endl; writer.join(); @@ -104,75 +106,76 @@ struct spsc_queue_tester reader.join(); - BOOST_REQUIRE_EQUAL(received_nodes, nodes_per_thread); - BOOST_REQUIRE_EQUAL(spsc_queue_cnt, 0); - BOOST_REQUIRE(sf.empty()); - BOOST_REQUIRE(working_set.count_nodes() == 0); + BOOST_TEST_REQUIRE( received_nodes == nodes_per_thread ); + BOOST_TEST_REQUIRE( spsc_queue_cnt == 0 ); + BOOST_TEST_REQUIRE( sf.empty() ); + BOOST_TEST_REQUIRE( working_set.count_nodes() == 0 ); } }; BOOST_AUTO_TEST_CASE( spsc_queue_test_caching ) { - boost::shared_ptr test1(new spsc_queue_tester); + boost::shared_ptr< spsc_queue_tester > test1( new spsc_queue_tester ); test1->run(); } struct spsc_queue_tester_buffering { - spsc_queue > sf; + spsc_queue< int, capacity< 128 > > sf; - boost::lockfree::detail::atomic spsc_queue_cnt; + boost::lockfree::detail::atomic< long > spsc_queue_cnt; // In VxWorks one RTP just supports 65535 objects #ifndef __VXWORKS__ - static_hashed_set working_set; + static_hashed_set< int, 1 << 16 > working_set; #else - static_hashed_set working_set; + static_hashed_set< int, 1 << 15 > working_set; #endif - boost::lockfree::detail::atomic received_nodes; + boost::lockfree::detail::atomic< size_t > received_nodes; - spsc_queue_tester_buffering(void): - spsc_queue_cnt(0), received_nodes(0) + spsc_queue_tester_buffering( void ) : + spsc_queue_cnt( 0 ), + received_nodes( 0 ) {} static const size_t buf_size = 5; - void add(void) + void add( void ) { - boost::array input_buffer; - for (boost::uint32_t i = 0; i != nodes_per_thread; i+=buf_size) { - for (size_t i = 0; i != buf_size; ++i) { - int id = generate_id(); - working_set.insert(id); - input_buffer[i] = id; + boost::array< int, buf_size > input_buffer; + for ( size_t i = 0; i != nodes_per_thread; i += buf_size ) { + for ( size_t i = 0; i != buf_size; ++i ) { + int id = generate_id< int >(); + working_set.insert( id ); + input_buffer[ i ] = id; } size_t pushed = 0; do { - pushed += 
sf.push(input_buffer.c_array() + pushed, - input_buffer.size() - pushed); - } while (pushed != buf_size); + pushed += sf.push( input_buffer.c_array() + pushed, input_buffer.size() - pushed ); + } while ( pushed != buf_size ); - spsc_queue_cnt+=buf_size; + spsc_queue_cnt += buf_size; } running = false; } - bool get_elements(void) + bool get_elements( void ) { - boost::array output_buffer; + boost::array< int, buf_size > output_buffer; - size_t popd = sf.pop(output_buffer.c_array(), output_buffer.size()); + size_t popd = sf.pop( output_buffer.c_array(), output_buffer.size() ); - if (popd) { - received_nodes += popd; - spsc_queue_cnt -= popd; + if ( popd ) { + received_nodes += size_t( popd ); + spsc_queue_cnt -= long( popd ); - for (size_t i = 0; i != popd; ++i) { - bool erased = working_set.erase(output_buffer[i]); - assert(erased); + for ( size_t i = 0; i != popd; ++i ) { + bool erased = working_set.erase( output_buffer[ i ] ); + (void)erased; + assert( erased ); } return true; @@ -180,25 +183,26 @@ struct spsc_queue_tester_buffering return false; } - boost::lockfree::detail::atomic running; + boost::lockfree::detail::atomic< bool > running; - void get(void) + void get( void ) { - for(;;) { + for ( ;; ) { bool success = get_elements(); - if (!running && !success) + if ( !running && !success ) break; } - while ( get_elements() ); + while ( get_elements() ) + ; } - void run(void) + void run( void ) { running = true; - boost::thread reader(boost::bind(&spsc_queue_tester_buffering::get, this)); - boost::thread writer(boost::bind(&spsc_queue_tester_buffering::add, this)); + boost::thread reader( boost::bind( &spsc_queue_tester_buffering::get, this ) ); + boost::thread writer( boost::bind( &spsc_queue_tester_buffering::add, this ) ); cout << "reader and writer threads created" << endl; writer.join(); @@ -206,17 +210,16 @@ struct spsc_queue_tester_buffering reader.join(); - BOOST_REQUIRE_EQUAL(received_nodes, nodes_per_thread); - BOOST_REQUIRE_EQUAL(spsc_queue_cnt, 0); - 
BOOST_REQUIRE(sf.empty()); - BOOST_REQUIRE(working_set.count_nodes() == 0); + BOOST_TEST_REQUIRE( received_nodes == nodes_per_thread ); + BOOST_TEST_REQUIRE( spsc_queue_cnt == 0 ); + BOOST_TEST_REQUIRE( sf.empty() ); + BOOST_TEST_REQUIRE( working_set.count_nodes() == 0 ); } }; BOOST_AUTO_TEST_CASE( spsc_queue_test_buffering ) { - boost::shared_ptr test1(new spsc_queue_tester_buffering); + boost::shared_ptr< spsc_queue_tester_buffering > test1( new spsc_queue_tester_buffering ); test1->run(); } - diff --git a/test/spsc_queue_test.cpp b/test/spsc_queue_test.cpp index 99f393f0..2a8cb324 100644 --- a/test/spsc_queue_test.cpp +++ b/test/spsc_queue_test.cpp @@ -8,16 +8,16 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include #include -#include "test_helpers.hpp" #include "test_common.hpp" +#include "test_helpers.hpp" using namespace boost; using namespace boost::lockfree; @@ -25,106 +25,108 @@ using namespace std; BOOST_AUTO_TEST_CASE( simple_spsc_queue_test ) { - spsc_queue > f; + spsc_queue< int, capacity< 64 > > f; - BOOST_REQUIRE(f.empty()); - f.push(1); - f.push(2); + BOOST_TEST_REQUIRE( f.empty() ); + f.push( 1 ); + f.push( 2 ); - int i1(0), i2(0); + int i1( 0 ), i2( 0 ); - BOOST_REQUIRE(f.pop(i1)); - BOOST_REQUIRE_EQUAL(i1, 1); + BOOST_TEST_REQUIRE( f.pop( i1 ) ); + BOOST_TEST_REQUIRE( i1 == 1 ); - BOOST_REQUIRE(f.pop(i2)); - BOOST_REQUIRE_EQUAL(i2, 2); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.pop( i2 ) ); + BOOST_TEST_REQUIRE( i2 == 2 ); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( simple_spsc_queue_test_compile_time_size ) { - spsc_queue f(64); + spsc_queue< int > f( 64 ); - BOOST_REQUIRE(f.empty()); - f.push(1); - f.push(2); + BOOST_TEST_REQUIRE( f.empty() ); + f.push( 1 ); + f.push( 2 ); - int i1(0), i2(0); + int i1( 0 ), i2( 0 ); - BOOST_REQUIRE(f.pop(i1)); - BOOST_REQUIRE_EQUAL(i1, 1); + BOOST_TEST_REQUIRE( f.pop( i1 ) ); + BOOST_TEST_REQUIRE( i1 == 1 ); - 
BOOST_REQUIRE(f.pop(i2)); - BOOST_REQUIRE_EQUAL(i2, 2); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.pop( i2 ) ); + BOOST_TEST_REQUIRE( i2 == 2 ); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( ranged_push_test ) { - spsc_queue stk(64); + spsc_queue< int > stk( 64 ); - int data[2] = {1, 2}; + int data[ 2 ] = { 1, 2 }; - BOOST_REQUIRE_EQUAL(stk.push(data, data + 2), data + 2); + BOOST_TEST_REQUIRE( stk.push( data, data + 2 ) == data + 2 ); int out; - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(!stk.pop(out)); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( !stk.pop( out ) ); } BOOST_AUTO_TEST_CASE( spsc_queue_consume_one_test ) { - spsc_queue f(64); + spsc_queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - bool success1 = f.consume_one(test_equal(1)); - bool success2 = f.consume_one(test_equal(2)); + bool success1 = f.consume_one( test_equal( 1 ) ); + bool success2 = f.consume_one( test_equal( 2 ) ); #else - bool success1 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 1); - }); + bool success1 = f.consume_one( []( int i ) { + BOOST_TEST_REQUIRE( i == 1 ); + } ); - bool success2 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 2); - }); + bool success2 = f.consume_one( []( int i ) { + BOOST_TEST_REQUIRE( i == 2 ); + } ); #endif - BOOST_REQUIRE(success1); - BOOST_REQUIRE(success2); + BOOST_TEST_REQUIRE( success1 ); + BOOST_TEST_REQUIRE( success2 ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( spsc_queue_consume_all_test ) { - spsc_queue f(64); + spsc_queue< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); 
- BOOST_REQUIRE(f.empty()); + BOOST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - size_t consumed = f.consume_all(dummy_functor()); + size_t consumed = f.consume_all( dummy_functor() ); #else - size_t consumed = f.consume_all([] (int i) { - }); + size_t consumed = f.consume_all( []( int i ) {} ); #endif - BOOST_REQUIRE_EQUAL(consumed, 2u); + BOOST_TEST_REQUIRE( consumed == 2u ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } -enum { +enum +{ pointer_and_size, reference_to_array, iterator_pair, @@ -133,40 +135,39 @@ enum { BOOST_AUTO_TEST_CASE( spsc_queue_capacity_test ) { - spsc_queue > f; + spsc_queue< int, capacity< 2 > > f; - BOOST_REQUIRE(f.push(1)); - BOOST_REQUIRE(f.push(2)); - BOOST_REQUIRE(!f.push(3)); + BOOST_TEST_REQUIRE( f.push( 1 ) ); + BOOST_TEST_REQUIRE( f.push( 2 ) ); + BOOST_TEST_REQUIRE( !f.push( 3 ) ); - spsc_queue g(2); + spsc_queue< int > g( 2 ); - BOOST_REQUIRE(g.push(1)); - BOOST_REQUIRE(g.push(2)); - BOOST_REQUIRE(!g.push(3)); + BOOST_TEST_REQUIRE( g.push( 1 ) ); + BOOST_TEST_REQUIRE( g.push( 2 ) ); + BOOST_TEST_REQUIRE( !g.push( 3 ) ); } -template -void spsc_queue_avail_test_run(QueueType & q) +template < typename QueueType > +void spsc_queue_avail_test_run( QueueType& q ) { - BOOST_REQUIRE_EQUAL( q.write_available(), 16 ); - BOOST_REQUIRE_EQUAL( q.read_available(), 0 ); + BOOST_TEST_REQUIRE( q.write_available() == 16 ); + BOOST_TEST_REQUIRE( q.read_available() == 0 ); - for (size_t i = 0; i != 8; ++i) { - BOOST_REQUIRE_EQUAL( q.write_available(), 16 - i ); - BOOST_REQUIRE_EQUAL( q.read_available(), i ); + for ( size_t i = 0; i != 8; ++i ) { + BOOST_TEST_REQUIRE( q.write_available() == 16 - i ); + BOOST_TEST_REQUIRE( q.read_available() == i ); q.push( 1 ); } // empty queue int dummy; - while (q.pop(dummy)) - {} + while ( q.pop( dummy ) ) {} - for (size_t i = 0; i != 16; ++i) { - BOOST_REQUIRE_EQUAL( q.write_available(), 16 
- i ); - BOOST_REQUIRE_EQUAL( q.read_available(), i ); + for ( size_t i = 0; i != 16; ++i ) { + BOOST_TEST_REQUIRE( q.write_available() == 16 - i ); + BOOST_TEST_REQUIRE( q.read_available() == i ); q.push( 1 ); } @@ -174,234 +175,200 @@ void spsc_queue_avail_test_run(QueueType & q) BOOST_AUTO_TEST_CASE( spsc_queue_avail_test ) { - spsc_queue > f; - spsc_queue_avail_test_run(f); + spsc_queue< int, capacity< 16 > > f; + spsc_queue_avail_test_run( f ); - spsc_queue g(16); - spsc_queue_avail_test_run(g); + spsc_queue< int > g( 16 ); + spsc_queue_avail_test_run( g ); } -template -void spsc_queue_buffer_push_return_value(void) +template < int EnqueueMode > +void spsc_queue_buffer_push_return_value( void ) { - const size_t xqueue_size = 64; - const size_t buffer_size = 100; - spsc_queue > rb; + const size_t xqueue_size = 64; + const size_t buffer_size = 100; + spsc_queue< int, capacity< 100 > > rb; - int data[xqueue_size]; - for (size_t i = 0; i != xqueue_size; ++i) - data[i] = (int)i*2; + int data[ xqueue_size ]; + for ( size_t i = 0; i != xqueue_size; ++i ) + data[ i ] = (int)i * 2; - switch (EnqueueMode) { - case pointer_and_size: - BOOST_REQUIRE_EQUAL(rb.push(data, xqueue_size), xqueue_size); - break; + switch ( EnqueueMode ) { + case pointer_and_size: BOOST_TEST_REQUIRE( rb.push( data, xqueue_size ) == xqueue_size ); break; - case reference_to_array: - BOOST_REQUIRE_EQUAL(rb.push(data), xqueue_size); - break; + case reference_to_array: BOOST_TEST_REQUIRE( rb.push( data ) == xqueue_size ); break; - case iterator_pair: - BOOST_REQUIRE_EQUAL(rb.push(data, data + xqueue_size), data + xqueue_size); - break; + case iterator_pair: BOOST_TEST_REQUIRE( rb.push( data, data + xqueue_size ) == data + xqueue_size ); break; - default: - assert(false); + default: assert( false ); } - switch (EnqueueMode) { - case pointer_and_size: - BOOST_REQUIRE_EQUAL(rb.push(data, xqueue_size), buffer_size - xqueue_size); - break; + switch ( EnqueueMode ) { + case pointer_and_size: 
BOOST_TEST_REQUIRE( rb.push( data, xqueue_size ) == buffer_size - xqueue_size ); break; - case reference_to_array: - BOOST_REQUIRE_EQUAL(rb.push(data), buffer_size - xqueue_size); - break; + case reference_to_array: BOOST_TEST_REQUIRE( rb.push( data ) == buffer_size - xqueue_size ); break; case iterator_pair: - BOOST_REQUIRE_EQUAL(rb.push(data, data + xqueue_size), data + buffer_size - xqueue_size); + BOOST_TEST_REQUIRE( rb.push( data, data + xqueue_size ) == data + buffer_size - xqueue_size ); break; - default: - assert(false); + default: assert( false ); } } BOOST_AUTO_TEST_CASE( spsc_queue_buffer_push_return_value_test ) { - spsc_queue_buffer_push_return_value(); - spsc_queue_buffer_push_return_value(); - spsc_queue_buffer_push_return_value(); + spsc_queue_buffer_push_return_value< pointer_and_size >(); + spsc_queue_buffer_push_return_value< reference_to_array >(); + spsc_queue_buffer_push_return_value< iterator_pair >(); } -template -void spsc_queue_buffer_push(void) +template < int EnqueueMode, int ElementCount, int BufferSize, int NumberOfIterations > +void spsc_queue_buffer_push( void ) { - const size_t xqueue_size = ElementCount; - spsc_queue > rb; + const size_t xqueue_size = ElementCount; + spsc_queue< int, capacity< BufferSize > > rb; - int data[xqueue_size]; - for (size_t i = 0; i != xqueue_size; ++i) - data[i] = (int)i*2; + int data[ xqueue_size ]; + for ( size_t i = 0; i != xqueue_size; ++i ) + data[ i ] = (int)i * 2; - std::vector vdata(data, data + xqueue_size); + std::vector< int > vdata( data, data + xqueue_size ); - for (int i = 0; i != NumberOfIterations; ++i) { - BOOST_REQUIRE(rb.empty()); - switch (EnqueueMode) { - case pointer_and_size: - BOOST_REQUIRE_EQUAL(rb.push(data, xqueue_size), xqueue_size); - break; + for ( int i = 0; i != NumberOfIterations; ++i ) { + BOOST_TEST_REQUIRE( rb.empty() ); + switch ( EnqueueMode ) { + case pointer_and_size: BOOST_TEST_REQUIRE( rb.push( data, xqueue_size ) == xqueue_size ); break; - case 
reference_to_array: - BOOST_REQUIRE_EQUAL(rb.push(data), xqueue_size); - break; + case reference_to_array: BOOST_TEST_REQUIRE( rb.push( data ) == xqueue_size ); break; - case iterator_pair: - BOOST_REQUIRE_EQUAL(rb.push(data, data + xqueue_size), data + xqueue_size); - break; + case iterator_pair: BOOST_TEST_REQUIRE( rb.push( data, data + xqueue_size ) == data + xqueue_size ); break; - default: - assert(false); + default: assert( false ); } - int out[xqueue_size]; - BOOST_REQUIRE_EQUAL(rb.pop(out, xqueue_size), xqueue_size); - for (size_t i = 0; i != xqueue_size; ++i) - BOOST_REQUIRE_EQUAL(data[i], out[i]); + int out[ xqueue_size ]; + BOOST_TEST_REQUIRE( rb.pop( out, xqueue_size ) == xqueue_size ); + for ( size_t i = 0; i != xqueue_size; ++i ) + BOOST_TEST_REQUIRE( data[ i ] == out[ i ] ); } } BOOST_AUTO_TEST_CASE( spsc_queue_buffer_push_test ) { - spsc_queue_buffer_push(); - spsc_queue_buffer_push(); - spsc_queue_buffer_push(); + spsc_queue_buffer_push< pointer_and_size, 7, 16, 64 >(); + spsc_queue_buffer_push< reference_to_array, 7, 16, 64 >(); + spsc_queue_buffer_push< iterator_pair, 7, 16, 64 >(); } -template -void spsc_queue_buffer_pop(void) +template < int EnqueueMode, int ElementCount, int BufferSize, int NumberOfIterations > +void spsc_queue_buffer_pop( void ) { - const size_t xqueue_size = ElementCount; - spsc_queue > rb; + const size_t xqueue_size = ElementCount; + spsc_queue< int, capacity< BufferSize > > rb; - int data[xqueue_size]; - for (size_t i = 0; i != xqueue_size; ++i) - data[i] = (int)i*2; + int data[ xqueue_size ]; + for ( size_t i = 0; i != xqueue_size; ++i ) + data[ i ] = (int)i * 2; - std::vector vdata(data, data + xqueue_size); + std::vector< int > vdata( data, data + xqueue_size ); - for (int i = 0; i != NumberOfIterations; ++i) { - BOOST_REQUIRE(rb.empty()); - BOOST_REQUIRE_EQUAL(rb.push(data), xqueue_size); + for ( int i = 0; i != NumberOfIterations; ++i ) { + BOOST_TEST_REQUIRE( rb.empty() ); + BOOST_TEST_REQUIRE( rb.push( data ) == 
xqueue_size ); - int out[xqueue_size]; - vector vout; + int out[ xqueue_size ]; + vector< int > vout; - switch (EnqueueMode) { - case pointer_and_size: - BOOST_REQUIRE_EQUAL(rb.pop(out, xqueue_size), xqueue_size); - break; + switch ( EnqueueMode ) { + case pointer_and_size: BOOST_TEST_REQUIRE( rb.pop( out, xqueue_size ) == xqueue_size ); break; - case reference_to_array: - BOOST_REQUIRE_EQUAL(rb.pop(out), xqueue_size); - break; + case reference_to_array: BOOST_TEST_REQUIRE( rb.pop( out ) == xqueue_size ); break; - case output_iterator_: - BOOST_REQUIRE_EQUAL(rb.pop(std::back_inserter(vout)), xqueue_size); - break; + case output_iterator_: BOOST_TEST_REQUIRE( rb.pop( std::back_inserter( vout ) ) == xqueue_size ); break; - default: - assert(false); + default: assert( false ); } - if (EnqueueMode == output_iterator_) { - BOOST_REQUIRE_EQUAL(vout.size(), xqueue_size); - for (size_t i = 0; i != xqueue_size; ++i) - BOOST_REQUIRE_EQUAL(data[i], vout[i]); + if ( EnqueueMode == output_iterator_ ) { + BOOST_TEST_REQUIRE( vout.size() == xqueue_size ); + for ( size_t i = 0; i != xqueue_size; ++i ) + BOOST_TEST_REQUIRE( data[ i ] == vout[ i ] ); } else { - for (size_t i = 0; i != xqueue_size; ++i) - BOOST_REQUIRE_EQUAL(data[i], out[i]); + for ( size_t i = 0; i != xqueue_size; ++i ) + BOOST_TEST_REQUIRE( data[ i ] == out[ i ] ); } } } BOOST_AUTO_TEST_CASE( spsc_queue_buffer_pop_test ) { - spsc_queue_buffer_pop(); - spsc_queue_buffer_pop(); - spsc_queue_buffer_pop(); + spsc_queue_buffer_pop< pointer_and_size, 7, 16, 64 >(); + spsc_queue_buffer_pop< reference_to_array, 7, 16, 64 >(); + spsc_queue_buffer_pop< output_iterator_, 7, 16, 64 >(); } // Test front() and pop() template < typename Queue > -void spsc_queue_front_pop(Queue& queue) +void spsc_queue_front_pop( Queue& queue ) { - queue.push(1); - queue.push(2); - queue.push(3); + queue.push( 1 ); + queue.push( 2 ); + queue.push( 3 ); // front as ref and const ref - int& rfront = queue.front(); + int& rfront = queue.front(); 
const int& crfront = queue.front(); - BOOST_REQUIRE_EQUAL(1, rfront); - BOOST_REQUIRE_EQUAL(1, crfront); + BOOST_TEST_REQUIRE( 1 == rfront ); + BOOST_TEST_REQUIRE( 1 == crfront ); int front = 0; // access element pushed first front = queue.front(); - BOOST_REQUIRE_EQUAL(1, front); + BOOST_TEST_REQUIRE( 1 == front ); // front is still the same front = queue.front(); - BOOST_REQUIRE_EQUAL(1, front); + BOOST_TEST_REQUIRE( 1 == front ); queue.pop(); front = queue.front(); - BOOST_REQUIRE_EQUAL(2, front); + BOOST_TEST_REQUIRE( 2 == front ); - queue.pop(); // pop 2 + queue.pop(); // pop 2 bool pop_ret = queue.pop(); // pop 3 - BOOST_REQUIRE(pop_ret); + BOOST_TEST_REQUIRE( pop_ret ); pop_ret = queue.pop(); // pop on empty queue - BOOST_REQUIRE( ! pop_ret); + BOOST_TEST_REQUIRE( !pop_ret ); } BOOST_AUTO_TEST_CASE( spsc_queue_buffer_front_and_pop_runtime_sized_test ) { - spsc_queue > queue; - spsc_queue_front_pop(queue); + spsc_queue< int, capacity< 64 > > queue; + spsc_queue_front_pop( queue ); } BOOST_AUTO_TEST_CASE( spsc_queue_buffer_front_and_pop_compiletime_sized_test ) { - spsc_queue queue(64); - spsc_queue_front_pop(queue); + spsc_queue< int > queue( 64 ); + spsc_queue_front_pop( queue ); } BOOST_AUTO_TEST_CASE( spsc_queue_reset_test ) { - spsc_queue > f; + spsc_queue< int, capacity< 64 > > f; - BOOST_REQUIRE(f.empty()); - f.push(1); - f.push(2); + BOOST_TEST_REQUIRE( f.empty() ); + f.push( 1 ); + f.push( 2 ); f.reset(); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } diff --git a/test/stack_bounded_stress_test.cpp b/test/stack_bounded_stress_test.cpp index 55d097fb..05037811 100644 --- a/test/stack_bounded_stress_test.cpp +++ b/test/stack_bounded_stress_test.cpp @@ -10,18 +10,18 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" BOOST_AUTO_TEST_CASE( stack_test_bounded ) { - typedef queue_stress_tester tester_type; - boost::scoped_ptr tester(new 
tester_type(4, 4) ); + typedef queue_stress_tester< true > tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::stack q(128); - tester->run(q); + boost::lockfree::stack< long > q( 128 ); + tester->run( q ); } diff --git a/test/stack_fixedsize_stress_test.cpp b/test/stack_fixedsize_stress_test.cpp index 688af308..458f1a0f 100644 --- a/test/stack_fixedsize_stress_test.cpp +++ b/test/stack_fixedsize_stress_test.cpp @@ -10,9 +10,9 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" @@ -20,9 +20,9 @@ BOOST_AUTO_TEST_CASE( stack_test_fixed_size ) { - typedef queue_stress_tester<> tester_type; - boost::scoped_ptr tester(new tester_type(4, 4) ); + typedef queue_stress_tester<> tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::stack > q; - tester->run(q); + boost::lockfree::stack< long, boost::lockfree::capacity< 8 > > q; + tester->run( q ); } diff --git a/test/stack_interprocess_test.cpp b/test/stack_interprocess_test.cpp index 1a60e63f..aa39c524 100644 --- a/test/stack_interprocess_test.cpp +++ b/test/stack_interprocess_test.cpp @@ -5,53 +5,57 @@ // http://www.boost.org/LICENSE_1_0.txt) #include //std::system -#include #include #include #include using namespace boost::interprocess; -typedef allocator ShmemAllocator; -typedef boost::lockfree::stack, - boost::lockfree::capacity<2048> - > stack; +typedef allocator< int, managed_shared_memory::segment_manager > ShmemAllocator; +typedef boost::lockfree::stack< int, boost::lockfree::allocator< ShmemAllocator >, boost::lockfree::capacity< 2048 > > stack; -int main (int argc, char *argv[]) +int main( int argc, char* argv[] ) { - if(argc == 1){ + if ( argc == 1 ) { struct shm_remove { - shm_remove() { shared_memory_object::remove("MySharedMemory"); } - ~shm_remove(){ shared_memory_object::remove("MySharedMemory"); } + shm_remove() + { + 
shared_memory_object::remove( "MySharedMemory" ); + } + ~shm_remove() + { + shared_memory_object::remove( "MySharedMemory" ); + } } remover; - managed_shared_memory segment(create_only, "MySharedMemory", 65536); - ShmemAllocator alloc_inst (segment.get_segment_manager()); + managed_shared_memory segment( create_only, "MySharedMemory", 65536 ); + ShmemAllocator alloc_inst( segment.get_segment_manager() ); - stack * queue = segment.construct("stack")(alloc_inst); - for (int i = 0; i != 1024; ++i) - queue->push(i); + stack* queue = segment.construct< stack >( "stack" )( alloc_inst ); + for ( int i = 0; i != 1024; ++i ) + queue->push( i ); - std::string s(argv[0]); s += " child "; - if(0 != std::system(s.c_str())) + std::string s( argv[ 0 ] ); + s += " child "; + if ( 0 != std::system( s.c_str() ) ) return 1; - while (!queue->empty()) + while ( !queue->empty() ) boost::thread::yield(); return 0; } else { - managed_shared_memory segment(open_only, "MySharedMemory"); - stack * queue = segment.find("stack").first; + managed_shared_memory segment( open_only, "MySharedMemory" ); + stack* queue = segment.find< stack >( "stack" ).first; int from_queue; - for (int i = 0; i != 1024; ++i) { - bool success = queue->pop(from_queue); - assert (success); - assert (from_queue == 1023 - i); + for ( int i = 0; i != 1024; ++i ) { + bool success = queue->pop( from_queue ); + (void)success; + assert( success ); + assert( from_queue == 1023 - i ); } - segment.destroy("stack"); + segment.destroy< stack >( "stack" ); } return 0; } diff --git a/test/stack_test.cpp b/test/stack_test.cpp index 303222b6..4b25ec6d 100644 --- a/test/stack_test.cpp +++ b/test/stack_test.cpp @@ -5,207 +5,218 @@ // http://www.boost.org/LICENSE_1_0.txt) -#include #include +#include #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_helpers.hpp" BOOST_AUTO_TEST_CASE( simple_stack_test ) { - boost::lockfree::stack stk(128); + 
boost::lockfree::stack< long > stk( 128 ); - stk.push(1); - stk.push(2); + stk.push( 1 ); + stk.push( 2 ); long out; - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.pop(out)); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.pop( out ) ); } BOOST_AUTO_TEST_CASE( unsafe_stack_test ) { - boost::lockfree::stack stk(128); + boost::lockfree::stack< long > stk( 128 ); - stk.unsynchronized_push(1); - stk.unsynchronized_push(2); + stk.unsynchronized_push( 1 ); + stk.unsynchronized_push( 2 ); long out; - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.unsynchronized_pop(out)); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.unsynchronized_pop( out ) ); } BOOST_AUTO_TEST_CASE( ranged_push_test ) { - boost::lockfree::stack stk(128); + boost::lockfree::stack< long > stk( 128 ); - long data[2] = {1, 2}; + long data[ 2 ] = { 1, 2 }; - BOOST_REQUIRE_EQUAL(stk.push(data, data + 2), data + 2); + BOOST_TEST_REQUIRE( stk.push( data, data + 2 ) == data + 2 ); long out; - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.unsynchronized_pop(out)); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.unsynchronized_pop( out ) ); } BOOST_AUTO_TEST_CASE( ranged_unsynchronized_push_test ) { - boost::lockfree::stack stk(128); + 
boost::lockfree::stack< long > stk( 128 ); - long data[2] = {1, 2}; + long data[ 2 ] = { 1, 2 }; - BOOST_REQUIRE_EQUAL(stk.unsynchronized_push(data, data + 2), data + 2); + BOOST_TEST_REQUIRE( stk.unsynchronized_push( data, data + 2 ) == data + 2 ); long out; - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.unsynchronized_pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.unsynchronized_pop(out)); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.unsynchronized_pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.unsynchronized_pop( out ) ); } BOOST_AUTO_TEST_CASE( fixed_size_stack_test ) { - boost::lockfree::stack > stk; + boost::lockfree::stack< long, boost::lockfree::capacity< 128 > > stk; - stk.push(1); - stk.push(2); + stk.push( 1 ); + stk.push( 2 ); long out; - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.pop(out)); - BOOST_REQUIRE(stk.empty()); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.pop( out ) ); + BOOST_TEST_REQUIRE( stk.empty() ); } BOOST_AUTO_TEST_CASE( fixed_size_stack_test_exhausted ) { - boost::lockfree::stack > stk; + boost::lockfree::stack< long, boost::lockfree::capacity< 2 > > stk; - stk.push(1); - stk.push(2); - BOOST_REQUIRE(!stk.push(3)); + stk.push( 1 ); + stk.push( 2 ); + BOOST_TEST_REQUIRE( !stk.push( 3 ) ); long out; - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.pop(out)); - BOOST_REQUIRE(stk.empty()); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.pop( out 
) ); + BOOST_TEST_REQUIRE( stk.empty() ); } BOOST_AUTO_TEST_CASE( bounded_stack_test_exhausted ) { - boost::lockfree::stack stk(2); + boost::lockfree::stack< long > stk( 2 ); - stk.bounded_push(1); - stk.bounded_push(2); - BOOST_REQUIRE(!stk.bounded_push(3)); + stk.bounded_push( 1 ); + stk.bounded_push( 2 ); + BOOST_TEST_REQUIRE( !stk.bounded_push( 3 ) ); long out; - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 2); - BOOST_REQUIRE(stk.pop(out)); BOOST_REQUIRE_EQUAL(out, 1); - BOOST_REQUIRE(!stk.pop(out)); - BOOST_REQUIRE(stk.empty()); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 2 ); + BOOST_TEST_REQUIRE( stk.pop( out ) ); + BOOST_TEST_REQUIRE( out == 1 ); + BOOST_TEST_REQUIRE( !stk.pop( out ) ); + BOOST_TEST_REQUIRE( stk.empty() ); } BOOST_AUTO_TEST_CASE( stack_consume_one_test ) { - boost::lockfree::stack f(64); + boost::lockfree::stack< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - bool success1 = f.consume_one(test_equal(2)); - bool success2 = f.consume_one(test_equal(1)); + bool success1 = f.consume_one( test_equal( 2 ) ); + bool success2 = f.consume_one( test_equal( 1 ) ); #else - bool success1 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 2); - }); + bool success1 = f.consume_one( []( int i ) { + BOOST_TEST_REQUIRE( i == 2 ); + } ); - bool success2 = f.consume_one([] (int i) { - BOOST_REQUIRE_EQUAL(i, 1); - }); + bool success2 = f.consume_one( []( int i ) { + BOOST_TEST_REQUIRE( i == 1 ); + } ); #endif - BOOST_REQUIRE(success1); - BOOST_REQUIRE(success2); + BOOST_TEST_REQUIRE( success1 ); + BOOST_TEST_REQUIRE( success2 ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( stack_consume_all_test ) { - boost::lockfree::stack f(64); + boost::lockfree::stack< int > f( 64 ); - 
BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); + f.push( 1 ); + f.push( 2 ); #ifdef BOOST_NO_CXX11_LAMBDAS - size_t consumed = f.consume_all(dummy_functor()); + size_t consumed = f.consume_all( dummy_functor() ); #else - size_t consumed = f.consume_all([] (int i) { - }); + size_t consumed = f.consume_all( []( int i ) {} ); #endif - BOOST_REQUIRE_EQUAL(consumed, 2u); + BOOST_TEST_REQUIRE( consumed == 2u ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( stack_consume_all_atomic_test ) { - boost::lockfree::stack f(64); + boost::lockfree::stack< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); - f.push(3); + f.push( 1 ); + f.push( 2 ); + f.push( 3 ); #ifdef BOOST_NO_CXX11_LAMBDAS - size_t consumed = f.consume_all_atomic(dummy_functor()); + size_t consumed = f.consume_all_atomic( dummy_functor() ); #else - size_t consumed = f.consume_all_atomic([] (int i) { - }); + size_t consumed = f.consume_all_atomic( []( int i ) {} ); #endif - BOOST_REQUIRE_EQUAL(consumed, 3u); + BOOST_TEST_REQUIRE( consumed == 3u ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } BOOST_AUTO_TEST_CASE( stack_consume_all_atomic_reversed_test ) { - boost::lockfree::stack f(64); + boost::lockfree::stack< int > f( 64 ); - BOOST_WARN(f.is_lock_free()); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_WARN( f.is_lock_free() ); + BOOST_TEST_REQUIRE( f.empty() ); - f.push(1); - f.push(2); - f.push(3); + f.push( 1 ); + f.push( 2 ); + f.push( 3 ); #ifdef BOOST_NO_CXX11_LAMBDAS - size_t consumed = f.consume_all_atomic_reversed(dummy_functor()); + size_t consumed = f.consume_all_atomic_reversed( dummy_functor() ); #else - size_t consumed = f.consume_all_atomic_reversed([] (int i) { - }); + size_t consumed = 
f.consume_all_atomic_reversed( []( int i ) {} ); #endif - BOOST_REQUIRE_EQUAL(consumed, 3u); + BOOST_TEST_REQUIRE( consumed == 3u ); - BOOST_REQUIRE(f.empty()); + BOOST_TEST_REQUIRE( f.empty() ); } @@ -213,7 +224,7 @@ BOOST_AUTO_TEST_CASE( reserve_test ) { typedef boost::lockfree::stack< void* > memory_stack; - memory_stack ms(1); - ms.reserve(1); - ms.reserve_unsafe(1); + memory_stack ms( 1 ); + ms.reserve( 1 ); + ms.reserve_unsafe( 1 ); } diff --git a/test/stack_unbounded_stress_test.cpp b/test/stack_unbounded_stress_test.cpp index 98d97517..a922680a 100644 --- a/test/stack_unbounded_stress_test.cpp +++ b/test/stack_unbounded_stress_test.cpp @@ -10,9 +10,9 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif #include "test_common.hpp" @@ -20,9 +20,9 @@ BOOST_AUTO_TEST_CASE( stack_test_unbounded ) { - typedef queue_stress_tester tester_type; - boost::scoped_ptr tester(new tester_type(4, 4) ); + typedef queue_stress_tester< false > tester_type; + boost::scoped_ptr< tester_type > tester( new tester_type( 4, 4 ) ); - boost::lockfree::stack q(128); - tester->run(q); + boost::lockfree::stack< long > q( 128 ); + tester->run( q ); } diff --git a/test/tagged_ptr_test.cpp b/test/tagged_ptr_test.cpp index 4ce1dd74..8900d476 100644 --- a/test/tagged_ptr_test.cpp +++ b/test/tagged_ptr_test.cpp @@ -10,49 +10,49 @@ #define BOOST_TEST_MAIN #ifdef BOOST_LOCKFREE_INCLUDE_TESTS -#include +# include #else -#include +# include #endif BOOST_AUTO_TEST_CASE( tagged_ptr_test ) { using namespace boost::lockfree::detail; - int a(1), b(2); + int a( 1 ), b( 2 ); - typedef tagged_ptr::tag_t tag_t; - const tag_t max_tag = (std::numeric_limits::max)(); + typedef tagged_ptr< int >::tag_t tag_t; + const tag_t max_tag = ( std::numeric_limits< tag_t >::max )(); { - tagged_ptr i (&a, 0); - tagged_ptr j (&b, 1); + tagged_ptr< int > i( &a, 0 ); + tagged_ptr< int > j( &b, 1 ); i = j; - BOOST_REQUIRE_EQUAL(i.get_ptr(), &b); - 
BOOST_REQUIRE_EQUAL(i.get_tag(), 1); + BOOST_TEST_REQUIRE( i.get_ptr() == &b ); + BOOST_TEST_REQUIRE( i.get_tag() == 1 ); } { - tagged_ptr i (&a, 0); - tagged_ptr j (i); + tagged_ptr< int > i( &a, 0 ); + tagged_ptr< int > j( i ); - BOOST_REQUIRE_EQUAL(i.get_ptr(), j.get_ptr()); - BOOST_REQUIRE_EQUAL(i.get_tag(), j.get_tag()); + BOOST_TEST_REQUIRE( i.get_ptr() == j.get_ptr() ); + BOOST_TEST_REQUIRE( i.get_tag() == j.get_tag() ); } { - tagged_ptr i (&a, 0); - BOOST_REQUIRE_EQUAL(i.get_tag() + 1, i.get_next_tag()); + tagged_ptr< int > i( &a, 0 ); + BOOST_TEST_REQUIRE( i.get_tag() + 1 == i.get_next_tag() ); } { - tagged_ptr j (&a, max_tag); - BOOST_REQUIRE_EQUAL(j.get_next_tag(), 0); + tagged_ptr< int > j( &a, max_tag ); + BOOST_TEST_REQUIRE( j.get_next_tag() == 0 ); } { - tagged_ptr j (&a, max_tag - 1); - BOOST_REQUIRE_EQUAL(j.get_next_tag(), max_tag); + tagged_ptr< int > j( &a, max_tag - 1 ); + BOOST_TEST_REQUIRE( j.get_next_tag() == max_tag ); } } diff --git a/test/test_common.hpp b/test/test_common.hpp index 2db41a84..a6029bab 100644 --- a/test/test_common.hpp +++ b/test/test_common.hpp @@ -4,11 +4,12 @@ // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) +#include "test_helpers.hpp" #include #include -#include "test_helpers.hpp" #include +#include #include namespace impl { @@ -17,47 +18,51 @@ using boost::array; using namespace boost; using namespace std; -template +template < bool Bounded = false > struct queue_stress_tester { - static const unsigned int buckets = 1<<13; + static const unsigned int buckets = 1 << 13; #ifndef BOOST_LOCKFREE_STRESS_TEST - static const long node_count = 5000; + static const long node_count = 5000; #else static const long node_count = 500000; #endif const int reader_threads; const int writer_threads; - boost::lockfree::detail::atomic writers_finished; + boost::lockfree::detail::atomic< int > writers_finished; - static_hashed_set data; - static_hashed_set dequeued; - array, buckets> returned; + 
static_hashed_set< long, buckets > data; + static_hashed_set< long, buckets > dequeued; + array< std::set< long >, buckets > returned; - boost::lockfree::detail::atomic push_count, pop_count; + boost::lockfree::detail::atomic< int > push_count, pop_count; - queue_stress_tester(int reader, int writer): - reader_threads(reader), writer_threads(writer), push_count(0), pop_count(0) + queue_stress_tester( int reader, int writer ) : + reader_threads( reader ), + writer_threads( writer ), + push_count( 0 ), + pop_count( 0 ) {} - template - void add_items(queue & stk) + template < typename queue > + void add_items( queue& stk ) { - for (long i = 0; i != node_count; ++i) { - long id = generate_id(); + for ( long i = 0; i != node_count; ++i ) { + long id = generate_id< long >(); - bool inserted = data.insert(id); - assert(inserted); + bool inserted = data.insert( id ); + assert( inserted ); + (void)inserted; - if (Bounded) - while(stk.bounded_push(id) == false) { + if ( Bounded ) + while ( stk.bounded_push( id ) == false ) { #ifdef __VXWORKS__ thread::yield(); #endif } else - while(stk.push(id) == false) { + while ( stk.push( id ) == false ) { #ifdef __VXWORKS__ thread::yield(); #endif @@ -67,31 +72,33 @@ struct queue_stress_tester writers_finished += 1; } - boost::lockfree::detail::atomic running; + boost::lockfree::detail::atomic< bool > running; - template - bool consume_element(queue & q) + template < typename queue > + bool consume_element( queue& q ) { long id; - bool ret = q.pop(id); + bool ret = q.pop( id ); - if (!ret) + if ( !ret ) return false; - bool erased = data.erase(id); - bool inserted = dequeued.insert(id); - assert(erased); - assert(inserted); + bool erased = data.erase( id ); + bool inserted = dequeued.insert( id ); + (void)erased; + (void)inserted; + assert( erased ); + assert( inserted ); ++pop_count; return true; } - template - void get_items(queue & q) + template < typename queue > + void get_items( queue& q ) { - for (;;) { - bool received_element = 
consume_element(q); - if (received_element) + for ( ;; ) { + bool received_element = consume_element( q ); + if ( received_element ) continue; if ( writers_finished.load() == writer_threads ) @@ -102,25 +109,30 @@ struct queue_stress_tester #endif } - while (consume_element(q)); + while ( consume_element( q ) ) + ; } - template - void run(queue & stk) + template < typename queue > + void run( queue& stk ) { - BOOST_WARN(stk.is_lock_free()); - writers_finished.store(0); + BOOST_WARN( stk.is_lock_free() ); + writers_finished.store( 0 ); thread_group writer; thread_group reader; - BOOST_REQUIRE(stk.empty()); + BOOST_TEST_REQUIRE( stk.empty() ); - for (int i = 0; i != reader_threads; ++i) - reader.create_thread(boost::bind(&queue_stress_tester::template get_items, this, boost::ref(stk))); + for ( int i = 0; i != reader_threads; ++i ) + reader.create_thread( boost::bind( &queue_stress_tester::template get_items< queue >, + this, + boost::ref( stk ) ) ); - for (int i = 0; i != writer_threads; ++i) - writer.create_thread(boost::bind(&queue_stress_tester::template add_items, this, boost::ref(stk))); + for ( int i = 0; i != writer_threads; ++i ) + writer.create_thread( boost::bind( &queue_stress_tester::template add_items< queue >, + this, + boost::ref( stk ) ) ); std::cout << "threads created" << std::endl; @@ -132,14 +144,14 @@ struct queue_stress_tester std::cout << "reader threads joined" << std::endl; - BOOST_REQUIRE_EQUAL(data.count_nodes(), (size_t)0); - BOOST_REQUIRE(stk.empty()); + BOOST_TEST_REQUIRE( data.count_nodes() == (size_t)0 ); + BOOST_TEST_REQUIRE( stk.empty() ); - BOOST_REQUIRE_EQUAL(push_count, pop_count); - BOOST_REQUIRE_EQUAL(push_count, writer_threads * node_count); + BOOST_TEST_REQUIRE( push_count == pop_count ); + BOOST_TEST_REQUIRE( push_count == writer_threads * node_count ); } }; -} +} // namespace impl using impl::queue_stress_tester; diff --git a/test/test_helpers.hpp b/test/test_helpers.hpp index 882fa1c4..e7a9fe24 100644 --- 
a/test/test_helpers.hpp +++ b/test/test_helpers.hpp @@ -7,93 +7,92 @@ #ifndef BOOST_LOCKFREE_TEST_HELPERS #define BOOST_LOCKFREE_TEST_HELPERS -#include #include +#include #include +#include #include +#include -#include -template -int_type generate_id(void) +template < typename int_type > +int_type generate_id( void ) { - static boost::lockfree::detail::atomic generator(0); + static boost::lockfree::detail::atomic< int_type > generator( 0 ); return ++generator; } -template +template < typename int_type, size_t buckets > class static_hashed_set { - public: - int calc_index(int_type id) + int calc_index( int_type id ) { // knuth hash ... does not need to be good, but has to be portable - size_t factor = size_t((float)buckets * 1.616f); + size_t factor = size_t( (float)buckets * 1.616f ); - return ((size_t)id * factor) % buckets; + return ( (size_t)id * factor ) % buckets; } - bool insert(int_type const & id) + bool insert( int_type const& id ) { - std::size_t index = calc_index(id); + std::size_t index = calc_index( id ); - boost::lock_guard lock (ref_mutex[index]); + boost::lock_guard< boost::mutex > lock( ref_mutex[ index ] ); - std::pair::iterator, bool> p; - p = data[index].insert(id); + std::pair< typename std::set< int_type >::iterator, bool > p; + p = data[ index ].insert( id ); return p.second; } - bool find (int_type const & id) + bool find( int_type const& id ) { - std::size_t index = calc_index(id); + std::size_t index = calc_index( id ); - boost::lock_guard lock (ref_mutex[index]); + boost::lock_guard< boost::mutex > lock( ref_mutex[ index ] ); - return data[index].find(id) != data[index].end(); + return data[ index ].find( id ) != data[ index ].end(); } - bool erase(int_type const & id) + bool erase( int_type const& id ) { - std::size_t index = calc_index(id); + std::size_t index = calc_index( id ); - boost::lock_guard lock (ref_mutex[index]); + boost::lock_guard< boost::mutex > lock( ref_mutex[ index ] ); - if (data[index].find(id) != data[index].end()) 
{ - data[index].erase(id); - assert(data[index].find(id) == data[index].end()); + if ( data[ index ].find( id ) != data[ index ].end() ) { + data[ index ].erase( id ); + assert( data[ index ].find( id ) == data[ index ].end() ); return true; - } - else + } else return false; } - std::size_t count_nodes(void) const + std::size_t count_nodes( void ) const { std::size_t ret = 0; - for (int i = 0; i != buckets; ++i) { - boost::lock_guard lock (ref_mutex[i]); - ret += data[i].size(); + for ( int i = 0; i != buckets; ++i ) { + boost::lock_guard< boost::mutex > lock( ref_mutex[ i ] ); + ret += data[ i ].size(); } return ret; } private: - boost::array, buckets> data; - mutable boost::array ref_mutex; + boost::array< std::set< int_type >, buckets > data; + mutable boost::array< boost::mutex, buckets > ref_mutex; }; struct test_equal { - test_equal(int i): - i(i) + test_equal( int i ) : + i( i ) {} - void operator()(int arg) const + void operator()( int arg ) const { - BOOST_REQUIRE_EQUAL(arg, i); + BOOST_TEST_REQUIRE( arg == i ); } int i; @@ -101,9 +100,8 @@ struct test_equal struct dummy_functor { - void operator()(int /* arg */) const - { - } + void operator()( int /* arg */ ) const + {} };