// Define operator* and operator-> for shared_ptr<T>.
template<typename _Tp, _Lock_policy _Lp,
         bool = is_array<_Tp>::value, bool = is_void<_Tp>::value>
  class __shared_ptr_access
  { };
The partial template declarations above show the rough class hierarchy and the members involved: shared_ptr<T> inherits from __shared_ptr<_Tp, __default_lock_policy>, and __shared_ptr holds the actual object pointer (an element_type*) plus the reference-counting member, a __shared_count<__default_lock_policy>. We will set __shared_ptr_access aside for now and look at __shared_count first.
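For reference, here are the members that matter for the discussion below, abridged from <bits/shared_ptr_base.h> (the real classes have many more members; only the fields referenced later are kept):

template<_Lock_policy _Lp = __default_lock_policy>
  class _Sp_counted_base : public _Mutex_base<_Lp>
  {
    // ...
  private:
    _Atomic_word  _M_use_count;     // #shared
    _Atomic_word  _M_weak_count;    // #weak + (#shared != 0)
  };

template<_Lock_policy _Lp>
  class __shared_count
  {
    // ...
  private:
    _Sp_counted_base<_Lp>*  _M_pi;  // pointer to the control block
  };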
_Atomic_word comes from <bits/atomic_word.h>; its exact type depends on the architecture and the system, and on the x86_64 Red Hat host I tested it is typedef int _Atomic_word. The interesting parts are therefore __shared_count and the counting operations performed through _M_pi, the _Sp_counted_base pointer. What this base-class pointer ends up pointing to differs depending on how the shared_ptr was constructed; take make_shared as an example:
template<typename _Tp>
  class shared_ptr : public __shared_ptr<_Tp>
  {
  private:
    // This constructor is non-standard, it is used by allocate_shared.
    template<typename _Alloc, typename... _Args>
      shared_ptr(_Sp_alloc_shared_tag<_Alloc> __tag, _Args&&... __args)
      : __shared_ptr<_Tp>(__tag, std::forward<_Args>(__args)...)
      { }
  };
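For context, std::make_shared reaches this non-standard constructor through allocate_shared. A paraphrased sketch of the non-array overloads in <bits/shared_ptr.h> (GCC 11/12 era; treat it as an approximation rather than verbatim source):

template<typename _Tp, typename _Alloc, typename... _Args>
  inline shared_ptr<_Tp>
  allocate_shared(const _Alloc& __a, _Args&&... __args)
  {
    // wrap the allocator in the tag and call the private constructor above
    return shared_ptr<_Tp>(_Sp_alloc_shared_tag<_Alloc>{__a},
                           std::forward<_Args>(__args)...);
  }

template<typename _Tp, typename... _Args>
  inline shared_ptr<_Tp>
  make_shared(_Args&&... __args)
  {
    using _Tp_nc = typename std::remove_cv<_Tp>::type;
    return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
                                     std::forward<_Args>(__args)...);
  }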
<bits/shared_ptr_base.h>
template<typename _Tp, _Lock_policy _Lp>
  class __shared_ptr : public __shared_ptr_access<_Tp, _Lp>
  {
  protected:
    // This constructor is non-standard, it is used by allocate_shared.
    template<typename _Alloc, typename... _Args>
      __shared_ptr(_Sp_alloc_shared_tag<_Alloc> __tag, _Args&&... __args)
      : _M_ptr(), _M_refcount(_M_ptr, __tag, std::forward<_Args>(__args)...)
      { _M_enable_shared_from_this_with(_M_ptr); }
  };
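The _M_refcount(_M_ptr, __tag, ...) initializer above selects the __shared_count constructor dedicated to this tag. Paraphrased from <bits/shared_ptr_base.h> (details differ slightly between GCC releases, so read it as a sketch), this is where the single allocation happens and where both _M_pi and the object pointer get wired up:

template<typename _Tp, typename _Alloc, typename... _Args>
  __shared_count(_Tp*& __p, _Sp_alloc_shared_tag<_Alloc> __a, _Args&&... __args)
  {
    typedef _Sp_counted_ptr_inplace<_Tp, _Alloc, _Lp> _Sp_cp_type;
    typename _Sp_cp_type::__allocator_type __a2(__a._M_a);
    auto __guard = std::__allocate_guarded(__a2);   // one allocation: control block + _Tp
    _Sp_cp_type* __mem = __guard.get();
    auto __pi = ::new (__mem)
      _Sp_cp_type(__a._M_a, std::forward<_Args>(__args)...);
    __guard = nullptr;                              // allocation now owned by *this
    _M_pi = __pi;                                   // the _Sp_counted_base* mentioned above
    __p = __pi->_M_ptr();                           // __shared_ptr::_M_ptr points into the block
  }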
// Alloc parameter is not a reference so doesn't alias anything in __args
template<typename... _Args>
  _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
  : _M_impl(__a)
  {
    // _GLIBCXX_RESOLVE_LIB_DEFECTS
    // 2070.  allocate_shared should use allocator_traits<A>::construct
    allocator_traits<_Alloc>::construct(__a, _M_ptr(),
        std::forward<_Args>(__args)...); // might throw
  }
// Override because the allocator needs to know the dynamic type
virtual void
_M_destroy() noexcept
{
  __allocator_type __a(_M_impl._M_alloc());
  __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
  this->~_Sp_counted_ptr_inplace();
}
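The practical consequence of _Sp_counted_ptr_inplace is that the control block and the managed object share one allocation. A small standalone check (the exact counts are an implementation detail, but on libstdc++ this is expected to print 2 and then 1):

#include <cstdio>
#include <cstdlib>
#include <memory>
#include <new>

// Count global operator new calls: make_shared fuses object and control block
// into one allocation, while shared_ptr<T>(new T) needs two (the object, then
// a separate _Sp_counted_ptr control block).
static int g_news = 0;

void* operator new(std::size_t n)
{
  ++g_news;
  if (void* p = std::malloc(n))
    return p;
  throw std::bad_alloc();
}
void operator delete(void* p) noexcept { std::free(p); }
void operator delete(void* p, std::size_t) noexcept { std::free(p); }

int main()
{
  g_news = 0;
  { std::shared_ptr<int> p(new int(1)); }   // object + separate control block
  std::printf("shared_ptr(new int): %d allocation(s)\n", g_news);

  g_news = 0;
  { auto q = std::make_shared<int>(1); }    // one _Sp_counted_ptr_inplace block
  std::printf("make_shared<int>   : %d allocation(s)\n", g_news);
}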
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Available locking policies:
  // _S_single    single-threaded code that doesn't need to be locked.
  // _S_mutex     multi-threaded code that requires additional support
  //              from gthr.h or abstraction layers in concurrence.h.
  // _S_atomic    multi-threaded code using atomic operations.
  enum _Lock_policy { _S_single, _S_mutex, _S_atomic };
  // Compile time constant that indicates prefered locking policy in
  // the current configuration.
  _GLIBCXX17_INLINE const _Lock_policy __default_lock_policy =
#ifndef __GTHREADS
  _S_single;
#elif defined _GLIBCXX_HAVE_ATOMIC_LOCK_POLICY
  _S_atomic;
#else
  _S_mutex;
#endif
}
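A quick way to see which policy a given toolchain actually picked (this relies on the libstdc++-internal header <ext/concurrence.h>, so it is only a diagnostic sketch):

#include <cstdio>
#include <ext/concurrence.h>   // __gnu_cxx::_Lock_policy, __default_lock_policy

int main()
{
  // 0 = _S_single, 1 = _S_mutex, 2 = _S_atomic.
  // On a threaded x86_64 build this is expected to print 2 (_S_atomic).
  std::printf("__default_lock_policy = %d\n",
              static_cast<int>(__gnu_cxx::__default_lock_policy));
}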
template<>
  inline void
  _Sp_counted_base<_S_single>::_M_release() noexcept
  {
    if (--_M_use_count == 0)
      {
        _M_dispose();
        if (--_M_weak_count == 0)
          _M_destroy();
      }
  }
template<>
  inline void
  _Sp_counted_base<_S_mutex>::_M_release() noexcept
  {
    // Be race-detector-friendly.  For more info see bits/c++config.
    _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
    if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
      {
        _M_release_last_use();
      }
  }
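__exchange_and_add_dispatch returns the value held before the addition, so comparing the result with 1 asks "did this thread just drop the last reference?". The same idiom expressed with std::atomic, as a standalone illustration (the dispatch helper additionally falls back to a plain non-atomic add in single-threaded programs):

#include <atomic>
#include <cstdio>

int main()
{
  std::atomic<int> use_count{1};   // stands in for _M_use_count
  // fetch_add returns the previous value: 1 means the last reference was just released.
  if (use_count.fetch_add(-1, std::memory_order_acq_rel) == 1)
    std::puts("last use released -> _M_release_last_use()");
}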
template<>
  inline void
  _Sp_counted_base<_S_atomic>::_M_release() noexcept
  {
    _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
#if ! _GLIBCXX_TSAN
    constexpr bool __lock_free
      = __atomic_always_lock_free(sizeof(long long), 0)
      && __atomic_always_lock_free(sizeof(_Atomic_word), 0);
    constexpr bool __double_word
      = sizeof(long long) == 2 * sizeof(_Atomic_word);
    // The ref-count members follow the vptr, so are aligned to
    // alignof(void*).
    constexpr bool __aligned = __alignof(long long) <= alignof(void*);
    if _GLIBCXX17_CONSTEXPR (__lock_free && __double_word && __aligned)
      {
        constexpr int __wordbits = __CHAR_BIT__ * sizeof(_Atomic_word);
        constexpr int __shiftbits = __double_word ? __wordbits : 0;
        constexpr long long __unique_ref = 1LL + (1LL << __shiftbits);
        auto __both_counts = reinterpret_cast<long long*>(&_M_use_count);

        _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
        if (__atomic_load_n(__both_counts, __ATOMIC_ACQUIRE) == __unique_ref)
          {
            // Both counts are 1, so there are no weak references and
            // we are releasing the last strong reference. No other
            // threads can observe the effects of this _M_release()
            // call (e.g. calling use_count()) without a data race.
            _M_weak_count = _M_use_count = 0;
            _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
            _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
            _M_dispose();
            _M_destroy();
            return;
          }
        if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
          [[__unlikely__]]
          {
            _M_release_last_use_cold();
            return;
          }
      }
    else
#endif
    if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
      {
        _M_release_last_use();
      }
  }
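The fast path above relies on a small piece of arithmetic: when _Atomic_word is a 32-bit int, _M_use_count and _M_weak_count are adjacent, so a single 64-bit load reads both at once, and the pattern "use_count == 1 and weak_count == 1" becomes the constant 1 + (1 << 32). A standalone illustration of that constant (assuming a 32-bit _Atomic_word, as on the x86_64 host mentioned earlier):

#include <cstdint>
#include <cstdio>

int main()
{
  constexpr int wordbits = 8 * sizeof(std::int32_t);          // stand-in for _Atomic_word
  constexpr long long unique_ref = 1LL + (1LL << wordbits);   // one count in each half
  // Prints 0x0000000100000001: both halves equal to 1, which is exactly
  // the value the __atomic_load_n comparison checks for.
  std::printf("__unique_ref = 0x%016llx\n", unique_ref);
}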
// Macros for race detectors.
// _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(A) and
// _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(A) should be used to explain
// atomic (lock-free) synchronization to race detectors:
// the race detector will infer a happens-before arc from the former to the
// latter when they share the same argument pointer.
//
// The most frequent use case for these macros (and the only case in the
// current implementation of the library) is atomic reference counting:
// void _M_remove_reference()
// {
//   _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&this->_M_refcount);
//   if (__gnu_cxx::__exchange_and_add_dispatch(&this->_M_refcount, -1) <= 0)
//     {
//       _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&this->_M_refcount);
//       _M_destroy(__a);
//     }
// }
// The annotations in this example tell the race detector that all memory
// accesses occurred when the refcount was positive do not race with
// memory accesses which occurred after the refcount became zero.
// Called by _M_release() when the use count reaches zero.
void
_M_release_last_use() noexcept
{
  _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
  _M_dispose();
  // There must be a memory barrier between dispose() and destroy()
  // to ensure that the effects of dispose() are observed in the
  // thread that runs destroy().
  // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
  if (_Mutex_base<_Lp>::_S_need_barriers)
    {
      __atomic_thread_fence (__ATOMIC_ACQ_REL);
    }

  // Be race-detector-friendly.  For more info see bits/c++config.
  _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
  if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
    {
      _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
      _M_destroy();
    }
}
// As above, but 'noinline' to reduce code size on the cold path.
__attribute__((__noinline__))
void
_M_release_last_use_cold() noexcept
{ _M_release_last_use(); }
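To see this two-phase teardown from the outside: _M_dispose() runs when the last shared_ptr goes away, while _M_destroy() waits until _M_weak_count also drops to zero. A small demonstration (note that with make_shared the single allocation created by _Sp_counted_ptr_inplace is therefore only freed once the last weak_ptr is gone):

#include <cstdio>
#include <memory>

struct Noisy { ~Noisy() { std::puts("~Noisy  (_M_dispose ran)"); } };

int main()
{
  std::weak_ptr<Noisy> w;
  {
    auto sp = std::make_shared<Noisy>();
    w = sp;
  }                               // last strong ref dropped: ~Noisy prints here
  std::printf("expired: %d  (control block still alive for w)\n",
              static_cast<int>(w.expired()));
}                                 // w destroyed: weak count hits zero, _M_destroy frees the block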