//===------------------------ memory.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

namespace
{

// NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively)
// should be sufficient for thread safety.
// See https://llvm.org/bugs/show_bug.cgi?id=22803
template <class T>
inline T
increment(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, 1, _AO_Relaxed);
}

template <class T>
inline T
decrement(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, -1, _AO_Acq_Rel);
}

}  // namespace

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

__shared_count::~__shared_count()
{
}

void
__shared_count::__add_shared() _NOEXCEPT
{
    increment(__shared_owners_);
}

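// NOTE: __shared_owners_ stores the number of shared owners minus one, so a
// decrement that returns -1 means the last owner has just gone away (in
// <memory>, a freshly constructed count starts at 0, i.e. one owner).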
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

__shared_weak_count::~__shared_weak_count()
{
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case.  Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE?  Yes.  During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us.  If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once.  The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

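// Used by weak_ptr::lock(): atomically bump the shared count, but only while
// it is not -1 (i.e. while shared owners still exist).  The CAS loop ensures
// the increment can never resurrect an already-expired object; on failure the
// current value is reloaded into object_owners and the test repeats.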
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return 0;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return 0;
}

#endif  // !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

static const std::size_t __sp_mut_count = 16;
static __libcpp_mutex_t mut_back_imp[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

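// NOTE: mut_back_imp is statically initialized above so that no constructors
// have to run before these mutexes are usable.  Treating it as an array of
// std::mutex via the cast below relies on libc++'s std::mutex being a thin
// wrapper around __libcpp_mutex_t with no other state.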
static mutex* mut_back = reinterpret_cast<std::mutex*>(mut_back_imp);

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
    : __lx(p)
{
}

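// Spin briefly (up to 16 failed try_lock() attempts, yielding between tries)
// before falling back to a blocking lock().  The critical sections guarded by
// __sp_mut are expected to be very short, so the spin usually succeeds.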
void
__sp_mut::lock() _NOEXCEPT
{
    mutex& m = *static_cast<mutex*>(__lx);
    unsigned count = 0;
    while (!m.try_lock())
    {
        if (++count > 16)
        {
            m.lock();
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    static_cast<mutex*>(__lx)->unlock();
}

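// Maps an object address to one of the 16 mutexes above; the <memory> header's
// atomic shared_ptr overloads (atomic_load, atomic_store, ...) lock the
// returned __sp_mut around their access.  A sketch of the intended usage (not
// defined in this file):
//
//   template <class T>
//   shared_ptr<T> atomic_load(const shared_ptr<T>* p)
//   {
//       __sp_mut& m = __get_sp_mut(p);
//       m.lock();
//       shared_ptr<T> q = *p;
//       m.unlock();
//       return q;
//   }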
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif  // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

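// Pointer-safety / garbage-collection support from [util.dynamic.safety].
// libc++ does not implement a collector, so these are no-ops and
// get_pointer_safety() reports pointer_safety::relaxed.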
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

pointer_safety
get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}

void*
__undeclare_reachable(void* p)
{
    return p;
}

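// std::align: round ptr up to the requested alignment if an aligned block of
// `size` bytes still fits within `space`; on success ptr and space are updated
// and the aligned pointer is returned, otherwise nullptr.  The `& -alignment`
// rounding trick assumes alignment is a power of two (alignment values always
// are).  Example use (a sketch, with hypothetical buffer names):
//
//   char buf[64];
//   void*  p = buf;
//   size_t n = sizeof(buf);
//   if (std::align(alignof(double), sizeof(double), p, n))
//       ;  // p now points to double-aligned storage inside buf; n was reduced
//          // by the number of bytes skipped for alignment.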
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD