Louis Dionne | a63bbc1 | 2021-11-17 16:25:01 -0500 | [diff] [blame] | 1 | //===----------------------------------------------------------------------===// |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 2 | // |
Chandler Carruth | 8ee27c3 | 2019-01-19 10:56:40 +0000 | [diff] [blame] | 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 6 | // |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 7 | //===----------------------------------------------------------------------===// |
| 8 | |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 9 | #include "fallback_malloc.h" |
| 10 | |
Louis Dionne | 77c156f | 2022-02-28 16:37:25 -0500 | [diff] [blame] | 11 | #include <__threading_support> |
Petr Hosek | 905aa58 | 2019-05-30 01:34:41 +0000 | [diff] [blame] | 12 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
Michał Górny | 3f68363 | 2019-12-02 11:49:20 +0100 | [diff] [blame] | 13 | #if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB) |
Petr Hosek | 905aa58 | 2019-05-30 01:34:41 +0000 | [diff] [blame] | 14 | #pragma comment(lib, "pthread") |
| 15 | #endif |
| 16 | #endif |
Jonathan Roelofs | f82302a | 2014-05-06 21:30:56 +0000 | [diff] [blame] | 17 | |
Simon Tatham | ee4f792 | 2022-08-19 15:07:55 +0100 | [diff] [blame] | 18 | #include <assert.h> |
Louis Dionne | f997cb6 | 2019-10-01 18:43:02 +0000 | [diff] [blame] | 19 | #include <stdlib.h> // for malloc, calloc, free |
| 20 | #include <string.h> // for memset |
Louis Dionne | 171e609 | 2020-11-12 15:14:33 -0500 | [diff] [blame] | 21 | #include <new> // for std::__libcpp_aligned_{alloc,free} |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 22 | |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 23 | // A small, simple heap manager based (loosely) on |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 24 | // the startup heap manager from FreeBSD, optimized for space. |
| 25 | // |
| 26 | // Manages a fixed-size memory pool, supports malloc and free only. |
| 27 | // No support for realloc. |
| 28 | // |
| 29 | // Allocates chunks in multiples of four bytes, with a four byte header |
| 30 | // for each chunk. The overhead of each chunk is kept low by keeping pointers |
| 31 | // as two byte offsets within the heap, rather than (4 or 8 byte) pointers. |
| 32 | |
| 33 | namespace { |
| 34 | |
// Mutex serializing all access to the fallback heap's freelist below.
// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
// Dummy object so mutexor can still be constructed with '&heap_mutex'.
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 41 | |
| 42 | class mutexor { |
| 43 | public: |
Asiri Rathnayake | f5ebef9 | 2016-09-21 09:09:32 +0000 | [diff] [blame] | 44 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 45 | mutexor(std::__libcpp_mutex_t* m) : mtx_(m) { |
| 46 | std::__libcpp_mutex_lock(mtx_); |
| 47 | } |
| 48 | ~mutexor() { std::__libcpp_mutex_unlock(mtx_); } |
Asiri Rathnayake | f5ebef9 | 2016-09-21 09:09:32 +0000 | [diff] [blame] | 49 | #else |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 50 | mutexor(void*) {} |
| 51 | ~mutexor() {} |
Jonathan Roelofs | f82302a | 2014-05-06 21:30:56 +0000 | [diff] [blame] | 52 | #endif |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 53 | private: |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 54 | mutexor(const mutexor& rhs); |
| 55 | mutexor& operator=(const mutexor& rhs); |
Asiri Rathnayake | f5ebef9 | 2016-09-21 09:09:32 +0000 | [diff] [blame] | 56 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 57 | std::__libcpp_mutex_t* mtx_; |
Jonathan Roelofs | f82302a | 2014-05-06 21:30:56 +0000 | [diff] [blame] | 58 | #endif |
Asiri Rathnayake | 9c4469c | 2017-01-03 12:58:34 +0000 | [diff] [blame] | 59 | }; |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 60 | |
// The emergency pool itself: a small static buffer carved into chunks on
// demand. __attribute__((aligned)) gives it the platform's maximum alignment.
static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

// Chunk links and lengths are stored as 16-bit values (in units of
// sizeof(heap_node)) instead of 4/8-byte pointers to keep overhead low.
typedef unsigned short heap_offset;
typedef unsigned short heap_size;
| 66 | |
// Header preceding every chunk, also used as the free-list link node.
// On both 64 and 32 bit targets heap_node should have the following properties
// Size: 4
// Alignment: 2
struct heap_node {
  heap_offset next_node; // offset into heap, in units of sizeof(heap_node)
  heap_size len;         // chunk size in units of "sizeof(heap_node)", header included
};
| 74 | |
// All pointers returned by fallback_malloc must be at least aligned
// as RequiredAlignment. Note that RequiredAlignment can be greater than
// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
// __attribute__((aligned)) with no argument requests the maximum alignment.
struct FallbackMaxAlignType {
} __attribute__((aligned));
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by the sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);
| 88 | |
// Sentinel terminating the free list: one past the end of the heap.
static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
// Head of the free list; NULL until init_heap() runs on first allocation.
static heap_node* freelist = NULL;
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 92 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 93 | heap_node* node_from_offset(const heap_offset offset) { |
| 94 | return (heap_node*)(heap + (offset * sizeof(heap_node))); |
| 95 | } |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 96 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 97 | heap_offset offset_from_node(const heap_node* ptr) { |
| 98 | return static_cast<heap_offset>( |
| 99 | static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) / |
| 100 | sizeof(heap_node)); |
| 101 | } |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 102 | |
Simon Tatham | ee4f792 | 2022-08-19 15:07:55 +0100 | [diff] [blame] | 103 | // Return a pointer to the first address, 'A', in `heap` that can actually be |
| 104 | // used to represent a heap_node. 'A' must be aligned so that |
| 105 | // '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this |
| 106 | // address should be 12 bytes after the first 16 byte boundary. |
| 107 | heap_node* getFirstAlignedNodeInHeap() { |
| 108 | heap_node* node = (heap_node*)heap; |
| 109 | const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node); |
| 110 | size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment; |
| 111 | size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset; |
| 112 | size_t NElemOffset = requiredOffset / sizeof(heap_node); |
| 113 | return node + NElemOffset; |
| 114 | } |
| 115 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 116 | void init_heap() { |
Simon Tatham | ee4f792 | 2022-08-19 15:07:55 +0100 | [diff] [blame] | 117 | freelist = getFirstAlignedNodeInHeap(); |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 118 | freelist->next_node = offset_from_node(list_end); |
Simon Tatham | ee4f792 | 2022-08-19 15:07:55 +0100 | [diff] [blame] | 119 | freelist->len = static_cast<heap_size>(list_end - freelist); |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 120 | } |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 121 | |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 122 | // How big a chunk we allocate |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 123 | size_t alloc_size(size_t len) { |
| 124 | return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; |
| 125 | } |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 126 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 127 | bool is_fallback_ptr(void* ptr) { |
| 128 | return ptr >= heap && ptr < (heap + HEAP_SIZE); |
| 129 | } |
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 130 | |
// Allocate 'len' bytes from the emergency pool using a first-fit scan of the
// free list. Returns NULL when no suitable chunk exists. Every returned
// pointer is aligned to RequiredAlignment (asserted below). Thread-safe via
// heap_mutex.
void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len); // request size in heap_node units, header included
  mutexor mtx(&heap_mutex);              // hold the lock for the whole freelist walk

  // First allocation ever: carve the pool into one big free chunk.
  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_nodes pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment
    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }

    // chunk is larger and we can create a properly aligned heap_node
    // from the tail. In this case we shorten 'p' and return the tail.
    // 'p' stays on the free list, so no links need rewriting.
    if (p->len > aligned_nelems) {
      heap_node* q;
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len; // tail of the shortened chunk becomes the new chunk
      q->next_node = 0;
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1; // user data begins just past the header
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints. Unlink it entirely.
    if (p->len >= nelems) {
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1; // user data begins just past the header
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}
| 185 | |
// Return the start of the next block: chunks are contiguous, and 'len'
// counts heap_node elements including the header itself.
heap_node* after(struct heap_node* p) { return p + p->len; }
Howard Hinnant | f46a3f8 | 2012-01-24 21:41:27 +0000 | [diff] [blame] | 188 | |
// Return a chunk obtained from fallback_malloc to the free list, coalescing
// with an adjacent free chunk (before or after it) when one is found.
// 'ptr' must have come from fallback_malloc. Thread-safe via heap_mutex.
void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk header
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex); // hold the lock for the whole freelist walk

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf("  p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
      // 'cp' sits immediately after free chunk 'p': absorb it into 'p',
      // which already sits on the free list.
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
      // 'cp' absorbs 'p' and takes p's place in the free list.
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
// Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("  Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
| 233 | |
#ifdef INSTRUMENT_FALLBACK_MALLOC
// Debug/instrumentation aid: dump every free-list entry to stdout and return
// the total number of free heap_node elements. Initializes the heap if this
// is called before the first allocation.
// NOTE(review): unlike the allocation paths, this does not take heap_mutex —
// presumably only used single-threaded in instrumented builds; verify.
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : "  "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 251 | } // end unnamed namespace |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 252 | |
| 253 | namespace __cxxabiv1 { |
| 254 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 255 | struct __attribute__((aligned)) __aligned_type {}; |
Eric Fiselier | be1d349 | 2017-03-04 02:04:45 +0000 | [diff] [blame] | 256 | |
// Allocate 'size' bytes aligned to alignof(__aligned_type), preferring the
// system allocator and falling back to the emergency pool when it fails
// (e.g. under out-of-memory conditions during exception handling).
void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  // No library aligned allocation: plain malloc's alignment must suffice here.
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1; // avoid a zero-size request, which may return NULL
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}
| 272 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 273 | void* __calloc_with_fallback(size_t count, size_t size) { |
Louis Dionne | f997cb6 | 2019-10-01 18:43:02 +0000 | [diff] [blame] | 274 | void* ptr = ::calloc(count, size); |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 275 | if (NULL != ptr) |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 276 | return ptr; |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 277 | // if calloc fails, fall back to emergency stash |
| 278 | ptr = fallback_malloc(size * count); |
| 279 | if (NULL != ptr) |
Louis Dionne | f997cb6 | 2019-10-01 18:43:02 +0000 | [diff] [blame] | 280 | ::memset(ptr, 0, size * count); |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 281 | return ptr; |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 282 | } |
| 283 | |
Eric Fiselier | be1d349 | 2017-03-04 02:04:45 +0000 | [diff] [blame] | 284 | void __aligned_free_with_fallback(void* ptr) { |
| 285 | if (is_fallback_ptr(ptr)) |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 286 | fallback_free(ptr); |
Eric Fiselier | be1d349 | 2017-03-04 02:04:45 +0000 | [diff] [blame] | 287 | else { |
Louis Dionne | ead1d8d | 2020-12-01 17:43:33 -0500 | [diff] [blame] | 288 | #if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) |
| 289 | ::free(ptr); |
| 290 | #else |
Louis Dionne | 171e609 | 2020-11-12 15:14:33 -0500 | [diff] [blame] | 291 | std::__libcpp_aligned_free(ptr); |
Louis Dionne | ead1d8d | 2020-12-01 17:43:33 -0500 | [diff] [blame] | 292 | #endif |
Eric Fiselier | be1d349 | 2017-03-04 02:04:45 +0000 | [diff] [blame] | 293 | } |
| 294 | } |
| 295 | |
Eric Fiselier | 458afaa | 2017-03-04 03:23:15 +0000 | [diff] [blame] | 296 | void __free_with_fallback(void* ptr) { |
| 297 | if (is_fallback_ptr(ptr)) |
| 298 | fallback_free(ptr); |
| 299 | else |
Louis Dionne | f997cb6 | 2019-10-01 18:43:02 +0000 | [diff] [blame] | 300 | ::free(ptr); |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 301 | } |
| 302 | |
Igor Kudrin | 11741ce | 2016-10-07 08:48:28 +0000 | [diff] [blame] | 303 | } // namespace __cxxabiv1 |