//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <assert.h>
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#include <new>      // for std::__libcpp_aligned_{alloc,free}

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
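//
// A rough sketch of the layout (assuming sizeof(heap_node) == 4): a 512-byte
// pool holds at most 128 four-byte nodes, so any node can be named by an
// offset in [0, 128], which comfortably fits in the two-byte heap_offset
// type used in the chunk headers.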

namespace {

// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

// On both 64 and 32 bit targets heap_node should have the following properties
// Size: 4
// Alignment: 2
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

// All pointers returned by fallback_malloc must be at least aligned
// as RequiredAlignment. Note that RequiredAlignment can be greater than
// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
struct FallbackMaxAlignType {
} __attribute__((aligned));
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by the sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}
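
// The two conversions above are inverses: for any heap_node* 'n' inside the
// pool, node_from_offset(offset_from_node(n)) == n. For example (assuming
// sizeof(heap_node) == 4), the node at 'heap + 12' has offset 3, and
// offset 3 maps back to 'heap + 12'.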

// Return a pointer to the first address, 'A', in `heap` that can actually be
// used to represent a heap_node. 'A' must be aligned so that
// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
// address should be 12 bytes after the first 16 byte boundary.
heap_node* getFirstAlignedNodeInHeap() {
  heap_node* node = (heap_node*)heap;
  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
  size_t NElemOffset = requiredOffset / sizeof(heap_node);
  return node + NElemOffset;
}
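
// Worked example (assuming RequiredAlignment == 16, sizeof(heap_node) == 4,
// and `heap` starting exactly on a 16-byte boundary, so boundaryOffset == 0):
// requiredOffset is 12 and NElemOffset is 3, so the first usable node sits at
// 'heap + 12' and the first pointer handed out ('node + 1') lands exactly on
// the 16-byte boundary at 'heap + 16'.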

void init_heap() {
  freelist = getFirstAlignedNodeInHeap();
  freelist->next_node = offset_from_node(list_end);
  freelist->len = static_cast<heap_size>(list_end - freelist);
}

// How big a chunk we allocate: the number of heap_node-sized units needed
// for 'len' bytes of payload, plus one unit for the chunk header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
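
// For instance (with sizeof(heap_node) == 4), alloc_size(1) == 2 (one node
// of payload plus the header) and alloc_size(10) == 4 (three nodes of
// payload plus the header), so a 10-byte request consumes 16 bytes of pool.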

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_node pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment.
    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }
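
    // Worked example (with NodesPerAlignment == 4): if nelems == 3 and
    // p->len == 10, then remaining_len == 7 and aligned_nelems becomes
    // 3 + 7 % 4 == 6, so a split leaves p->len == 4, a multiple of
    // NodesPerAlignment, and the tail node handed out below keeps the
    // RequiredAlignment invariant.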
    // The chunk is larger than needed and we can create a properly aligned
    // heap_node from the tail. In this case we shorten 'p' and return the tail.
    if (p->len > aligned_nelems) {
      heap_node* q;
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1;
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints.
    if (p->len >= nelems) {
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1;
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf("  p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
// Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("  Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
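
// Note on coalescing: freeing a chunk that is physically adjacent to a free
// chunk merges the two (the 'after(p) == cp' and 'after(cp) == p' cases
// above combine lengths instead of growing the free list), so repeated
// malloc/free cycles do not fragment the pool indefinitely.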

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : "  "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to the emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}
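
// Illustrative pairing (a sketch, not a new entry point): memory obtained
// from __aligned_malloc_with_fallback must be released through
// __aligned_free_with_fallback, which routes fallback-pool pointers to
// fallback_free and everything else to the matching system deallocator:
//   void* p = __aligned_malloc_with_fallback(64);
//   // ... use p ...
//   __aligned_free_with_fallback(p);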

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1