//===------------------------ fallback_malloc.cpp -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// Define _LIBCPP_BUILDING_LIBRARY to ensure _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
// is only defined when libc aligned allocation is not available.
#define _LIBCPP_BUILDING_LIBRARY
#include "fallback_malloc.h"

#include <__threading_support>

#include <cstdlib> // for malloc, calloc, free
#include <cstring> // for memset

// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool, supports malloc and free only.
// No support for realloc.
//
// Allocates chunks in multiples of four bytes, with a four byte header
// for each chunk. The overhead of each chunk is kept low by keeping pointers
// as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
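//
// A worked sketch of the layout (assuming 2-byte unsigned shorts, so that
// sizeof(heap_node) == 4): the 512-byte pool holds 128 heap_node units, so
// every offset fits comfortably in a 16-bit heap_offset, and a chunk of
// "len" units spans its heap_node header plus len - 1 payload units.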

namespace {

// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
_LIBCPP_SAFE_STATIC
static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

void init_heap() {
  freelist = (heap_node*)heap;
  freelist->next_node = offset_from_node(list_end);
  freelist->len = HEAP_SIZE / sizeof(heap_node);
}

// How big a chunk we allocate
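// For example (assuming 4-byte heap_nodes), alloc_size(10) is
// (10 + 3) / 4 + 1 == 4 units: three units to hold the rounded-up payload
// plus one unit for the chunk header.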
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    if (p->len > nelems) { // chunk is larger, shorten, and return the tail
      heap_node* q;

      p->len = static_cast<heap_size>(p->len - nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(nelems);
      return (void*)(q + 1);
    }

    if (p->len == nelems) { // exact size match
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      return (void*)(p + 1);
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }

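// Note: a freed chunk is coalesced with at most one adjacent free chunk per
// call (whichever adjacency the free-list walk below finds first); otherwise
// it is simply pushed onto the front of the free list.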
void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << "Freeing item at " << offset_from_node(cp) << " of size "
            << cp->len << std::endl;
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::cout << " p, cp, after (p), after(cp) " << offset_from_node(p) << ' '
              << offset_from_node(cp) << ' ' << offset_from_node(after(p))
              << ' ' << offset_from_node(after(cp)) << std::endl;
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << " Appending onto chunk at " << offset_from_node(p)
                << std::endl;
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << " Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
// Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::cout << " Making new free list entry " << offset_from_node(cp)
            << std::endl;
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::cout << (prev == 0 ? "" : " ") << "Offset: " << offset_from_node(p)
              << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
    total_free += p->len;
  }
  std::cout << "Total Free space: " << total_free << std::endl;
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = _aligned_malloc(size, alignof(__aligned_type)))
    return dest;
#elif defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION)
  if (void* dest = std::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  void* dest;
  if (::posix_memalign(&dest, alignof(__aligned_type), size) == 0)
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = std::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    std::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_WIN32)
    ::_aligned_free(ptr);
#else
    std::free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    std::free(ptr);
}

} // namespace __cxxabiv1