/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/debug.h>
#include <reef/panic.h>
#include <reef/trace.h>
#include <reef/lock.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug to set memory value on every allocation */
#define DEBUG_BLOCK_ALLOC	0
#define DEBUG_BLOCK_ALLOC_VALUE	0x6b6b6b6b

/* debug to set memory value on every free TODO: not working atm */
#define DEBUG_BLOCK_FREE	0
#define DEBUG_BLOCK_FREE_VALUE	0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e)	trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e)	trace_error(TRACE_CLASS_MEM, __e)

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
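
/*
 * Example usage (sketch - struct dev and struct comp are hypothetical, and
 * a bflags value of 0 is assumed to mean "no special flags"):
 *
 *	struct dev *d = rmalloc(RZONE_SYS, 0, sizeof(*d));	- never freed
 *	struct comp *c = rzalloc(RZONE_RUNTIME, 0, sizeof(*c));
 *	void *buf = rballoc(RZONE_RUNTIME, 0, 4096);
 *	...
 *	rfree(c);
 *	rbfree(buf);
 */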

struct block_hdr {
	uint16_t size;		/* size in blocks of this contiguous allocation */
	uint16_t flags;		/* usage flags for page */
} __attribute__ ((packed));

struct block_map {
	uint16_t block_size;	/* size of block in bytes */
	uint16_t count;		/* number of blocks in map */
	uint16_t free_count;	/* number of free blocks */
	uint16_t first_free;	/* index of first free block */
	struct block_hdr *block;	/* base block header */
	uint32_t base;		/* base address of space */
} __attribute__ ((packed));

#define BLOCK_DEF(sz, cnt, hdr) \
	{.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr}
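
/*
 * For example, BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16) expands to
 *
 *	{.block_size = 16, .count = HEAP_RT_COUNT16,
 *	 .free_count = HEAP_RT_COUNT16, .block = mod_block16}
 *
 * .base is left unset here and is filled in later by init_heap().
 */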

/* Heap blocks for modules */
//static struct block_hdr mod_block8[HEAP_RT_COUNT8];
static struct block_hdr mod_block16[HEAP_RT_COUNT16];
static struct block_hdr mod_block32[HEAP_RT_COUNT32];
static struct block_hdr mod_block64[HEAP_RT_COUNT64];
static struct block_hdr mod_block128[HEAP_RT_COUNT128];
static struct block_hdr mod_block256[HEAP_RT_COUNT256];
static struct block_hdr mod_block512[HEAP_RT_COUNT512];
static struct block_hdr mod_block1024[HEAP_RT_COUNT1024];

/* Heap memory map for modules */
static struct block_map rt_heap_map[] = {
/*	BLOCK_DEF(8, HEAP_RT_COUNT8, mod_block8), */
	BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16),
	BLOCK_DEF(32, HEAP_RT_COUNT32, mod_block32),
	BLOCK_DEF(64, HEAP_RT_COUNT64, mod_block64),
	BLOCK_DEF(128, HEAP_RT_COUNT128, mod_block128),
	BLOCK_DEF(256, HEAP_RT_COUNT256, mod_block256),
	BLOCK_DEF(512, HEAP_RT_COUNT512, mod_block512),
	BLOCK_DEF(1024, HEAP_RT_COUNT1024, mod_block1024),
};

/* Heap blocks for buffers */
static struct block_hdr buf_block[HEAP_BUFFER_COUNT];

/* Heap memory map for buffers */
static struct block_map buf_heap_map[] = {
	BLOCK_DEF(HEAP_BUFFER_BLOCK_SIZE, HEAP_BUFFER_COUNT, buf_block),
};

#if (HEAP_DMA_BUFFER_SIZE > 0)
/* Heap blocks for DMA buffers - these need their own headers, as sharing
 * buf_block with the buffer heap would corrupt both allocation maps */
static struct block_hdr dma_buf_block[HEAP_DMA_BUFFER_COUNT];

/* Heap memory map for DMA buffers - only used for HW with special DMA memories */
static struct block_map dma_buf_heap_map[] = {
	BLOCK_DEF(HEAP_DMA_BUFFER_BLOCK_SIZE, HEAP_DMA_BUFFER_COUNT, dma_buf_block),
};
#endif

struct mm_heap {
	uint32_t blocks;
	struct block_map *map;
	uint32_t heap;
	uint32_t size;
	struct mm_info info;
};

/* heap block memory map */
struct mm {
	struct mm_heap runtime;	/* general heap for components */
	struct mm_heap system;	/* system heap - used during init, cannot be freed */
	struct mm_heap buffer;	/* general component buffer heap */
#if (HEAP_DMA_BUFFER_SIZE > 0)
	struct mm_heap dma;	/* general component DMA buffer heap */
#endif
	struct mm_info total;
	spinlock_t lock;	/* all allocs and frees are atomic */
};

struct mm memmap = {
	.system = {
		.heap = HEAP_SYSTEM_BASE,
		.size = HEAP_SYSTEM_SIZE,
		.info = {.free = HEAP_SYSTEM_SIZE,},
	},

	.runtime = {
		.blocks = ARRAY_SIZE(rt_heap_map),
		.map = rt_heap_map,
		.heap = HEAP_RUNTIME_BASE,
		.size = HEAP_RUNTIME_SIZE,
		.info = {.free = HEAP_RUNTIME_SIZE,},
	},

	.buffer = {
		.blocks = ARRAY_SIZE(buf_heap_map),
		.map = buf_heap_map,
		.heap = HEAP_BUFFER_BASE,
		.size = HEAP_BUFFER_SIZE,
		.info = {.free = HEAP_BUFFER_SIZE,},
	},

#if (HEAP_DMA_BUFFER_SIZE > 0)
	.dma = {
		.blocks = ARRAY_SIZE(dma_buf_heap_map),
		.map = dma_buf_heap_map,
		.heap = HEAP_DMA_BUFFER_BASE,
		.size = HEAP_DMA_BUFFER_SIZE,
		.info = {.free = HEAP_DMA_BUFFER_SIZE,},
	},
#endif
	.total = {.free = HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE +
		HEAP_BUFFER_SIZE + HEAP_DMA_BUFFER_SIZE,},
};

/* total size of a block map, including all block headers and payload */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap, including all its block maps */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}
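
/*
 * E.g. (illustrative numbers) a heap with one map of 8 x 64 byte blocks
 * needs sizeof(struct mm_heap) + sizeof(struct block_map) +
 * 8 * (64 + sizeof(struct block_hdr)) bytes of PM context.
 */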

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr, i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_sys(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eMd");
		panic(SOF_IPC_PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}
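
/*
 * The system pool is a simple bump allocator: each call just advances the
 * heap pointer. E.g. (hypothetical addresses) with heap at 0x10000, a
 * 0x40 byte allocation returns 0x10000 and leaves heap at 0x10040; there
 * is no per-allocation header, which is why this memory can never be freed.
 */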

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level, int bflags)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->flags = RFLAGS_USED | bflags;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->flags == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}
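
/*
 * Block/pointer arithmetic used above and in free_block(), with made-up
 * numbers: if map->base = 0x40000 and map->block_size = 64, then block 3
 * starts at 0x40000 + 3 * 64 = 0x400c0, and the pointer 0x400c0 maps back
 * to block (0x400c0 - 0x40000) / 64 = 3.
 */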

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level, int bflags,
	size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start;
	unsigned int current;
	unsigned int count = bytes / map->block_size;
	unsigned int i;
	unsigned int remaining;
	unsigned int end;

	/* round up before sizing the search window */
	if (bytes % map->block_size)
		count++;

	if (count > map->count) {
		trace_mem_error("eCb");
		return NULL;
	}

	/* last candidate start index for a run of "count" blocks */
	remaining = map->count - count;

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start <= remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used - flags may carry more than
			 * RFLAGS_USED, so test the bit, not equality */
			if (hdr->flags & RFLAGS_USED)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->flags = RFLAGS_USED | bflags;
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->flags == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* free block(s) - the owning heap is derived from ptr itself */
static void free_block(void *ptr)
{
	struct mm_heap *mm_heap;
	struct block_map *block_map;
	struct block_hdr *hdr;
	int i;
	int block;
	int array_size;
	int end;

	/* sanity check */
	if (ptr == NULL)
		return;

	/* find mm_heap that ptr belongs to */
	if ((uint32_t)ptr >= memmap.runtime.heap &&
		(uint32_t)ptr < memmap.runtime.heap + memmap.runtime.size) {
		mm_heap = &memmap.runtime;
		array_size = ARRAY_SIZE(rt_heap_map);
	} else if ((uint32_t)ptr >= memmap.buffer.heap &&
		(uint32_t)ptr < memmap.buffer.heap + memmap.buffer.size) {
		mm_heap = &memmap.buffer;
		array_size = ARRAY_SIZE(buf_heap_map);
#if (HEAP_DMA_BUFFER_SIZE > 0)
	} else if ((uint32_t)ptr >= memmap.dma.heap &&
		(uint32_t)ptr < memmap.dma.heap + memmap.dma.size) {
		mm_heap = &memmap.dma;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
#endif
	} else
		return;

	/* find block map that ptr belongs to - ptr is in map i if it lies
	 * below the base of map i + 1, otherwise it is in the last map */
	for (i = 0; i < array_size - 1; i++) {
		if ((uint32_t)ptr < mm_heap->map[i + 1].base)
			break;
	}
	block_map = &mm_heap->map[i];

	/* calculate block header */
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
	hdr = &block_map->block[block];

	/* free block header and contiguous blocks - save the run length
	 * first, as the loop below clears each header's size */
	end = block + hdr->size;
	for (i = block; i < end; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->flags = 0;
		block_map->free_count++;
		mm_heap->info.used -= block_map->block_size;
		mm_heap->info.free += block_map->block_size;
	}

	/* set first free */
	if (block < block_map->first_free)
		block_map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, block_map->block_size * (end - block),
		DEBUG_BLOCK_FREE_VALUE);
#endif
}

/* allocate single block for runtime */
static void *rmalloc_runtime(int bflags, size_t bytes)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rt_heap_map); i++) {

		/* is block big enough */
		if (rt_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (rt_heap_map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(&memmap.runtime, i, bflags);
	}

	trace_mem_error("eMm");
	trace_value(bytes);
	trace_value(bflags);
	return NULL;
}

void *rmalloc(int zone, int bflags, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(bflags, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, int bflags, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, bflags, bytes);
	if (ptr != NULL) {
		bzero(ptr, bytes);
	}

	return ptr;
}
/* allocates a contiguous buffer on a block boundary */
void *rballoc(int zone, int bflags, size_t bytes)
{
	struct block_map *block_map = buf_heap_map;
	struct mm_heap *mm_heap = &memmap.buffer;
	int i;
	int array_size = ARRAY_SIZE(buf_heap_map);
	uint32_t flags;
	void *ptr = NULL;

#if (HEAP_DMA_BUFFER_SIZE > 0)
	if (bflags & RFLAGS_DMA) {
		mm_heap = &memmap.dma;
		block_map = dma_buf_heap_map;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
	}
#endif
	spin_lock_irq(&memmap.lock, flags);

	/* will request fit in single block */
	for (i = 0; i < array_size; i++) {

		/* is block big enough */
		if (block_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (block_map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(mm_heap, i, bflags);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (array_size == 1) {
		ptr = alloc_cont_blocks(mm_heap, 0, bflags, bytes);
		goto out;
	}

	/* find best block size for request - try the largest block size
	 * smaller than the request first, so the span uses fewest blocks */
	for (i = array_size - 1; i >= 0; i--) {

		if (block_map[i].block_size < bytes) {
			ptr = alloc_cont_blocks(mm_heap, i, bflags, bytes);
			if (ptr != NULL)
				goto out;
		}
	}

	/* fall back to spanning blocks of the largest size */
	ptr = alloc_cont_blocks(mm_heap, array_size - 1, bflags, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}
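
/*
 * Example (sketch): allocating a DMA-capable buffer, assuming the platform
 * sets HEAP_DMA_BUFFER_SIZE > 0 and PERIOD_BYTES is a caller-defined size:
 *
 *	void *dma_buf = rballoc(RZONE_RUNTIME, RFLAGS_DMA, PERIOD_BYTES);
 *
 *	if (dma_buf == NULL)
 *		return -ENOMEM;
 *	...
 *	rbfree(dma_buf);
 */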

void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

void rbfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size;

	/* calc context size for each area */
	size = memmap.buffer.info.used;
	size += memmap.runtime.info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	size += heap_get_size(&memmap.buffer);
	size += heap_get_size(&memmap.runtime);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.buffer.info.free +
		memmap.runtime.info.free + memmap.system.info.free;
	memmap.total.used = memmap.buffer.info.used +
		memmap.runtime.info.used + memmap.system.info.used;

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function.
 * No allocations are permitted after calling this and before calling
 * restore.
 */
int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0;
	int32_t ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting before any pipeline work.
 */
int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
{
	int32_t offset = 0;
	int32_t ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG - this is a restore, so it
	 * must copy from the host, not to it */
	ret = dma_copy_from_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	return 0;
}

/* initialise map */
void init_heap(struct reef *reef)
{
	struct block_map *next_map;
	struct block_map *current_map;
	int i;

	/* sanity check for malformed images or loader issues */
	if (memmap.system.heap != HEAP_SYSTEM_BASE)
		panic(SOF_IPC_PANIC_MEM);

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	current_map = &buf_heap_map[0];
	current_map->base = memmap.buffer.heap;

	for (i = 1; i < ARRAY_SIZE(buf_heap_map); i++) {
		next_map = &buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &buf_heap_map[i];
	}

	/* initialise runtime map */
	current_map = &rt_heap_map[0];
	current_map->base = memmap.runtime.heap;

	for (i = 1; i < ARRAY_SIZE(rt_heap_map); i++) {
		next_map = &rt_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &rt_heap_map[i];
	}

#if (HEAP_DMA_BUFFER_SIZE > 0)
	/* initialise DMA map */
	current_map = &dma_buf_heap_map[0];
	current_map->base = memmap.dma.heap;

	for (i = 1; i < ARRAY_SIZE(dma_buf_heap_map); i++) {
		next_map = &dma_buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &dma_buf_heap_map[i];
	}
#endif
}