/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/debug.h>
#include <reef/trace.h>
#include <reef/lock.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug to set memory value on every allocation */
#define DEBUG_BLOCK_ALLOC	0
#define DEBUG_BLOCK_ALLOC_VALUE	0x6b6b6b6b

/* debug to set memory value on every free TODO: not working atm */
#define DEBUG_BLOCK_FREE	0
#define DEBUG_BLOCK_FREE_VALUE	0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e)	trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e)	trace_error(TRACE_CLASS_MEM, __e)

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */

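/*
 * Illustrative call sequence for the three pools. This is a sketch only:
 * the RZONE_* and RFLAGS_* values come from reef/alloc.h, and a bflags
 * argument of 0 is assumed to mean "no special buffer flags".
 *
 *	void *dev = rmalloc(RZONE_SYS, 0, 64);	     - pool 1, never freed
 *	void *comp = rzalloc(RZONE_RUNTIME, 0, 64); - pool 2, zeroed
 *	void *buf = rballoc(0, RFLAGS_DMA, 4096);   - pool 3 (DMA heap if present)
 *
 *	rfree(comp);	- runtime memory is returned with rfree()
 *	rbfree(buf);	- buffer/DMA memory is returned with rbfree()
 */
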
struct block_hdr {
	uint16_t size;	/* size in blocks of this contiguous allocation */
	uint16_t flags;	/* usage flags for page */
} __attribute__ ((packed));

struct block_map {
	uint16_t block_size;	/* size of block in bytes */
	uint16_t count;		/* number of blocks in map */
	uint16_t free_count;	/* number of free blocks */
	uint16_t first_free;	/* index of first free block */
	struct block_hdr *block;	/* base block header */
	uint32_t base;		/* base address of space */
} __attribute__ ((packed));

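/*
 * How a pointer finds its way back to a block header - a sketch that
 * mirrors the arithmetic used by free_block() below:
 *
 *	block = ((uint32_t)ptr - map->base) / map->block_size;
 *	hdr = &map->block[block];
 *	hdr->size then gives the length of the contiguous run in blocks.
 */
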
#define BLOCK_DEF(sz, cnt, hdr) \
	{.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr}

/* Heap blocks for modules */
//static struct block_hdr mod_block8[HEAP_RT_COUNT8];
static struct block_hdr mod_block16[HEAP_RT_COUNT16];
static struct block_hdr mod_block32[HEAP_RT_COUNT32];
static struct block_hdr mod_block64[HEAP_RT_COUNT64];
static struct block_hdr mod_block128[HEAP_RT_COUNT128];
static struct block_hdr mod_block256[HEAP_RT_COUNT256];
static struct block_hdr mod_block512[HEAP_RT_COUNT512];
static struct block_hdr mod_block1024[HEAP_RT_COUNT1024];

/* Heap memory map for modules */
static struct block_map rt_heap_map[] = {
/*	BLOCK_DEF(8, HEAP_RT_COUNT8, mod_block8), */
	BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16),
	BLOCK_DEF(32, HEAP_RT_COUNT32, mod_block32),
	BLOCK_DEF(64, HEAP_RT_COUNT64, mod_block64),
	BLOCK_DEF(128, HEAP_RT_COUNT128, mod_block128),
	BLOCK_DEF(256, HEAP_RT_COUNT256, mod_block256),
	BLOCK_DEF(512, HEAP_RT_COUNT512, mod_block512),
	BLOCK_DEF(1024, HEAP_RT_COUNT1024, mod_block1024),
};

/* Heap blocks for buffers */
static struct block_hdr buf_block[HEAP_BUFFER_COUNT];

/* Heap memory map for buffers */
static struct block_map buf_heap_map[] = {
	BLOCK_DEF(HEAP_BUFFER_BLOCK_SIZE, HEAP_BUFFER_COUNT, buf_block),
};

#if (HEAP_DMA_BUFFER_SIZE > 0)
/* Heap blocks for DMA buffers - must not share headers with buf_block */
static struct block_hdr dma_buf_block[HEAP_DMA_BUFFER_COUNT];

/* Heap memory map for DMA buffers - only used for HW with special DMA memories */
static struct block_map dma_buf_heap_map[] = {
	BLOCK_DEF(HEAP_DMA_BUFFER_BLOCK_SIZE, HEAP_DMA_BUFFER_COUNT, dma_buf_block),
};
#endif

struct mm_heap {
	uint32_t blocks;
	struct block_map *map;
	uint32_t heap;
	uint32_t size;
	struct mm_info info;
};

/* heap block memory map */
struct mm {
	struct mm_heap runtime;	/* general heap for components */
	struct mm_heap system;	/* system heap - used during init, cannot be freed */
	struct mm_heap buffer;	/* general component buffer heap */
#if (HEAP_DMA_BUFFER_SIZE > 0)
	struct mm_heap dma;	/* general component DMA buffer heap */
#endif
	struct mm_info total;
	spinlock_t lock;	/* all allocs and frees are atomic */
};

struct mm memmap = {
	.system = {
		.heap = HEAP_SYSTEM_BASE,
		.size = HEAP_SYSTEM_SIZE,
		.info = {.free = HEAP_SYSTEM_SIZE,},
	},

	.runtime = {
		.blocks = ARRAY_SIZE(rt_heap_map),
		.map = rt_heap_map,
		.heap = HEAP_RUNTIME_BASE,
		.size = HEAP_RUNTIME_SIZE,
		.info = {.free = HEAP_RUNTIME_SIZE,},
	},

	.buffer = {
		.blocks = ARRAY_SIZE(buf_heap_map),
		.map = buf_heap_map,
		.heap = HEAP_BUFFER_BASE,
		.size = HEAP_BUFFER_SIZE,
		.info = {.free = HEAP_BUFFER_SIZE,},
	},

#if (HEAP_DMA_BUFFER_SIZE > 0)
	.dma = {
		.blocks = ARRAY_SIZE(dma_buf_heap_map),
		.map = dma_buf_heap_map,
		.heap = HEAP_DMA_BUFFER_BASE,
		.size = HEAP_DMA_BUFFER_SIZE,
		.info = {.free = HEAP_DMA_BUFFER_SIZE,},
	},
#endif
	.total = {.free = HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE +
		HEAP_BUFFER_SIZE + HEAP_DMA_BUFFER_SIZE,},
};

/* total size of a block map, including its block data and headers */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr, i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_sys(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eMd");
		panic(PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level, int bflags)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->flags = RFLAGS_USED | bflags;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->flags == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level, int bflags,
	size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start, current, count = bytes / map->block_size;
	unsigned int i, remaining = map->count - count, end;

	if (bytes % map->block_size)
		count++;

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start < remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used - flags may also carry caller bflags */
			if (hdr->flags & RFLAGS_USED)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->flags = RFLAGS_USED | bflags;
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->flags == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *mm_heap;
	struct block_map *block_map;
	struct block_hdr *hdr;
	int i, block, count, array_size;

	/* sanity check */
	if (ptr == NULL)
		return;

	/* find mm_heap that ptr belongs to */
	if ((uint32_t)ptr >= memmap.runtime.heap &&
		(uint32_t)ptr < memmap.runtime.heap + memmap.runtime.size) {
		mm_heap = &memmap.runtime;
		array_size = ARRAY_SIZE(rt_heap_map);
	} else if ((uint32_t)ptr >= memmap.buffer.heap &&
		(uint32_t)ptr < memmap.buffer.heap + memmap.buffer.size) {
		mm_heap = &memmap.buffer;
		array_size = ARRAY_SIZE(buf_heap_map);
#if (HEAP_DMA_BUFFER_SIZE > 0)
	} else if ((uint32_t)ptr >= memmap.dma.heap &&
		(uint32_t)ptr < memmap.dma.heap + memmap.dma.size) {
		mm_heap = &memmap.dma;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
#endif
	} else {
		/* ptr is not in any heap */
		trace_mem_error("eMF");
		return;
	}

	/* find block map that ptr belongs to - the bounds check above
	 * guarantees a fall through lands in the last map */
	for (i = 0; i < array_size - 1; i++) {

		/* is ptr in this map */
		if ((uint32_t)ptr < mm_heap->map[i + 1].base)
			break;
	}

	block_map = &mm_heap->map[i];

	/* calculate block header */
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
	hdr = &block_map->block[block];

	/* free block header and contiguous blocks - cache the run length
	 * first, since the loop below clears it */
	count = hdr->size;
	for (i = block; i < block + count; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->flags = 0;
		block_map->free_count++;
		mm_heap->info.used -= block_map->block_size;
		mm_heap->info.free += block_map->block_size;
	}

	/* set first free */
	if (block < block_map->first_free)
		block_map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, block_map->block_size * count, DEBUG_BLOCK_FREE_VALUE);
#endif
}

/* allocate single block for runtime */
static void *rmalloc_runtime(int bflags, size_t bytes)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rt_heap_map); i++) {

		/* is block big enough */
		if (rt_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (rt_heap_map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(&memmap.runtime, i, bflags);
	}

	trace_mem_error("eMm");
	trace_value(bytes);
	trace_value(bflags);
	return NULL;
}

void *rmalloc(int zone, int bflags, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(bflags, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, int bflags, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, bflags, bytes);
	if (ptr != NULL) {
		bzero(ptr, bytes);
	}

	return ptr;
}

/* allocates a contiguous buffer on a 1k boundary */
void *rballoc(int zone, int bflags, size_t bytes)
{
	struct block_map *block_map = buf_heap_map;
	struct mm_heap *mm_heap = &memmap.buffer;
	int i, array_size = ARRAY_SIZE(buf_heap_map);
	uint32_t flags;
	void *ptr = NULL;

#if (HEAP_DMA_BUFFER_SIZE > 0)
	if (bflags & RFLAGS_DMA) {
		mm_heap = &memmap.dma;
		block_map = dma_buf_heap_map;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
	}
#endif
	spin_lock_irq(&memmap.lock, flags);

	/* will request fit in single block */
	for (i = 0; i < array_size; i++) {

		trace_value(block_map[i].block_size);
		/* is block big enough */
		if (block_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (block_map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(mm_heap, i, bflags);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (array_size == 1) {
		ptr = alloc_cont_blocks(mm_heap, 0, bflags, bytes);
		goto out;
	} else {

		/* find best block size for request - try each map whose
		 * block size is smaller than the request */
		for (i = 0; i < array_size; i++) {

			if (block_map[i].block_size < bytes) {
				ptr = alloc_cont_blocks(mm_heap, i, bflags,
					bytes);
				if (ptr != NULL)
					goto out;
			}
		}
	}

	/* last resort - allocate from the largest block map */
	ptr = alloc_cont_blocks(mm_heap, array_size - 1,
		bflags, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}
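
/*
 * Example: requesting a DMA-capable buffer (a sketch only - the 2048 byte
 * size is illustrative, and the zone argument is currently ignored by
 * rballoc()):
 *
 *	void *period_buf = rballoc(0, RFLAGS_DMA, 2048);
 *	if (period_buf == NULL)
 *		return -ENOMEM;
 *	...
 *	rbfree(period_buf);
 */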

void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

void rbfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size;

	/* calc context size for each area */
	size = memmap.buffer.info.used;
	size += memmap.runtime.info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	size += heap_get_size(&memmap.buffer);
	size += heap_get_size(&memmap.runtime);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.buffer.info.free +
		memmap.runtime.info.free + memmap.system.info.free;
	memmap.total.used = memmap.buffer.info.used +
		memmap.runtime.info.used + memmap.system.info.used;

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function.
 * No allocations are permitted after calling this and before calling
 * restore.
 */
int mm_pm_context_save(struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0, ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting, before any pipeline work.
 */
int mm_pm_context_restore(struct dma_sg_config *sg)
{
	int32_t offset = 0, ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	return 0;
}

/* initialise map */
void init_heap(struct reef *reef)
{
	struct block_map *next_map, *current_map;
	int i;

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	current_map = &buf_heap_map[0];
	current_map->base = memmap.buffer.heap;

	for (i = 1; i < ARRAY_SIZE(buf_heap_map); i++) {
		next_map = &buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &buf_heap_map[i];
	}

	/* initialise runtime map */
	current_map = &rt_heap_map[0];
	current_map->base = memmap.runtime.heap;

	for (i = 1; i < ARRAY_SIZE(rt_heap_map); i++) {
		next_map = &rt_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &rt_heap_map[i];
	}

#if (HEAP_DMA_BUFFER_SIZE > 0)
	/* initialise DMA map */
	current_map = &dma_buf_heap_map[0];
	current_map->base = memmap.dma.heap;

	for (i = 1; i < ARRAY_SIZE(dma_buf_heap_map); i++) {
		next_map = &dma_buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &dma_buf_heap_map[i];
	}
#endif
}