/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */
31
32#include <reef/alloc.h>
33#include <reef/reef.h>
34#include <reef/debug.h>
35#include <reef/trace.h>
36#include <reef/lock.h>
37#include <platform/memory.h>
38#include <stdint.h>
39
40/* debug to set memory value on every allocation */
41#define DEBUG_BLOCK_ALLOC 0
42#define DEBUG_BLOCK_ALLOC_VALUE 0x6b6b6b6b
43
44/* debug to set memory value on every free TODO: not working atm */
45#define DEBUG_BLOCK_FREE 0
46#define DEBUG_BLOCK_FREE_VALUE 0x5a5a5a5a
47
48/* memory tracing support */
49#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
50#define trace_mem(__e) trace_event(TRACE_CLASS_MEM, __e)
51#else
52#define trace_mem(__e)
53#endif
54
55#define trace_mem_error(__e) trace_error(TRACE_CLASS_MEM, __e)
56
/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
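
/*
 * Illustrative usage sketch of the three pools (not from this file - a
 * bflags value of 0 below simply means no extra block flags; the zone
 * names are the ones handled in rmalloc() further down):
 *
 *	void *sys = rmalloc(RZONE_SYS, 0, 64);       // system pool, never freed
 *	void *mod = rzalloc(RZONE_RUNTIME, 0, 64);   // runtime pool, zeroed
 *	void *buf = rballoc(RZONE_RUNTIME, 0, 4096); // buffer pool
 *	...
 *	rfree(mod);	// returns runtime pool memory
 *	rbfree(buf);	// returns buffer pool memory
 */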

struct block_hdr {
	uint16_t size;		/* size in blocks of this contiguous allocation */
	uint16_t flags;		/* usage flags for page */
} __attribute__ ((packed));

struct block_map {
	uint16_t block_size;	/* size of block in bytes */
	uint16_t count;		/* number of blocks in map */
	uint16_t free_count;	/* number of free blocks */
	uint16_t first_free;	/* index of first free block */
	struct block_hdr *block;	/* base block header */
	uint32_t base;		/* base address of space */
} __attribute__ ((packed));

#define BLOCK_DEF(sz, cnt, hdr) \
	{.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr}
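
/*
 * For reference, BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16) expands to:
 *
 *	{ .block_size = 16, .count = HEAP_RT_COUNT16,
 *	  .free_count = HEAP_RT_COUNT16, .block = mod_block16 }
 *
 * .base and .first_free are implicitly zero here; init_heap() assigns each
 * map its base address at boot.
 */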

/* Heap blocks for modules */
//static struct block_hdr mod_block8[HEAP_RT_COUNT8];
static struct block_hdr mod_block16[HEAP_RT_COUNT16];
static struct block_hdr mod_block32[HEAP_RT_COUNT32];
static struct block_hdr mod_block64[HEAP_RT_COUNT64];
static struct block_hdr mod_block128[HEAP_RT_COUNT128];
static struct block_hdr mod_block256[HEAP_RT_COUNT256];
static struct block_hdr mod_block512[HEAP_RT_COUNT512];
static struct block_hdr mod_block1024[HEAP_RT_COUNT1024];

/* Heap memory map for modules */
static struct block_map rt_heap_map[] = {
/*	BLOCK_DEF(8, HEAP_RT_COUNT8, mod_block8), */
	BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16),
	BLOCK_DEF(32, HEAP_RT_COUNT32, mod_block32),
	BLOCK_DEF(64, HEAP_RT_COUNT64, mod_block64),
	BLOCK_DEF(128, HEAP_RT_COUNT128, mod_block128),
	BLOCK_DEF(256, HEAP_RT_COUNT256, mod_block256),
	BLOCK_DEF(512, HEAP_RT_COUNT512, mod_block512),
	BLOCK_DEF(1024, HEAP_RT_COUNT1024, mod_block1024),
};

/* Heap blocks for buffers */
static struct block_hdr buf_block[HEAP_BUFFER_COUNT];

/* Heap memory map for buffers */
static struct block_map buf_heap_map[] = {
	BLOCK_DEF(HEAP_BUFFER_BLOCK_SIZE, HEAP_BUFFER_COUNT, buf_block),
};

#if (HEAP_DMA_BUFFER_SIZE > 0)
/* Heap blocks for DMA buffers - must not share headers with buf_block */
static struct block_hdr dma_buf_block[HEAP_DMA_BUFFER_COUNT];

/* Heap memory map for DMA buffers - only used for HW with special DMA memories */
static struct block_map dma_buf_heap_map[] = {
	BLOCK_DEF(HEAP_DMA_BUFFER_BLOCK_SIZE, HEAP_DMA_BUFFER_COUNT, dma_buf_block),
};
#endif

struct mm_heap {
	uint32_t blocks;
	struct block_map *map;
	uint32_t heap;
	uint32_t size;
	struct mm_info info;
};

/* heap block memory map */
struct mm {
	struct mm_heap runtime;	/* general heap for components */
	struct mm_heap system;	/* system heap - used during init, cannot be freed */
	struct mm_heap buffer;	/* general component buffer heap */
#if (HEAP_DMA_BUFFER_SIZE > 0)
	struct mm_heap dma;	/* general component DMA buffer heap */
#endif
	struct mm_info total;
	spinlock_t lock;	/* all allocs and frees are atomic */
};

struct mm memmap = {
	.system = {
		.heap = HEAP_SYSTEM_BASE,
		.size = HEAP_SYSTEM_SIZE,
		.info = {.free = HEAP_SYSTEM_SIZE,},
	},

	.runtime = {
		.blocks = ARRAY_SIZE(rt_heap_map),
		.map = rt_heap_map,
		.heap = HEAP_RUNTIME_BASE,
		.size = HEAP_RUNTIME_SIZE,
		.info = {.free = HEAP_RUNTIME_SIZE,},
	},

	.buffer = {
		.blocks = ARRAY_SIZE(buf_heap_map),
		.map = buf_heap_map,
		.heap = HEAP_BUFFER_BASE,
		.size = HEAP_BUFFER_SIZE,
		.info = {.free = HEAP_BUFFER_SIZE,},
	},

#if (HEAP_DMA_BUFFER_SIZE > 0)
	.dma = {
		.blocks = ARRAY_SIZE(dma_buf_heap_map),
		.map = dma_buf_heap_map,
		.heap = HEAP_DMA_BUFFER_BASE,
		.size = HEAP_DMA_BUFFER_SIZE,
		.info = {.free = HEAP_DMA_BUFFER_SIZE,},
	},
#endif
	.total = {.free = HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE +
		HEAP_BUFFER_SIZE + HEAP_DMA_BUFFER_SIZE,},
};

/* total size of block */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++)
		size += block_get_size(&heap->map[i]);

	return size;
}

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr;
	uint32_t i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_sys(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eMd");
		panic(PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level, int bflags)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->flags = RFLAGS_USED | bflags;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->flags == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level, int bflags,
	size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr;
	void *ptr;
	unsigned int start, current, end;
	unsigned int i, count = bytes / map->block_size;

	/* round up block count for any remainder */
	if (bytes % map->block_size)
		count++;

	/* request can never fit in this map */
	if (count > map->count) {
		trace_mem_error("eCb");
		return NULL;
	}

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start + count <= map->count; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used */
			if (hdr->flags & RFLAGS_USED)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->flags = RFLAGS_USED | bflags;
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->flags == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* free block(s) */
static void free_block(struct mm_heap *heap, void *ptr)
{
	struct block_map *map;
	struct block_hdr *hdr;
	int i, block, size;

	/* sanity check */
	if (ptr == NULL)
		return;

	/* find the map in this heap that ptr belongs to */
	for (i = 0; i < heap->blocks; i++) {

		/* is ptr in this map's address range ? */
		if ((uint32_t)ptr >= heap->map[i].base &&
			(uint32_t)ptr < heap->map[i].base +
			heap->map[i].block_size * heap->map[i].count)
			goto found;
	}

	/* not found */
	trace_mem_error("eMF");
	return;

found:
	/* calculate block header */
	map = &heap->map[i];
	block = ((uint32_t)ptr - map->base) / map->block_size;
	hdr = &map->block[block];

	/* take the allocation size before the headers are cleared below */
	size = hdr->size;

	/* free block header and contiguous blocks */
	for (i = block; i < block + size; i++) {
		hdr = &map->block[i];
		hdr->size = 0;
		hdr->flags = 0;
		map->free_count++;
		heap->info.used -= map->block_size;
		heap->info.free += map->block_size;
	}

	/* set first free */
	if (block < map->first_free)
		map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, map->block_size * size, DEBUG_BLOCK_FREE_VALUE);
#endif
}

/* allocate single block for runtime */
static void *rmalloc_runtime(int bflags, size_t bytes)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rt_heap_map); i++) {

		/* is block big enough */
		if (rt_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (rt_heap_map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(&memmap.runtime, i, bflags);
	}

	trace_mem_error("eMm");
	trace_value(bytes);
	trace_value(bflags);
	return NULL;
}

void *rmalloc(int zone, int bflags, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(bflags, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, int bflags, size_t bytes)
{
	void *ptr;

	ptr = rmalloc(zone, bflags, bytes);
	if (ptr != NULL)
		bzero(ptr, bytes);

	return ptr;
}

/* allocates a contiguous buffer on a 1k boundary */
void *rballoc(int zone, int bflags, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;
	int i;

	spin_lock_irq(&memmap.lock, flags);

	/* will request fit in single block */
	for (i = 0; i < ARRAY_SIZE(buf_heap_map); i++) {

		/* is block big enough */
		if (buf_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (buf_heap_map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(&memmap.buffer, i, bflags);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (ARRAY_SIZE(buf_heap_map) == 1) {
		ptr = alloc_cont_blocks(&memmap.buffer, 0, bflags, bytes);
		goto out;
	}

	/* find best block size for request */
	for (i = 0; i < ARRAY_SIZE(buf_heap_map); i++) {

		/* try maps whose block size is smaller than the request */
		if (buf_heap_map[i].block_size < bytes) {
			ptr = alloc_cont_blocks(&memmap.buffer, i, bflags,
				bytes);
			if (ptr != NULL)
				goto out;
		}
	}

	/* fall back to the largest block size */
	ptr = alloc_cont_blocks(&memmap.buffer, ARRAY_SIZE(buf_heap_map) - 1,
		bflags, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}
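
/*
 * Sketch of a typical rballoc() caller (period_bytes and the -ENOMEM
 * handling are illustrative, not taken from this file):
 *
 *	uint8_t *period_buf = rballoc(RZONE_RUNTIME, 0, period_bytes);
 *	if (period_buf == NULL)
 *		return -ENOMEM;
 *	...
 *	rbfree(period_buf);
 */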

void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(&memmap.runtime, ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

void rbfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(&memmap.buffer, ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size;

	/* calc context size for each area */
	size = memmap.buffer.info.used;
	size += memmap.runtime.info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	size += heap_get_size(&memmap.buffer);
	size += heap_get_size(&memmap.runtime);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.buffer.info.free +
		memmap.runtime.info.free + memmap.system.info.free;
	memmap.total.used = memmap.buffer.info.used +
		memmap.runtime.info.used + memmap.system.info.used;

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function.
 * No allocations are permitted after calling this and before calling
 * restore.
 */
int mm_pm_context_save(struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0, ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting, before any pipeline work.
 */
int mm_pm_context_restore(struct dma_sg_config *sg)
{
	int32_t offset = 0, ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	return 0;
}
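
/*
 * Sketch of the expected suspend/resume ordering under the constraints
 * documented above (platform_get_pm_sg() is a hypothetical helper standing
 * in for however the platform provides the host SG buffer):
 *
 *	struct dma_sg_config *sg = platform_get_pm_sg();
 *
 *	if (mm_pm_context_size() <= dma_sg_get_size(sg))
 *		mm_pm_context_save(sg);		// last step before D3 entry
 *	...
 *	mm_pm_context_restore(sg);		// first step after reboot
 */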

/* initialise map */
void init_heap(void)
{
	struct block_map *next_map, *current_map;
	int i;

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	current_map = &buf_heap_map[0];
	current_map->base = memmap.buffer.heap;

	for (i = 1; i < ARRAY_SIZE(buf_heap_map); i++) {
		next_map = &buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &buf_heap_map[i];
	}

	/* initialise runtime map */
	current_map = &rt_heap_map[0];
	current_map->base = memmap.runtime.heap;

	for (i = 1; i < ARRAY_SIZE(rt_heap_map); i++) {
		next_map = &rt_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &rt_heap_map[i];
	}

#if (HEAP_DMA_BUFFER_SIZE > 0)
	/* initialise DMA map */
	current_map = &dma_buf_heap_map[0];
	current_map->base = memmap.dma.heap;

	for (i = 1; i < ARRAY_SIZE(dma_buf_heap_map); i++) {
		next_map = &dma_buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &dma_buf_heap_map[i];
	}
#endif
}