/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/debug.h>
#include <reef/trace.h>
#include <reef/lock.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug: set memory to a known value on every allocation */
#define DEBUG_BLOCK_ALLOC 0
#define DEBUG_BLOCK_ALLOC_VALUE 0x6b6b6b6b

/* debug: set memory to a known value on every free. TODO: not working atm */
#define DEBUG_BLOCK_FREE 0
#define DEBUG_BLOCK_FREE_VALUE 0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e) trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e) trace_error(TRACE_CLASS_MEM, __e)

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
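
/*
 * A minimal usage sketch, illustrative only (real callers live in driver
 * and component code):
 *
 *	struct foo *f = rzalloc(RZONE_RUNTIME, 0, sizeof(*f));
 *	...
 *	rfree(f);
 *
 * RZONE_SYS allocations use rmalloc() the same way but can never be freed.
 */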

struct block_hdr {
	uint16_t size;		/* size in blocks of this contiguous allocation */
	uint16_t flags;		/* usage flags for page */
} __attribute__ ((packed));

struct block_map {
	uint16_t block_size;	/* size of block in bytes */
	uint16_t count;		/* number of blocks in map */
	uint16_t free_count;	/* number of free blocks */
	uint16_t first_free;	/* index of first free block */
	struct block_hdr *block;	/* base block header */
	uint32_t base;		/* base address of space */
} __attribute__ ((packed));

#define BLOCK_DEF(sz, cnt, hdr) \
	{.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr}
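
/*
 * Block addresses are computed rather than stored: for a map with 64 byte
 * blocks based at 0x1000 (example numbers only), block i lives at
 * 0x1000 + i * 64 and its state is tracked in map->block[i]. alloc_block()
 * and free_block() below both rely on this arithmetic.
 */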

/* Heap blocks for modules */
//static struct block_hdr mod_block8[HEAP_RT_COUNT8];
static struct block_hdr mod_block16[HEAP_RT_COUNT16];
static struct block_hdr mod_block32[HEAP_RT_COUNT32];
static struct block_hdr mod_block64[HEAP_RT_COUNT64];
static struct block_hdr mod_block128[HEAP_RT_COUNT128];
static struct block_hdr mod_block256[HEAP_RT_COUNT256];
static struct block_hdr mod_block512[HEAP_RT_COUNT512];
static struct block_hdr mod_block1024[HEAP_RT_COUNT1024];

/* Heap memory map for modules */
static struct block_map rt_heap_map[] = {
/*	BLOCK_DEF(8, HEAP_RT_COUNT8, mod_block8), */
	BLOCK_DEF(16, HEAP_RT_COUNT16, mod_block16),
	BLOCK_DEF(32, HEAP_RT_COUNT32, mod_block32),
	BLOCK_DEF(64, HEAP_RT_COUNT64, mod_block64),
	BLOCK_DEF(128, HEAP_RT_COUNT128, mod_block128),
	BLOCK_DEF(256, HEAP_RT_COUNT256, mod_block256),
	BLOCK_DEF(512, HEAP_RT_COUNT512, mod_block512),
	BLOCK_DEF(1024, HEAP_RT_COUNT1024, mod_block1024),
};
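
/*
 * The maps above are sorted by ascending block size; rmalloc_runtime() and
 * rballoc() scan them in order and take the first map whose blocks are both
 * large enough and not exhausted.
 */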

/* Heap blocks for buffers */
static struct block_hdr buf_block[HEAP_BUFFER_COUNT];

/* Heap memory map for buffers */
static struct block_map buf_heap_map[] = {
	BLOCK_DEF(HEAP_BUFFER_BLOCK_SIZE, HEAP_BUFFER_COUNT, buf_block),
};
115
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100116#if (HEAP_DMA_BUFFER_SIZE > 0)
117/* Heap memory map for DMA buffers - only used for HW with special DMA memories */
118static struct block_map dma_buf_heap_map[] = {
119 BLOCK_DEF(HEAP_DMA_BUFFER_BLOCK_SIZE, HEAP_DMA_BUFFER_COUNT, buf_block),
120};
121#endif

struct mm_heap {
	uint32_t blocks;
	struct block_map *map;
	uint32_t heap;
	uint32_t size;
	struct mm_info info;
};

/* heap block memory map */
struct mm {
	struct mm_heap runtime;	/* general heap for components */
	struct mm_heap system;	/* system heap - used during init, cannot be freed */
	struct mm_heap buffer;	/* general component buffer heap */
#if (HEAP_DMA_BUFFER_SIZE > 0)
	struct mm_heap dma;	/* general component DMA buffer heap */
#endif
	struct mm_info total;
	spinlock_t lock;	/* all allocs and frees are atomic */
};

struct mm memmap = {
	.system = {
		.heap = HEAP_SYSTEM_BASE,
		.size = HEAP_SYSTEM_SIZE,
		.info = {.free = HEAP_SYSTEM_SIZE,},
	},

	.runtime = {
		.blocks = ARRAY_SIZE(rt_heap_map),
		.map = rt_heap_map,
		.heap = HEAP_RUNTIME_BASE,
		.size = HEAP_RUNTIME_SIZE,
		.info = {.free = HEAP_RUNTIME_SIZE,},
	},

	.buffer = {
		.blocks = ARRAY_SIZE(buf_heap_map),
		.map = buf_heap_map,
		.heap = HEAP_BUFFER_BASE,
		.size = HEAP_BUFFER_SIZE,
		.info = {.free = HEAP_BUFFER_SIZE,},
	},

#if (HEAP_DMA_BUFFER_SIZE > 0)
	.dma = {
		.blocks = ARRAY_SIZE(dma_buf_heap_map),
		.map = dma_buf_heap_map,
		.heap = HEAP_DMA_BUFFER_BASE,
		.size = HEAP_DMA_BUFFER_SIZE,
		.info = {.free = HEAP_DMA_BUFFER_SIZE,},
	},
#endif
	.total = {.free = HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE +
		HEAP_BUFFER_SIZE + HEAP_DMA_BUFFER_SIZE,},
};

/* total size of a block map, including its blocks and block headers */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr, i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
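/*
 * This is a simple bump allocator: the system heap pointer only ever
 * advances, so memory allocated here can never be freed (see pool 1 above).
 */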
static void *rmalloc_sys(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eMd");
		panic(PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level, int bflags)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->flags = RFLAGS_USED | bflags;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->flags == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level, int bflags,
	size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start;
	unsigned int current;
	unsigned int count = bytes / map->block_size;
	unsigned int i;
	unsigned int remaining;
	unsigned int end;

	if (bytes % map->block_size)
		count++;
	remaining = map->count - count;
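	/*
	 * count is now rounded up to whole blocks - e.g. a 3000 byte request
	 * from a 1024 byte block map needs count = 3 - and remaining bounds
	 * the start index search below so a candidate run never runs past
	 * the end of the map.
	 */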

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start < remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used */
			if (hdr->flags & RFLAGS_USED)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->flags = RFLAGS_USED | bflags;
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->flags == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *mm_heap;
	struct block_map *block_map;
	struct block_hdr *hdr;
	int i;
	int block;
	int array_size;
	int size;

	/* sanity check */
	if (ptr == NULL)
		return;

	/* find mm_heap that ptr belongs to */
	if ((uint32_t)ptr >= memmap.runtime.heap &&
		(uint32_t)ptr < memmap.runtime.heap + memmap.runtime.size) {
		mm_heap = &memmap.runtime;
		array_size = ARRAY_SIZE(rt_heap_map);
	} else if ((uint32_t)ptr >= memmap.buffer.heap &&
		(uint32_t)ptr < memmap.buffer.heap + memmap.buffer.size) {
		mm_heap = &memmap.buffer;
		array_size = ARRAY_SIZE(buf_heap_map);
#if (HEAP_DMA_BUFFER_SIZE > 0)
	} else if ((uint32_t)ptr >= memmap.dma.heap &&
		(uint32_t)ptr < memmap.dma.heap + memmap.dma.size) {
		mm_heap = &memmap.dma;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
#endif
	} else {
		trace_mem_error("eMF");
		return;
	}

	/* find the block map that ptr belongs to - the last map owns the
	 * remainder of the heap, so fall through to it if no earlier map
	 * matches
	 */
	for (i = 0; i < array_size - 1; i++) {

		/* is ptr below the next map's base ? */
		if ((uint32_t)ptr < mm_heap->map[i + 1].base)
			break;
	}

	block_map = &mm_heap->map[i];

	/* calculate block header */
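	/*
	 * The block index is recovered purely from the pointer offset, which
	 * is why rfree()/rbfree() take no size argument; hdr->size then gives
	 * the length of the contiguous run to release.
	 */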
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
	hdr = &block_map->block[block];

	/* free block header and contiguous blocks */
	size = hdr->size;
	for (i = block; i < block + size; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->flags = 0;
		block_map->free_count++;
		mm_heap->info.used -= block_map->block_size;
		mm_heap->info.free += block_map->block_size;
	}

	/* set first free */
	if (block < block_map->first_free)
		block_map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, block_map->block_size * size,
		DEBUG_BLOCK_FREE_VALUE);
#endif
}

/* allocate single block for runtime */
static void *rmalloc_runtime(int bflags, size_t bytes)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rt_heap_map); i++) {

		/* is block big enough */
		if (rt_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (rt_heap_map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(&memmap.runtime, i, bflags);
	}

	trace_mem_error("eMm");
	trace_value(bytes);
	trace_value(bflags);
	return NULL;
}

void *rmalloc(int zone, int bflags, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(bflags, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, int bflags, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, bflags, bytes);
	if (ptr != NULL) {
		bzero(ptr, bytes);
	}

	return ptr;
}

/* allocates a contiguous buffer on a 1k boundary */
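/*
 * A minimal sketch of intended use, illustrative only (period_bytes and
 * periods are made-up caller variables; RFLAGS_DMA only has effect when
 * HEAP_DMA_BUFFER_SIZE > 0):
 *
 *	void *buf = rballoc(0, RFLAGS_DMA, period_bytes * periods);
 *
 * Without RFLAGS_DMA the allocation comes from the general buffer heap;
 * the zone argument is currently unused here.
 */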
void *rballoc(int zone, int bflags, size_t bytes)
{
	struct block_map *block_map = buf_heap_map;
	struct mm_heap *mm_heap = &memmap.buffer;
	int i;
	int array_size = ARRAY_SIZE(buf_heap_map);
	uint32_t flags;
	void *ptr = NULL;

#if (HEAP_DMA_BUFFER_SIZE > 0)
	if (bflags & RFLAGS_DMA) {
		mm_heap = &memmap.dma;
		block_map = dma_buf_heap_map;
		array_size = ARRAY_SIZE(dma_buf_heap_map);
	}
#endif
	spin_lock_irq(&memmap.lock, flags);

	/* will request fit in single block */
	for (i = 0; i < array_size; i++) {

		/* is block big enough */
		if (block_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (block_map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(mm_heap, i, bflags);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (array_size == 1) {
		ptr = alloc_cont_blocks(mm_heap, 0, bflags, bytes);
		goto out;
	} else {

		/* find the largest block size smaller than the request */
		for (i = array_size - 1; i >= 0; i--) {

			if (block_map[i].block_size < bytes) {
				ptr = alloc_cont_blocks(mm_heap, i, bflags,
					bytes);
				goto out;
			}
		}
	}

	ptr = alloc_cont_blocks(mm_heap, array_size - 1,
		bflags, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

void rbfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size;

	/* calc context size for each area */
	size = memmap.buffer.info.used;
	size += memmap.runtime.info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	size += heap_get_size(&memmap.buffer);
	size += heap_get_size(&memmap.runtime);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.buffer.info.free +
		memmap.runtime.info.free + memmap.system.info.free;
	memmap.total.used = memmap.buffer.info.used +
		memmap.runtime.info.used + memmap.system.info.used;

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function.
 * No allocations are permitted after calling this and before calling
 * restore.
 */
int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0;
	int32_t ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting, before any pipeline work.
 */
int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
{
	int32_t offset = 0;
	int32_t ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	return 0;
}

/* initialise heap maps */
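/*
 * Map bases are laid out back to back within each heap: map N + 1 starts
 * where map N's blocks end, e.g. the 32 byte runtime map starts at
 * HEAP_RUNTIME_BASE + 16 * HEAP_RT_COUNT16.
 */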
void init_heap(struct reef *reef)
{
	struct block_map *next_map;
	struct block_map *current_map;
	int i;

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	current_map = &buf_heap_map[0];
	current_map->base = memmap.buffer.heap;

	for (i = 1; i < ARRAY_SIZE(buf_heap_map); i++) {
		next_map = &buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &buf_heap_map[i];
	}

	/* initialise runtime map */
	current_map = &rt_heap_map[0];
	current_map->base = memmap.runtime.heap;

	for (i = 1; i < ARRAY_SIZE(rt_heap_map); i++) {
		next_map = &rt_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &rt_heap_map[i];
	}

#if (HEAP_DMA_BUFFER_SIZE > 0)
	/* initialise DMA map */
	current_map = &dma_buf_heap_map[0];
	current_map->base = memmap.dma.heap;

	for (i = 1; i < ARRAY_SIZE(dma_buf_heap_map); i++) {
		next_map = &dma_buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &dma_buf_heap_map[i];
	}
#endif
}