/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/debug.h>
#include <reef/trace.h>
#include <reef/lock.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug to set memory value on every allocation */
#define DEBUG_BLOCK_ALLOC	0
#define DEBUG_BLOCK_ALLOC_VALUE	0x6b6b6b6b

/* debug to set memory value on every free TODO: not working atm */
#define DEBUG_BLOCK_FREE	0
#define DEBUG_BLOCK_FREE_VALUE	0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e)	trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e)	trace_error(TRACE_CLASS_MEM, __e)

/* block status */
#define BLOCK_FREE	0
#define BLOCK_USED	1
/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Module memory pool has a variable size allocation map and memory is
 *    freed on module removal or calls to rfree(). Saved as part of PM
 *    context. Global size set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
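
/*
 * Illustrative usage sketch (not taken from this file; "my_mod" and
 * "struct foo" are hypothetical names). A component would typically take a
 * zeroed module allocation plus a runtime buffer and later release both:
 *
 *	struct foo *f = rzalloc(RZONE_MODULE, my_mod, sizeof(*f));
 *	void *pcm = rballoc(RZONE_MODULE, my_mod, 2 * 1024);
 *	...
 *	rbfree(RZONE_MODULE, my_mod, pcm);
 *	rfree(RZONE_MODULE, my_mod, f);
 *
 * RZONE_DEV allocations come from the system pool and can never be freed;
 * rfree(RZONE_DEV, ...) panics below.
 */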

struct block_hdr {
	uint8_t module;		/* module that owns this page */
	uint8_t size;		/* size in blocks of this contiguous allocation */
	uint8_t flags;		/* usage flags for page */
	uint8_t instance;	/* module instance ID */
} __attribute__ ((packed));

struct block_map {
	uint16_t block_size;	/* size of block in bytes */
	uint16_t count;		/* number of blocks in map */
	uint16_t free_count;	/* number of free blocks */
	uint16_t first_free;	/* index of first free block */
	struct block_hdr *block;	/* base block header */
	uint32_t base;		/* base address of space */
} __attribute__ ((packed));

#define BLOCK_DEF(sz, cnt, hdr) \
	{.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr}
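
/*
 * For example, BLOCK_DEF(16, HEAP_MOD_COUNT16, mod_block16) expands to
 * {.block_size = 16, .count = HEAP_MOD_COUNT16,
 *  .free_count = HEAP_MOD_COUNT16, .block = mod_block16}, i.e. a map whose
 * blocks all start out free. The .base address of each map is filled in
 * later by init_heap().
 */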

/* Heap blocks for modules */
//static struct block_hdr mod_block8[HEAP_MOD_COUNT8];
static struct block_hdr mod_block16[HEAP_MOD_COUNT16];
static struct block_hdr mod_block32[HEAP_MOD_COUNT32];
static struct block_hdr mod_block64[HEAP_MOD_COUNT64];
static struct block_hdr mod_block128[HEAP_MOD_COUNT128];
static struct block_hdr mod_block256[HEAP_MOD_COUNT256];
static struct block_hdr mod_block512[HEAP_MOD_COUNT512];
static struct block_hdr mod_block1024[HEAP_MOD_COUNT1024];

/* Heap memory map for modules */
static struct block_map mod_heap_map[] = {
/*	BLOCK_DEF(8, HEAP_MOD_COUNT8, mod_block8), */
	BLOCK_DEF(16, HEAP_MOD_COUNT16, mod_block16),
	BLOCK_DEF(32, HEAP_MOD_COUNT32, mod_block32),
	BLOCK_DEF(64, HEAP_MOD_COUNT64, mod_block64),
	BLOCK_DEF(128, HEAP_MOD_COUNT128, mod_block128),
	BLOCK_DEF(256, HEAP_MOD_COUNT256, mod_block256),
	BLOCK_DEF(512, HEAP_MOD_COUNT512, mod_block512),
	BLOCK_DEF(1024, HEAP_MOD_COUNT1024, mod_block1024),
};

/* Heap blocks for buffers */
static struct block_hdr buf_block1024[HEAP_BUF_COUNT];

/* Heap memory map for buffers */
static struct block_map buf_heap_map[] = {
	BLOCK_DEF(1024, HEAP_BUF_COUNT, buf_block1024),
};

/* memory heap start locations from linker */
extern uint32_t _system_heap;
extern uint32_t _module_heap;
extern uint32_t _buffer_heap;
extern uint32_t _stack_sentry;

struct mm_heap {
	uint32_t blocks;
	struct block_map *map;
	uint32_t heap;
	uint32_t heap_end;
	struct mm_info info;
};

/* heap block memory map */
struct mm {

	struct mm_heap module;	/* general heap for components */
	struct mm_heap system;	/* system heap - used during init, cannot be freed */
	struct mm_heap buffer;	/* general component buffer heap */
	struct mm_info total;
	spinlock_t lock;	/* all allocs and frees are atomic */
};

struct mm memmap = {
	.system = {
		.heap = (uint32_t)&_system_heap,
		.heap_end = (uint32_t)&_module_heap,
		.info = {.free = SYSTEM_MEM,},
	},

	.module = {
		.blocks = ARRAY_SIZE(mod_heap_map),
		.map = mod_heap_map,
		.heap = (uint32_t)&_module_heap,
		.heap_end = (uint32_t)&_buffer_heap,
		.info = {.free = HEAP_MOD_SIZE,},
	},

	.buffer = {
		.blocks = ARRAY_SIZE(buf_heap_map),
		.map = buf_heap_map,
		.heap = (uint32_t)&_buffer_heap,
		.heap_end = (uint32_t)&_stack_sentry,
		.info = {.free = HEAP_BUF_SIZE,},
	},
	.total = {.free = SYSTEM_MEM + HEAP_MOD_SIZE + HEAP_BUF_SIZE,},
};

/* total size of a block map: descriptor plus headers and data blocks */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}
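
/*
 * For example, for the single buffer map defined above this evaluates to
 * sizeof(struct block_map) + HEAP_BUF_COUNT * (1024 + sizeof(struct block_hdr))
 * bytes.
 */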

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr, i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_dev(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= memmap.system.heap_end) {
		trace_mem_error("eMd");
		panic(PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level, int module)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->module = module;
	hdr->size = 1;
	hdr->flags = BLOCK_USED;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->flags == BLOCK_FREE) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level, int module,
	size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start, current, count = bytes / map->block_size;
	unsigned int i, remaining = map->count - count, end;

	if (bytes % map->block_size)
		count++;

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start < remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block free */
			if (hdr->flags == BLOCK_USED)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->module = module;
		hdr->flags = BLOCK_USED;
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->flags == BLOCK_FREE) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* free block(s) */
static void free_block(struct mm_heap *heap, int module, void *ptr)
{
	struct block_map *map;
	struct block_hdr *hdr;
	int i, block, size;

	/* sanity check */
	if (ptr == NULL)
		return;

	/* find the map that ptr belongs to */
	for (i = 0; i < heap->blocks; i++) {

		/* is ptr within this map's address range */
		if ((uint32_t)ptr >= heap->map[i].base &&
			(uint32_t)ptr < heap->map[i].base +
			heap->map[i].block_size * heap->map[i].count)
			goto found;
	}

	/* not found */
	trace_mem_error("eMF");
	return;

found:
	/* calculate block header */
	map = &heap->map[i];
	block = ((uint32_t)ptr - map->base) / map->block_size;
	hdr = &map->block[block];
	size = hdr->size;

	/* free block header and contiguous blocks */
	for (i = block; i < block + size; i++) {
		hdr = &map->block[i];
		hdr->module = 0;
		hdr->size = 0;
		hdr->flags = BLOCK_FREE;
		map->free_count++;
		heap->info.used -= map->block_size;
		heap->info.free += map->block_size;
	}

	/* set first free */
	if (block < map->first_free)
		map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, map->block_size * size, DEBUG_BLOCK_FREE_VALUE);
#endif
}

/* allocate single block for module */
static void *rmalloc_mod(int module, size_t bytes)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mod_heap_map); i++) {

		/* is block big enough */
		if (mod_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (mod_heap_map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(&memmap.module, i, module);
	}

	trace_mem_error("eMm");
	return NULL;
}

void *rmalloc(int zone, int module, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_DEV:
		ptr = rmalloc_dev(bytes);
		break;
	case RZONE_MODULE:
		ptr = rmalloc_mod(module, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, int module, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, module, bytes);
	if (ptr != NULL) {
		bzero(ptr, bytes);
	}

	return ptr;
}

/* allocates a contiguous buffer on a 1k boundary */
void *rballoc(int zone, int module, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;
	int i;

	spin_lock_irq(&memmap.lock, flags);

	/* will request fit in single block */
	for (i = 0; i < ARRAY_SIZE(buf_heap_map); i++) {

		/* is block big enough */
		if (buf_heap_map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (buf_heap_map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(&memmap.buffer, i, module);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (ARRAY_SIZE(buf_heap_map) == 1) {
		ptr = alloc_cont_blocks(&memmap.buffer, 0, module, bytes);
		goto out;
	} else {

		/* find best block size for request */
		for (i = 0; i < ARRAY_SIZE(buf_heap_map); i++) {

			/* allocate if this block size is smaller than the request */
			if (buf_heap_map[i].block_size < bytes) {
				ptr = alloc_cont_blocks(&memmap.buffer, i,
					module, bytes);
				if (ptr != NULL)
					goto out;
			}
		}
	}

	ptr = alloc_cont_blocks(&memmap.buffer, ARRAY_SIZE(buf_heap_map) - 1,
		module, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void rfree(int zone, int module, void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_DEV:
		trace_mem_error("eMF");
		panic(PANIC_MEM);
		break;
	case RZONE_MODULE:
		free_block(&memmap.module, module, ptr);
		break;
	default:
		trace_mem_error("eMf");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
}

void rbfree(int zone, int module, void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_DEV:
		trace_mem_error("eMF");
		panic(PANIC_MEM);
		break;
	case RZONE_MODULE:
		free_block(&memmap.buffer, module, ptr);
		break;
	default:
		trace_mem_error("eMf");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size;

	/* calc context size for each area */
	size = memmap.buffer.info.used;
	size += memmap.module.info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	size += heap_get_size(&memmap.buffer);
	size += heap_get_size(&memmap.module);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.buffer.info.free +
		memmap.module.info.free + memmap.system.info.free;
	memmap.total.used = memmap.buffer.info.used +
		memmap.module.info.used + memmap.system.info.used;

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function. No
 * allocations are permitted after calling this and before calling restore.
 */
int mm_pm_context_save(struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0, ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(sg, offset + ret,
		(void *)memmap.system.heap,
		(int32_t)(memmap.system.heap_end - memmap.system.heap));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to the modules and the system. This must be
 * called immediately after booting, before any pipeline work.
 */
int mm_pm_context_restore(struct dma_sg_config *sg)
{
	int32_t offset = 0, ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(sg, offset + ret,
		(void *)memmap.system.heap,
		(int32_t)(memmap.system.heap_end - memmap.system.heap));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and restore contents of each
	// block from the host. This is the same block order used by the
	// context store.

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and restore contents of each
	// block from the host. This is the same block order used by the
	// context store.

	return 0;
}

/* initialise map */
void init_heap(void)
{
	struct block_map *next_map, *current_map;
	int i;

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	current_map = &buf_heap_map[0];
	current_map->base = memmap.buffer.heap;

	for (i = 1; i < ARRAY_SIZE(buf_heap_map); i++) {
		next_map = &buf_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &buf_heap_map[i];
	}

	/* initialise module map */
	current_map = &mod_heap_map[0];
	current_map->base = memmap.module.heap;

	for (i = 1; i < ARRAY_SIZE(mod_heap_map); i++) {
		next_map = &mod_heap_map[i];
		next_map->base = current_map->base +
			current_map->block_size * current_map->count;
		current_map = &mod_heap_map[i];
	}
}
653}