/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <sof/alloc.h>
#include <sof/sof.h>
#include <sof/debug.h>
#include <sof/panic.h>
#include <sof/trace.h>
#include <sof/lock.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug: set memory to a known value on every allocation */
#define DEBUG_BLOCK_ALLOC 0
#define DEBUG_BLOCK_ALLOC_VALUE 0x6b6b6b6b

/* debug: set memory to a known value on every free. TODO: not working yet */
#define DEBUG_BLOCK_FREE 0
#define DEBUG_BLOCK_FREE_VALUE 0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e) trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e) trace_error(TRACE_CLASS_MEM, __e)

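/*
 * The allocator bookkeeping types (struct mm, mm_heap, block_map and
 * block_hdr) are declared in <sof/alloc.h>. As a rough sketch of the fields
 * this file relies on (the real definitions may carry more members and
 * different widths):
 *
 *	struct block_hdr {
 *		uint16_t size;		// blocks in this allocation
 *		uint16_t used;		// 1 if the block is allocated
 *	};
 *
 *	struct block_map {
 *		uint32_t block_size;	// bytes per block
 *		uint32_t count;		// number of blocks in the map
 *		uint32_t free_count;	// number of unused blocks
 *		uint32_t first_free;	// index of the first unused block
 *		uint32_t base;		// base address of the block data
 *		struct block_hdr *block; // one header per block
 *	};
 */
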
extern struct mm memmap;

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */

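/*
 * Typical usage, as a sketch (RZONE_* and SOF_MEM_CAPS_* come from
 * <sof/alloc.h> and the IPC ABI headers; the exact names used here are
 * illustrative):
 *
 *	// permanent allocation from the system pool
 *	struct dev *d = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*d));
 *
 *	// freeable allocation from the runtime pool
 *	void *scratch = rmalloc(RZONE_RUNTIME, SOF_MEM_CAPS_RAM, 256);
 *	rfree(scratch);
 *
 *	// contiguous audio buffer from the buffer pool
 *	void *buf = rballoc(RZONE_BUFFER, SOF_MEM_CAPS_RAM, 4096);
 *	rfree(buf);
 */
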
/* total size of block */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}

#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr;
	uint32_t i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_sys(size_t bytes)
{
	void *ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eMd");
		panic(SOF_IPC_PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	dcache_writeback_invalidate_region(&memmap.system,
					   sizeof(memmap.system));

	return ptr;
}

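/*
 * The system pool is a simple bump allocator: the heap pointer only moves
 * forward and there is no per-allocation header, e.g.
 *
 *	void *a = rmalloc_sys(64);	// returns the old heap pointer
 *	void *b = rmalloc_sys(64);	// returns a + 64
 *
 * so neither region can ever be handed back to the pool.
 */
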
/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level,
			 uint32_t caps)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->used = 1;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;
	dcache_writeback_invalidate_region(hdr, sizeof(*hdr));

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->used == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	dcache_writeback_invalidate_region(map, sizeof(*map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));

	return ptr;
}

/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level,
			       uint32_t caps, size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start;
	unsigned int current;
	unsigned int count = bytes / map->block_size;
	unsigned int i;
	unsigned int remaining;
	unsigned int end;

	/* round block count up for any remainder */
	if (bytes % map->block_size)
		count++;

	/* request can never fit in this map */
	if (count > map->count) {
		trace_mem_error("eCb");
		return NULL;
	}

	/* highest start index that still leaves room for count blocks */
	remaining = map->count - count;

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start <= remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used */
			if (hdr->used)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;
	dcache_writeback_invalidate_region(hdr, sizeof(*hdr));

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->used = 1;
		dcache_writeback_invalidate_region(hdr, sizeof(*hdr));
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->used == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	dcache_writeback_invalidate_region(map, sizeof(*map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));

	return ptr;
}

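/*
 * The block count for a contiguous request rounds up, e.g. a 1500 byte
 * request from a map of 512 byte blocks takes 1500 / 512 = 2 blocks plus one
 * more for the 476 byte remainder, wasting the last 36 bytes of the third
 * block.
 */
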
static struct mm_heap *get_heap_from_ptr(void *ptr)
{
	struct mm_heap *heap;
	int i;

	/* find mm_heap that ptr belongs to */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];

		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}

	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];

		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}

	return NULL;
}

static struct mm_heap *get_runtime_heap_from_caps(uint32_t caps)
{
	struct mm_heap *heap;
	uint32_t mask;
	int i;

	/* find first heap that supports the requested capabilities */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];
		mask = heap->caps & caps;
		if (mask == caps)
			return heap;
	}

	return NULL;
}

static struct mm_heap *get_buffer_heap_from_caps(uint32_t caps)
{
	struct mm_heap *heap;
	uint32_t mask;
	int i;

	/* find first heap that supports the requested capabilities */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];
		mask = heap->caps & caps;
		if (mask == caps)
			return heap;
	}

	return NULL;
}

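/*
 * A heap matches only if it provides every requested capability bit, i.e.
 * (heap->caps & caps) == caps. For example (SOF_MEM_CAPS_* values are from
 * the IPC ABI headers):
 *
 *	caps = SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA;
 *	// heap->caps == SOF_MEM_CAPS_RAM                       -> no match
 *	// heap->caps == RAM | DMA | SOF_MEM_CAPS_CACHE         -> match
 */
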
/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *heap;
	struct block_map *block_map;
	struct block_hdr *hdr;
	int i;
	int block;
	int count;

	/* sanity check */
	if (ptr == NULL)
		return;

	heap = get_heap_from_ptr(ptr);
	if (heap == NULL)
		return;

	/* find block map that ptr belongs to - a ptr beyond the base of
	 * every later map belongs to the last map */
	for (i = 0; i < heap->blocks - 1; i++) {

		/* is ptr in this map */
		if ((uint32_t)ptr < heap->map[i + 1].base)
			break;
	}

	block_map = &heap->map[i];

	/* calculate block header */
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
	hdr = &block_map->block[block];

	/* take a copy of size as the headers are cleared in the loop below */
	count = hdr->size;

	/* free block header and contiguous blocks */
	for (i = block; i < block + count; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->used = 0;
		block_map->free_count++;
		heap->info.used -= block_map->block_size;
		heap->info.free += block_map->block_size;
		dcache_writeback_invalidate_region(hdr, sizeof(*hdr));
	}

	/* set first free block */
	if (block < block_map->first_free)
		block_map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, block_map->block_size * count,
			    DEBUG_BLOCK_FREE_VALUE);
#endif

	dcache_writeback_invalidate_region(block_map, sizeof(*block_map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));
}

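/*
 * Freeing also rewinds the first-free cursor. For example, if blocks 2..4
 * were allocated as one contiguous region (block 2's header has size == 3)
 * and first_free is 5, freeing that region marks blocks 2..4 unused and
 * moves first_free back to 2.
 */
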
/* allocate single block for runtime */
static void *rmalloc_runtime(uint32_t caps, size_t bytes)
{
	struct mm_heap *heap;
	int i;

	/* check runtime heap for capabilities */
	heap = get_runtime_heap_from_caps(caps);
	if (heap)
		goto find;

	/* next check buffer heap for capabilities */
	heap = get_buffer_heap_from_caps(caps);
	if (heap == NULL)
		goto error;

find:
	for (i = 0; i < heap->blocks; i++) {

		/* is block big enough */
		if (heap->map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (heap->map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(heap, i, caps);
	}

error:
	trace_mem_error("eMm");
	trace_error_value(bytes);
	trace_error_value(caps);
	return NULL;
}

void *rmalloc(int zone, uint32_t caps, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(caps, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, uint32_t caps, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, caps, bytes);
	if (ptr != NULL)
		bzero(ptr, bytes);

	return ptr;
}

/* allocates contiguous buffers */
void *rballoc(int zone, uint32_t caps, size_t bytes)
{
	struct mm_heap *heap;
	int i;
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	heap = get_buffer_heap_from_caps(caps);
	if (heap == NULL)
		goto out;

	/* will the request fit in a single block ? */
	for (i = 0; i < heap->blocks; i++) {

		/* is block big enough */
		if (heap->map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (heap->map[i].free_count == 0)
			continue;

		/* free block space exists */
		ptr = alloc_block(heap, i, caps);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (heap->blocks == 1) {
		ptr = alloc_cont_blocks(heap, 0, caps, bytes);
		goto out;
	}

	/* find best block size for request: try maps whose blocks are
	 * smaller than the request before falling back to the largest map */
	for (i = 0; i < heap->blocks; i++) {

		/* is block size smaller than request ? */
		if (heap->map[i].block_size < bytes) {
			ptr = alloc_cont_blocks(heap, i, caps, bytes);
			if (ptr)
				goto out;
		}
	}

	ptr = alloc_cont_blocks(heap, heap->blocks - 1, caps, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size = 0;
	int i;

	/* calc context size for each area */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
		size += memmap.buffer[i].info.used;
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
		size += memmap.runtime[i].info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
		size += heap_get_size(&memmap.buffer[i]);
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
		size += heap_get_size(&memmap.runtime[i]);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.system.info.free;
	memmap.total.used = memmap.system.info.used;

	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		memmap.total.free += memmap.buffer[i].info.free;
		memmap.total.used += memmap.buffer[i].info.used;
	}

	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		memmap.total.free += memmap.runtime[i].info.free;
		memmap.total.used += memmap.runtime[i].info.used;
	}

	return size;
}

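/*
 * As a worked example, a single heap with one 8-block map of 1024 byte
 * blocks and 2048 bytes in use contributes
 *
 *	2048					// info.used
 *	+ sizeof(struct mm_heap)		// heap descriptor
 *	+ sizeof(struct block_map)		// map descriptor
 *	+ 8 * (1024 + sizeof(struct block_hdr))	// per-block data + headers
 *
 * to the saved context size.
 */
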
/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function. No
 * allocations are permitted after calling this and before calling restore.
 */
int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0;
	int32_t ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(dc, sg, offset,
			       (void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(dc, sg, offset + ret,
			       (void *)memmap.system.heap,
			       (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting before any pipeline work.
 */
int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
{
	int32_t offset = 0;
	int32_t ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(dc, sg, offset,
				 (void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(dc, sg, offset + ret,
				 (void *)memmap.system.heap,
				 (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store

	return 0;
}

/* initialise map */
void init_heap(struct sof *sof)
{
	struct mm_heap *heap;
	struct block_map *next_map;
	struct block_map *current_map;
	int i;
	int j;

	/* sanity check for malformed images or loader issues */
	if (memmap.system.heap != HEAP_SYSTEM_BASE)
		panic(SOF_IPC_PANIC_MEM);

	spinlock_init(&memmap.lock);

	/* initialise buffer map - map[0] starts at the heap base and each
	 * following map starts where the previous map's blocks end */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];

		current_map = &heap->map[0];
		current_map->base = heap->heap;
		dcache_writeback_region(current_map, sizeof(*current_map));

		for (j = 1; j < heap->blocks; j++) {
			next_map = &heap->map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;
			current_map = &heap->map[j];
			dcache_writeback_region(current_map,
						sizeof(*current_map));
		}
	}

	/* initialise runtime map */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];

		current_map = &heap->map[0];
		current_map->base = heap->heap;
		dcache_writeback_region(current_map, sizeof(*current_map));

		for (j = 1; j < heap->blocks; j++) {
			next_map = &heap->map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;
			current_map = &heap->map[j];
			dcache_writeback_region(current_map,
						sizeof(*current_map));
		}
	}
}