blob: 3ca14c3f93e244fe44904b1aac126f62ee347945 [file] [log] [blame]
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +01001/*
2 * Copyright (c) 2016, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the Intel Corporation nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
29 * Keyon Jie <yang.jie@linux.intel.com>
30 */
31
Pierre-Louis Bossart81708a52018-04-04 18:46:50 -050032#include <sof/alloc.h>
33#include <sof/sof.h>
34#include <sof/debug.h>
35#include <sof/panic.h>
36#include <sof/trace.h>
37#include <sof/lock.h>
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +010038#include <platform/memory.h>
39#include <stdint.h>
40
/* Debug aid: fill every allocation with a known 32-bit pattern. */
#define DEBUG_BLOCK_ALLOC		0
#define DEBUG_BLOCK_ALLOC_VALUE		0x6b6b6b6b

/* Debug aid: fill freed memory with a known pattern. TODO: not working atm */
#define DEBUG_BLOCK_FREE		0
#define DEBUG_BLOCK_FREE_VALUE		0x5a5a5a5a

/* memory tracing support - only emitted when a debug fill mode is on */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e)	trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e)	trace_error(TRACE_CLASS_MEM, __e)
57
Liam Girdwoodc760f832018-03-01 12:15:15 +000058extern struct mm memmap;
59
/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable-size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed-size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */
71
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +010072
73/* total size of block */
74static inline uint32_t block_get_size(struct block_map *map)
75{
76 return sizeof(*map) + map->count *
77 (map->block_size + sizeof(struct block_hdr));
78}
79
80/* total size of heap */
81static inline uint32_t heap_get_size(struct mm_heap *heap)
82{
83 uint32_t size = sizeof(struct mm_heap);
84 int i;
85
86 for (i = 0; i < heap->blocks; i++) {
87 size += block_get_size(&heap->map[i]);
88 }
89
90 return size;
91}
92
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
/* Fill a region with a 32-bit debug pattern, one word at a time.
 * Any trailing bytes (bytes % 4) are left untouched.
 */
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t *dest = ptr;
	uint32_t words = bytes >> 2;
	uint32_t w;

	for (w = 0; w < words; w++)
		dest[w] = val;
}
#endif
103
104/* allocate from system memory pool */
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100105static void *rmalloc_sys(size_t bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100106{
107 void *ptr = (void *)memmap.system.heap;
108
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100109 /* always succeeds or panics */
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100110 memmap.system.heap += bytes;
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100111 if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100112 trace_mem_error("eMd");
Liam Girdwoode52ce682018-02-21 15:36:25 +0000113 panic(SOF_IPC_PANIC_MEM);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100114 }
115
116#if DEBUG_BLOCK_ALLOC
117 alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
118#endif
119
120 return ptr;
121}
122
123/* allocate single block */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000124static void *alloc_block(struct mm_heap *heap, int level,
125 uint32_t caps)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100126{
127 struct block_map *map = &heap->map[level];
128 struct block_hdr *hdr = &map->block[map->first_free];
129 void *ptr;
130 int i;
131
132 map->free_count--;
133 ptr = (void *)(map->base + map->first_free * map->block_size);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100134 hdr->size = 1;
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000135 hdr->used = 1;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100136 heap->info.used += map->block_size;
137 heap->info.free -= map->block_size;
138
139 /* find next free */
140 for (i = map->first_free; i < map->count; ++i) {
141
142 hdr = &map->block[i];
143
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000144 if (hdr->used == 0) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100145 map->first_free = i;
146 break;
147 }
148 }
149
150#if DEBUG_BLOCK_ALLOC
151 alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
152#endif
153
154 return ptr;
155}
156
Pierre-Louis Bossartf9458092017-11-09 15:24:07 -0600157/* allocates continuous blocks */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000158static void *alloc_cont_blocks(struct mm_heap *heap, int level,
159 uint32_t caps, size_t bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100160{
161 struct block_map *map = &heap->map[level];
162 struct block_hdr *hdr = &map->block[map->first_free];
163 void *ptr;
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500164 unsigned int start;
165 unsigned int current;
166 unsigned int count = bytes / map->block_size;
167 unsigned int i;
168 unsigned int remaining = map->count - count;
169 unsigned int end;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100170
171 if (bytes % map->block_size)
172 count++;
173
Pierre-Louis Bossartf9458092017-11-09 15:24:07 -0600174 /* check for continuous blocks from "start" */
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100175 for (start = map->first_free; start < remaining; start++) {
176
177 /* check that we have enough free blocks from start pos */
178 end = start + count;
179 for (current = start; current < end; current++) {
180 hdr = &map->block[current];
181
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100182 /* is block used */
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000183 if (hdr->used)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100184 break;
185 }
186
187 /* enough free blocks ? */
188 if (current == end)
189 goto found;
190 }
191
192 /* not found */
193 trace_mem_error("eCb");
194 return NULL;
195
196found:
197 /* found some free blocks */
198 map->free_count -= count;
199 ptr = (void *)(map->base + start * map->block_size);
200 hdr = &map->block[start];
201 hdr->size = count;
202 heap->info.used += count * map->block_size;
203 heap->info.free -= count * map->block_size;
204
205 /* allocate each block */
206 for (current = start; current < end; current++) {
207 hdr = &map->block[current];
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000208 hdr->used = 1;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100209 }
210
211 /* do we need to find a new first free block ? */
212 if (start == map->first_free) {
213
214 /* find next free */
215 for (i = map->first_free + count; i < map->count; ++i) {
216
217 hdr = &map->block[i];
218
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000219 if (hdr->used == 0) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100220 map->first_free = i;
221 break;
222 }
223 }
224 }
225
226#if DEBUG_BLOCK_ALLOC
227 alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
228#endif
229
230 return ptr;
231}
232
Liam Girdwoodc760f832018-03-01 12:15:15 +0000233static struct mm_heap *get_heap_from_ptr(void *ptr)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100234{
Liam Girdwoodc760f832018-03-01 12:15:15 +0000235 struct mm_heap *heap;
236 int i;
237
238 /* find mm_heap that ptr belongs to */
239 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
240 heap = &memmap.runtime[i];
241
242 if ((uint32_t)ptr >= heap->heap &&
243 (uint32_t)ptr < heap->heap + heap->size)
244 return heap;
245 }
246
247 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
248 heap = &memmap.buffer[i];
249
250 if ((uint32_t)ptr >= heap->heap &&
251 (uint32_t)ptr < heap->heap + heap->size)
252 return heap;
253 }
254
255 return NULL;
256}
257
258static struct mm_heap *get_runtime_heap_from_caps(uint32_t caps)
259{
260 struct mm_heap *heap;
261 uint32_t mask;
262 int i;
263
264 /* find first heap that support type */
265 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
266 heap = &memmap.runtime[i];
267 mask = heap->caps & caps;
268 if (mask == caps)
269 return heap;
270 }
271
272 return NULL;
273}
274
275static struct mm_heap *get_buffer_heap_from_caps(uint32_t caps)
276{
277 struct mm_heap *heap;
278 uint32_t mask;
279 int i;
280
281 /* find first heap that support type */
282 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
283 heap = &memmap.buffer[i];
284 mask = heap->caps & caps;
285 if (mask == caps)
286 return heap;
287 }
288
289 return NULL;
290}
291
292/* free block(s) */
293static void free_block(void *ptr)
294{
295 struct mm_heap *heap;
296 struct block_map *block_map;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100297 struct block_hdr *hdr;
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500298 int i;
299 int block;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100300
301 /* sanity check */
302 if (ptr == NULL)
303 return;
304
Liam Girdwoodc760f832018-03-01 12:15:15 +0000305 heap = get_heap_from_ptr(ptr);
306 if (heap == NULL)
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800307 return;
308
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100309 /* find block that ptr belongs to */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000310 for (i = 0; i < heap->blocks - 1; i++) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100311
312 /* is ptr in this block */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000313 if ((uint32_t)ptr < heap->map[i + 1].base)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100314 goto found;
315 }
316
317 /* not found */
318 trace_mem_error("eMF");
319 return;
320
321found:
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800322 /* the block i is it */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000323 block_map = &heap->map[i];
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800324
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100325 /* calculate block header */
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800326 block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
327 hdr = &block_map->block[block];
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100328
Pierre-Louis Bossartf9458092017-11-09 15:24:07 -0600329 /* free block header and continuous blocks */
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100330 for (i = block; i < block + hdr->size; i++) {
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800331 hdr = &block_map->block[i];
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100332 hdr->size = 0;
Liam Girdwood1f6aee52018-03-01 16:13:05 +0000333 hdr->used = 0;
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800334 block_map->free_count++;
335 heap->info.used -= block_map->block_size;
336 heap->info.free += block_map->block_size;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100337 }
338
Liam Girdwoodc760f832018-03-01 12:15:15 +0000339 /* set first free block */
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800340 if (block < block_map->first_free)
341 block_map->first_free = block;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100342
343#if DEBUG_BLOCK_FREE
Keyon Jie14e2d8e2017-09-01 16:41:25 +0800344 alloc_memset_region(ptr, block_map->block_size * (i - 1), DEBUG_BLOCK_FREE_VALUE);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100345#endif
346}
347
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100348/* allocate single block for runtime */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000349static void *rmalloc_runtime(uint32_t caps, size_t bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100350{
Liam Girdwoodc760f832018-03-01 12:15:15 +0000351 struct mm_heap *heap;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100352 int i;
353
Liam Girdwood5972ac32018-03-06 14:15:32 +0000354 /* check runtime heap for capabilities */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000355 heap = get_runtime_heap_from_caps(caps);
Liam Girdwood5972ac32018-03-06 14:15:32 +0000356 if (heap)
357 goto find;
358
359 /* next check buffer heap for capabilities */
360 heap = get_buffer_heap_from_caps(caps);
Liam Girdwoodc760f832018-03-01 12:15:15 +0000361 if (heap == NULL)
362 goto error;
363
Liam Girdwood5972ac32018-03-06 14:15:32 +0000364find:
Liam Girdwoodc760f832018-03-01 12:15:15 +0000365 for (i = 0; i < heap->blocks; i++) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100366
367 /* is block big enough */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000368 if (heap->map[i].block_size < bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100369 continue;
370
371 /* does block have free space */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000372 if (heap->map[i].free_count == 0)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100373 continue;
374
375 /* free block space exists */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000376 return alloc_block(heap, i, caps);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100377 }
378
Liam Girdwoodc760f832018-03-01 12:15:15 +0000379error:
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100380 trace_mem_error("eMm");
Ranjani Sridharan210989d2018-03-25 17:34:04 -0700381 trace_error_value(bytes);
382 trace_error_value(caps);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100383 return NULL;
384}
385
Liam Girdwoodc760f832018-03-01 12:15:15 +0000386void *rmalloc(int zone, uint32_t caps, size_t bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100387{
388 uint32_t flags;
389 void *ptr = NULL;
390
391 spin_lock_irq(&memmap.lock, flags);
392
393 switch (zone) {
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100394 case RZONE_SYS:
395 ptr = rmalloc_sys(bytes);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100396 break;
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100397 case RZONE_RUNTIME:
Liam Girdwoodc760f832018-03-01 12:15:15 +0000398 ptr = rmalloc_runtime(caps, bytes);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100399 break;
400 default:
401 trace_mem_error("eMz");
402 break;
403 }
404
405 spin_unlock_irq(&memmap.lock, flags);
406 return ptr;
407}
408
/* Same as rmalloc() but zeroes the allocation before returning it. */
void *rzalloc(int zone, uint32_t caps, size_t bytes)
{
	void *ptr = rmalloc(zone, caps, bytes);

	if (ptr != NULL)
		bzero(ptr, bytes);

	return ptr;
}
420
Liam Girdwoodc760f832018-03-01 12:15:15 +0000421/* allocates continuous buffers */
422void *rballoc(int zone, uint32_t caps, size_t bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100423{
Liam Girdwoodc760f832018-03-01 12:15:15 +0000424 struct mm_heap *heap;
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500425 int i;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100426 uint32_t flags;
427 void *ptr = NULL;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100428
429 spin_lock_irq(&memmap.lock, flags);
430
Liam Girdwoodc760f832018-03-01 12:15:15 +0000431 heap = get_buffer_heap_from_caps(caps);
432 if (heap == NULL)
433 goto out;
434
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100435 /* will request fit in single block */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000436 for (i = 0; i < heap->blocks; i++) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100437
438 /* is block big enough */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000439 if (heap->map[i].block_size < bytes)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100440 continue;
441
442 /* does block have free space */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000443 if (heap->map[i].free_count == 0)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100444 continue;
445
446 /* allocate block */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000447 ptr = alloc_block(heap, i, caps);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100448 goto out;
449 }
450
451 /* request spans > 1 block */
452
453 /* only 1 choice for block size */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000454 if (heap->blocks == 1) {
455 ptr = alloc_cont_blocks(heap, 0, caps, bytes);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100456 goto out;
457 } else {
458
459 /* find best block size for request */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000460 for (i = 0; i < heap->blocks; i++) {
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100461
462 /* allocate is block size smaller than request */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000463 if (heap->map[i].block_size < bytes)
464 alloc_cont_blocks(heap, i, caps, bytes);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100465 }
466 }
467
Liam Girdwoodc760f832018-03-01 12:15:15 +0000468 ptr = alloc_cont_blocks(heap, heap->blocks - 1, caps, bytes);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100469
470out:
471 spin_unlock_irq(&memmap.lock, flags);
472 return ptr;
473}
474
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100475void rfree(void *ptr)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100476{
477 uint32_t flags;
478
479 spin_lock_irq(&memmap.lock, flags);
Liam Girdwoodc760f832018-03-01 12:15:15 +0000480 free_block(ptr);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100481 spin_unlock_irq(&memmap.lock, flags);
482}
483
484uint32_t mm_pm_context_size(void)
485{
Liam Girdwoodc760f832018-03-01 12:15:15 +0000486 uint32_t size = 0;
487 int i;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100488
489 /* calc context size for each area */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000490 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
491 size += memmap.buffer[i].info.used;
492 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
493 size += memmap.runtime[i].info.used;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100494 size += memmap.system.info.used;
495
496 /* add memory maps */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000497 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
498 size += heap_get_size(&memmap.buffer[i]);
499 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
500 size += heap_get_size(&memmap.runtime[i]);
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100501 size += heap_get_size(&memmap.system);
502
503 /* recalc totals */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000504 memmap.total.free = memmap.system.info.free;
505 memmap.total.used = memmap.system.info.used;
506
507 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
508 memmap.total.free += memmap.buffer[i].info.free;
509 memmap.total.used += memmap.buffer[i].info.used;
510 }
511
512 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
513 memmap.total.free = memmap.runtime[i].info.free;
514 memmap.total.used = memmap.runtime[i].info.used;
515 }
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100516
517 return size;
518}
519
520/*
521 * Save the DSP memories that are in use the system and modules. All pipeline and modules
522 * must be disabled before calling this functions. No allocations are permitted after
523 * calling this and before calling restore.
524 */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100525int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100526{
527 uint32_t used;
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500528 int32_t offset = 0;
529 int32_t ret;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100530
531 /* first make sure SG buffer has enough space on host for DSP context */
532 used = mm_pm_context_size();
533 if (used > dma_sg_get_size(sg))
534 return -EINVAL;
535
536 /* copy memory maps to SG */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100537 ret = dma_copy_to_host(dc, sg, offset,
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100538 (void *)&memmap, sizeof(memmap));
539 if (ret < 0)
540 return ret;
541
542 /* copy system memory contents to SG */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100543 ret = dma_copy_to_host(dc, sg, offset + ret,
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100544 (void *)memmap.system.heap, (int32_t)(memmap.system.size));
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100545 if (ret < 0)
546 return ret;
547
548 /* copy module memory contents to SG */
549 // TODO: iterate over module block map and copy contents of each block
550 // to the host.
551
552 /* copy buffer memory contents to SG */
553 // TODO: iterate over buffer block map and copy contents of each block
554 // to the host.
555
556 return ret;
557}
558
559/*
Pierre-Louis Bossartf9458092017-11-09 15:24:07 -0600560 * Restore the DSP memories to modules and the system. This must be called immediately
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100561 * after booting before any pipeline work.
562 */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100563int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100564{
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500565 int32_t offset = 0;
566 int32_t ret;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100567
568 /* copy memory maps from SG */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100569 ret = dma_copy_from_host(dc, sg, offset,
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100570 (void *)&memmap, sizeof(memmap));
571 if (ret < 0)
572 return ret;
573
574 /* copy system memory contents from SG */
Liam Girdwood1e5c5592017-10-15 23:36:28 +0100575 ret = dma_copy_to_host(dc, sg, offset + ret,
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100576 (void *)memmap.system.heap, (int32_t)(memmap.system.size));
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100577 if (ret < 0)
578 return ret;
579
580 /* copy module memory contents from SG */
581 // TODO: iterate over module block map and copy contents of each block
582 // to the host. This is the same block order used by the context store
583
584 /* copy buffer memory contents from SG */
585 // TODO: iterate over buffer block map and copy contents of each block
586 // to the host. This is the same block order used by the context store
587
588 return 0;
589}
590
591/* initialise map */
Pierre-Louis Bossart81708a52018-04-04 18:46:50 -0500592void init_heap(struct sof *sof)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100593{
Liam Girdwoodc760f832018-03-01 12:15:15 +0000594 struct mm_heap *heap;
Pierre-Louis Bossart4ccf81d2017-09-25 14:52:09 -0500595 struct block_map *next_map;
596 struct block_map *current_map;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100597 int i;
Liam Girdwoodc760f832018-03-01 12:15:15 +0000598 int j;
599 int k;
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100600
Liam Girdwood3ef88cf2018-02-19 15:34:09 +0000601 /* sanity check for malformed images or loader issues */
602 if (memmap.system.heap != HEAP_SYSTEM_BASE)
603 panic(SOF_IPC_PANIC_MEM);
604
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100605 spinlock_init(&memmap.lock);
606
607 /* initialise buffer map */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000608 for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
609 heap = &memmap.buffer[i];
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100610
Liam Girdwoodc760f832018-03-01 12:15:15 +0000611 for (j = 0; j < heap->blocks; j++) {
612
613 current_map = &heap->map[j];
614 current_map->base = heap->heap;
615
616 for (k = 1; k < heap->blocks; k++) {
617 next_map = &heap->map[k];
618 next_map->base = current_map->base +
619 current_map->block_size *
620 current_map->count;
621 current_map = &heap->map[k];
622 }
623 }
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100624 }
625
Liam Girdwood50f7b0e2017-06-06 12:52:15 +0100626 /* initialise runtime map */
Liam Girdwoodc760f832018-03-01 12:15:15 +0000627 for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
628 heap = &memmap.runtime[i];
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100629
Liam Girdwoodc760f832018-03-01 12:15:15 +0000630 for (j = 0; j < heap->blocks; j++) {
631
632 current_map = &heap->map[j];
633 current_map->base = heap->heap;
634
635 for (k = 1; k < heap->blocks; k++) {
636 next_map = &heap->map[k];
637 next_map->base = current_map->base +
638 current_map->block_size *
639 current_map->count;
640 current_map = &heap->map[k];
641 }
642 }
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100643 }
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100644}