/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <sof/alloc.h>
#include <sof/sof.h>
#include <sof/debug.h>
#include <sof/panic.h>
#include <sof/trace.h>
#include <sof/lock.h>
#include <sof/cpu.h>
#include <platform/memory.h>
#include <stdint.h>

/* debug to set memory value on every allocation */
#define DEBUG_BLOCK_ALLOC 0
#define DEBUG_BLOCK_ALLOC_VALUE 0x6b6b6b6b

/* debug to set memory value on every free. TODO: not working at the moment */
#define DEBUG_BLOCK_FREE 0
#define DEBUG_BLOCK_FREE_VALUE 0x5a5a5a5a

/* memory tracing support */
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
#define trace_mem(__e) trace_event(TRACE_CLASS_MEM, __e)
#else
#define trace_mem(__e)
#endif

#define trace_mem_error(__e) trace_error(TRACE_CLASS_MEM, __e)

extern struct mm memmap;

/* We have 3 memory pools
 *
 * 1) System memory pool does not have a map and its size is fixed at build
 *    time. Memory cannot be freed from this pool. Used by device drivers
 *    and any system core. Saved as part of PM context.
 * 2) Runtime memory pool has a variable size allocation map and memory is
 *    freed on calls to rfree(). Saved as part of PM context. Global size
 *    set at build time.
 * 3) Buffer memory pool has a fixed size allocation map and can be freed on
 *    module removal or calls to rfree(). Saved as part of PM context.
 */

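/*
 * Illustrative usage sketch of the three pools. This is an assumption-laden
 * example, not part of this file: RZONE_SYS, RZONE_RUNTIME, RZONE_BUFFER and
 * SOF_MEM_CAPS_RAM are taken to be the zone/caps names declared in
 * sof/alloc.h, and the sizes are arbitrary.
 *
 *   void *drv = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, 64);      // pool 1)
 *   void *ctx = rzalloc(RZONE_RUNTIME, SOF_MEM_CAPS_RAM, 256); // pool 2)
 *   void *buf = rballoc(RZONE_BUFFER, SOF_MEM_CAPS_RAM, 4096); // pool 3)
 *
 *   rfree(ctx);  // runtime and buffer memory can be freed...
 *   rfree(buf);
 *   // ...but drv stays allocated for the lifetime of the firmware.
 */
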
/* total size of block */
static inline uint32_t block_get_size(struct block_map *map)
{
	return sizeof(*map) + map->count *
		(map->block_size + sizeof(struct block_hdr));
}

/* total size of heap */
static inline uint32_t heap_get_size(struct mm_heap *heap)
{
	uint32_t size = sizeof(struct mm_heap);
	int i;

	for (i = 0; i < heap->blocks; i++) {
		size += block_get_size(&heap->map[i]);
	}

	return size;
}

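/*
 * Worked example of the size math above (numbers are illustrative, not from
 * any platform definition): a map with count = 16 blocks of
 * block_size = 64 bytes contributes
 * sizeof(struct block_map) + 16 * (64 + sizeof(struct block_hdr)) bytes,
 * i.e. the map header plus every block's payload and header.
 * heap_get_size() simply sums this over all maps plus the mm_heap struct.
 */
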
#if DEBUG_BLOCK_ALLOC || DEBUG_BLOCK_FREE
static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
{
	uint32_t count = bytes >> 2;
	uint32_t *dest = ptr, i;

	for (i = 0; i < count; i++)
		dest[i] = val;
}
#endif

/* allocate from system memory pool */
static void *rmalloc_sys(size_t bytes)
{
	void *ptr;

	/* system memory reserved only for master core */
	if (cpu_get_id() != PLATFORM_MASTER_CORE_ID) {
		trace_mem_error("eM0");
		return NULL;
	}

	ptr = (void *)memmap.system.heap;

	/* always succeeds or panics */
	memmap.system.heap += bytes;
	if (memmap.system.heap >= HEAP_SYSTEM_BASE + HEAP_SYSTEM_SIZE) {
		trace_mem_error("eM1");
		panic(SOF_IPC_PANIC_MEM);
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	return ptr;
}

/* allocate single block */
static void *alloc_block(struct mm_heap *heap, int level,
	uint32_t caps)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	int i;

	map->free_count--;
	ptr = (void *)(map->base + map->first_free * map->block_size);
	hdr->size = 1;
	hdr->used = 1;
	heap->info.used += map->block_size;
	heap->info.free -= map->block_size;
	dcache_writeback_invalidate_region(hdr, sizeof(*hdr));

	/* find next free */
	for (i = map->first_free; i < map->count; ++i) {

		hdr = &map->block[i];

		if (hdr->used == 0) {
			map->first_free = i;
			break;
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, map->block_size, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	dcache_writeback_invalidate_region(map, sizeof(*map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));

	return ptr;
}

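/*
 * Example of the first-fit behaviour above (illustrative): with
 * first_free = 3 on a 64-byte map, alloc_block() returns
 * map->base + 3 * 64, marks header 3 used with size 1, then advances
 * first_free to the next header with used == 0.
 */
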
/* allocates contiguous blocks */
static void *alloc_cont_blocks(struct mm_heap *heap, int level,
	uint32_t caps, size_t bytes)
{
	struct block_map *map = &heap->map[level];
	struct block_hdr *hdr = &map->block[map->first_free];
	void *ptr;
	unsigned int start;
	unsigned int current;
	unsigned int count = bytes / map->block_size;
	unsigned int i;
	unsigned int remaining;
	unsigned int end;

	if (bytes % map->block_size)
		count++;

	/* can the request fit in this map at all ? */
	if (count > map->count) {
		trace_mem_error("eCb");
		return NULL;
	}

	/* last start position that still leaves room for "count" blocks */
	remaining = map->count - count;

	/* check for contiguous blocks from "start" */
	for (start = map->first_free; start <= remaining; start++) {

		/* check that we have enough free blocks from start pos */
		end = start + count;
		for (current = start; current < end; current++) {
			hdr = &map->block[current];

			/* is block used */
			if (hdr->used)
				break;
		}

		/* enough free blocks ? */
		if (current == end)
			goto found;
	}

	/* not found */
	trace_mem_error("eCb");
	return NULL;

found:
	/* found some free blocks */
	map->free_count -= count;
	ptr = (void *)(map->base + start * map->block_size);
	hdr = &map->block[start];
	hdr->size = count;
	heap->info.used += count * map->block_size;
	heap->info.free -= count * map->block_size;
	dcache_writeback_invalidate_region(hdr, sizeof(*hdr));

	/* allocate each block */
	for (current = start; current < end; current++) {
		hdr = &map->block[current];
		hdr->used = 1;
		dcache_writeback_invalidate_region(hdr, sizeof(*hdr));
	}

	/* do we need to find a new first free block ? */
	if (start == map->first_free) {

		/* find next free */
		for (i = map->first_free + count; i < map->count; ++i) {

			hdr = &map->block[i];

			if (hdr->used == 0) {
				map->first_free = i;
				break;
			}
		}
	}

#if DEBUG_BLOCK_ALLOC
	alloc_memset_region(ptr, bytes, DEBUG_BLOCK_ALLOC_VALUE);
#endif

	dcache_writeback_invalidate_region(map, sizeof(*map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));

	return ptr;
}

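/*
 * Worked example (illustrative): bytes = 300 on a map with
 * block_size = 128 gives count = 300 / 128 = 2, rounded up to 3 because
 * of the 44-byte remainder. The scan then looks for 3 consecutive
 * headers with used == 0 and marks them as one allocation whose first
 * header carries size = 3, which is what free_block() later walks.
 */
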
static struct mm_heap *get_heap_from_ptr(void *ptr)
{
	struct mm_heap *heap;
	int i;

	/* find mm_heap that ptr belongs to */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];

		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}

	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];

		if ((uint32_t)ptr >= heap->heap &&
		    (uint32_t)ptr < heap->heap + heap->size)
			return heap;
	}

	return NULL;
}

static struct mm_heap *get_runtime_heap_from_caps(uint32_t caps)
{
	struct mm_heap *heap;
	uint32_t mask;
	int i;

	/* find first heap that supports all requested caps */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];
		mask = heap->caps & caps;
		if (mask == caps)
			return heap;
	}

	return NULL;
}

static struct mm_heap *get_buffer_heap_from_caps(uint32_t caps)
{
	struct mm_heap *heap;
	uint32_t mask;
	int i;

	/* find first heap that supports all requested caps */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];
		mask = heap->caps & caps;
		if (mask == caps)
			return heap;
	}

	return NULL;
}

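/*
 * The mask test above requires a heap to provide every requested
 * capability bit, not just one of them. For example (capability names
 * assumed to be those declared in SOF headers, shown purely as an
 * illustration): a caller asking for SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA
 * only matches a heap whose heap->caps advertises both bits.
 */
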
/* free block(s) */
static void free_block(void *ptr)
{
	struct mm_heap *heap;
	struct block_map *block_map;
	struct block_hdr *hdr;
	uint32_t size;
	int i;
	int block;

	/* sanity check */
	if (ptr == NULL)
		return;

	heap = get_heap_from_ptr(ptr);
	if (heap == NULL)
		return;

	/* find block map that ptr belongs to - get_heap_from_ptr() has
	 * already validated ptr, so if it is not below any later map base
	 * it must belong to the last map */
	for (i = 0; i < heap->blocks - 1; i++) {

		/* is ptr in this block */
		if ((uint32_t)ptr < heap->map[i + 1].base)
			break;
	}

	/* the block map i is it */
	block_map = &heap->map[i];

	/* calculate block header */
	block = ((uint32_t)ptr - block_map->base) / block_map->block_size;
	hdr = &block_map->block[block];

	/* save block count before the headers below are cleared */
	size = hdr->size;

	/* free block header and contiguous blocks */
	for (i = block; i < block + size; i++) {
		hdr = &block_map->block[i];
		hdr->size = 0;
		hdr->used = 0;
		block_map->free_count++;
		heap->info.used -= block_map->block_size;
		heap->info.free += block_map->block_size;
		dcache_writeback_invalidate_region(hdr, sizeof(*hdr));
	}

	/* set first free block */
	if (block < block_map->first_free)
		block_map->first_free = block;

#if DEBUG_BLOCK_FREE
	alloc_memset_region(ptr, block_map->block_size * size,
		DEBUG_BLOCK_FREE_VALUE);
#endif

	dcache_writeback_invalidate_region(block_map, sizeof(*block_map));
	dcache_writeback_invalidate_region(heap, sizeof(*heap));
}

/* allocate single block for runtime */
static void *rmalloc_runtime(uint32_t caps, size_t bytes)
{
	struct mm_heap *heap;
	int i;

	/* check runtime heap for capabilities */
	heap = get_runtime_heap_from_caps(caps);
	if (heap)
		goto find;

	/* next check buffer heap for capabilities */
	heap = get_buffer_heap_from_caps(caps);
	if (heap == NULL)
		goto error;

find:
	for (i = 0; i < heap->blocks; i++) {

		/* is block big enough */
		if (heap->map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (heap->map[i].free_count == 0)
			continue;

		/* free block space exists */
		return alloc_block(heap, i, caps);
	}

error:
	trace_mem_error("eMm");
	trace_error_value(bytes);
	trace_error_value(caps);
	return NULL;
}

void *rmalloc(int zone, uint32_t caps, size_t bytes)
{
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	switch (zone) {
	case RZONE_SYS:
		ptr = rmalloc_sys(bytes);
		break;
	case RZONE_RUNTIME:
		ptr = rmalloc_runtime(caps, bytes);
		break;
	default:
		trace_mem_error("eMz");
		break;
	}

	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

void *rzalloc(int zone, uint32_t caps, size_t bytes)
{
	void *ptr = NULL;

	ptr = rmalloc(zone, caps, bytes);
	if (ptr != NULL) {
		bzero(ptr, bytes);
	}

	return ptr;
}

/* allocates contiguous buffers */
void *rballoc(int zone, uint32_t caps, size_t bytes)
{
	struct mm_heap *heap;
	int i;
	uint32_t flags;
	void *ptr = NULL;

	spin_lock_irq(&memmap.lock, flags);

	heap = get_buffer_heap_from_caps(caps);
	if (heap == NULL)
		goto out;

	/* will request fit in single block */
	for (i = 0; i < heap->blocks; i++) {

		/* is block big enough */
		if (heap->map[i].block_size < bytes)
			continue;

		/* does block have free space */
		if (heap->map[i].free_count == 0)
			continue;

		/* allocate block */
		ptr = alloc_block(heap, i, caps);
		goto out;
	}

	/* request spans > 1 block */

	/* only 1 choice for block size */
	if (heap->blocks == 1) {
		ptr = alloc_cont_blocks(heap, 0, caps, bytes);
		goto out;
	}

	/* find best block size for request */
	for (i = 0; i < heap->blocks; i++) {

		/* try block sizes smaller than the request */
		if (heap->map[i].block_size < bytes) {
			ptr = alloc_cont_blocks(heap, i, caps, bytes);
			if (ptr)
				goto out;
		}
	}

	/* fall back to contiguous blocks from the largest block map */
	ptr = alloc_cont_blocks(heap, heap->blocks - 1, caps, bytes);

out:
	spin_unlock_irq(&memmap.lock, flags);
	return ptr;
}

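/*
 * Worked example of the selection above (illustrative): with maps of
 * block_size 64, 512 and 1024, a 256-byte request is served by a single
 * 512-byte block. A 1536-byte request fits no single block, so the
 * multi-block path tries each smaller block size in turn (24 x 64, then
 * 3 x 512, then 2 x 1024) and finally falls back to contiguous blocks
 * from the largest map.
 */
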
void rfree(void *ptr)
{
	uint32_t flags;

	spin_lock_irq(&memmap.lock, flags);
	free_block(ptr);
	spin_unlock_irq(&memmap.lock, flags);
}

uint32_t mm_pm_context_size(void)
{
	uint32_t size = 0;
	int i;

	/* calc context size for each area */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
		size += memmap.buffer[i].info.used;
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
		size += memmap.runtime[i].info.used;
	size += memmap.system.info.used;

	/* add memory maps */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++)
		size += heap_get_size(&memmap.buffer[i]);
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++)
		size += heap_get_size(&memmap.runtime[i]);
	size += heap_get_size(&memmap.system);

	/* recalc totals */
	memmap.total.free = memmap.system.info.free;
	memmap.total.used = memmap.system.info.used;

	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		memmap.total.free += memmap.buffer[i].info.free;
		memmap.total.used += memmap.buffer[i].info.used;
	}

	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		memmap.total.free += memmap.runtime[i].info.free;
		memmap.total.used += memmap.runtime[i].info.used;
	}

	return size;
}

/*
 * Save the DSP memories that are in use by the system and modules. All
 * pipelines and modules must be disabled before calling this function. No
 * allocations are permitted after calling this and before calling restore.
 */
int mm_pm_context_save(struct dma_copy *dc, struct dma_sg_config *sg)
{
	uint32_t used;
	int32_t offset = 0;
	int32_t ret;

	/* first make sure SG buffer has enough space on host for DSP context */
	used = mm_pm_context_size();
	if (used > dma_sg_get_size(sg))
		return -EINVAL;

	/* copy memory maps to SG */
	ret = dma_copy_to_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents to SG */
	ret = dma_copy_to_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents to SG */
	// TODO: iterate over module block map and copy contents of each block
	// to the host.

	/* copy buffer memory contents to SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// to the host.

	return ret;
}

/*
 * Restore the DSP memories to modules and the system. This must be called
 * immediately after booting before any pipeline work.
 */
int mm_pm_context_restore(struct dma_copy *dc, struct dma_sg_config *sg)
{
	int32_t offset = 0;
	int32_t ret;

	/* copy memory maps from SG */
	ret = dma_copy_from_host(dc, sg, offset,
		(void *)&memmap, sizeof(memmap));
	if (ret < 0)
		return ret;

	/* copy system memory contents from SG */
	ret = dma_copy_from_host(dc, sg, offset + ret,
		(void *)memmap.system.heap, (int32_t)(memmap.system.size));
	if (ret < 0)
		return ret;

	/* copy module memory contents from SG */
	// TODO: iterate over module block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	/* copy buffer memory contents from SG */
	// TODO: iterate over buffer block map and copy contents of each block
	// from the host. This is the same block order used by the context store.

	return 0;
}

/* initialise map */
void init_heap(struct sof *sof)
{
	struct mm_heap *heap;
	struct block_map *next_map;
	struct block_map *current_map;
	int i;
	int j;

	/* sanity check for malformed images or loader issues */
	if (memmap.system.heap != HEAP_SYSTEM_BASE)
		panic(SOF_IPC_PANIC_MEM);

	spinlock_init(&memmap.lock);

	/* initialise buffer map */
	for (i = 0; i < PLATFORM_HEAP_BUFFER; i++) {
		heap = &memmap.buffer[i];

		/* map[0] starts at the heap base */
		current_map = &heap->map[0];
		current_map->base = heap->heap;
		dcache_writeback_region(current_map, sizeof(*current_map));

		/* each map[j] starts where map[j - 1]'s blocks end */
		for (j = 1; j < heap->blocks; j++) {
			next_map = &heap->map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;
			current_map = next_map;
			dcache_writeback_region(current_map,
						sizeof(*current_map));
		}
	}

	/* initialise runtime map */
	for (i = 0; i < PLATFORM_HEAP_RUNTIME; i++) {
		heap = &memmap.runtime[i];

		/* map[0] starts at the heap base */
		current_map = &heap->map[0];
		current_map->base = heap->heap;
		dcache_writeback_region(current_map, sizeof(*current_map));

		/* each map[j] starts where map[j - 1]'s blocks end */
		for (j = 1; j < heap->blocks; j++) {
			next_map = &heap->map[j];
			next_map->base = current_map->base +
				current_map->block_size *
				current_map->count;
			current_map = next_map;
			dcache_writeback_region(current_map,
						sizeof(*current_map));
		}
	}
}