blob: 1fc2ef1013789168eaf5b5547db6e31595634470 [file] [log] [blame]
Jordan Crousef6145c32008-03-19 23:56:58 +00001/*
Jordan Crousef6145c32008-03-19 23:56:58 +00002 *
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
Stefan Reinauere5d30b72010-03-25 22:15:19 +00004 * Copyright (C) 2008-2010 coresystems GmbH
Jordan Crousef6145c32008-03-19 23:56:58 +00005 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
Uwe Hermann6a441bf2008-03-20 19:54:59 +000030/*
Uwe Hermann661e3802008-03-21 18:37:23 +000031 * This is a classically weak malloc() implementation. We have a relatively
Uwe Hermann6a441bf2008-03-20 19:54:59 +000032 * small and static heap, so we take the easy route with an O(N) loop
33 * through the tree for every malloc() and free(). Obviously, this doesn't
34 * scale past a few hundred KB (if that).
35 *
Uwe Hermann661e3802008-03-21 18:37:23 +000036 * We're also susceptible to the usual buffer overrun poisoning, though the
Uwe Hermann6a441bf2008-03-20 19:54:59 +000037 * risk is within acceptable ranges for this implementation (don't overrun
38 * your buffers, kids!).
39 */
Jordan Crousef6145c32008-03-19 23:56:58 +000040
Stefan Reinauere5d30b72010-03-25 22:15:19 +000041#define IN_MALLOC_C
Jordan Crousef6145c32008-03-19 23:56:58 +000042#include <libpayload.h>
Furquan Shaikh79a591f2014-05-13 13:47:32 -070043#include <stdint.h>
Jordan Crousef6145c32008-03-19 23:56:58 +000044
/*
 * Describes one allocatable memory region (the regular heap or, once
 * init_dma_memory() has run, the cache-coherent DMA pool).
 */
struct memory_type {
	void *start;	/* First byte of the region. */
	void *end;	/* One past the last byte of the region. */
	/* Linked list of sub-regions used for memalign() allocations. */
	struct align_region_t* align_regions;
#if CONFIG(LP_DEBUG_MALLOC)
	int magic_initialized;	/* Non-zero once the first header was written. */
	size_t minimal_free;	/* Smallest amount of free memory ever seen. */
	const char *name;	/* Region name for debug output ("HEAP"/"DMA"). */
#endif
};
55
extern char _heap, _eheap;	/* Defined in the ldscript. */

/* The default region spans the linker-provided heap. */
static struct memory_type default_type =
	{ (void *)&_heap, (void *)&_eheap, NULL
#if CONFIG(LP_DEBUG_MALLOC)
	, 0, 0, "HEAP"
#endif
	};
static struct memory_type *const heap = &default_type;
/* Aliases the heap until init_dma_memory() installs a dedicated region. */
static struct memory_type *dma = &default_type;
Jordan Crousef6145c32008-03-19 23:56:58 +000066
/*
 * Each block is preceded by a 64-bit header word:
 *   bits 63..58  magic cookie (0x2a) proving the header is ours
 *   bit  57      FREE flag
 *   bits 56..0   payload size in bytes (SIZE_BITS = 64 - 7 = 57 bits)
 */
typedef u64 hdrtype_t;
#define HDRSIZE (sizeof(hdrtype_t))

#define SIZE_BITS ((HDRSIZE << 3) - 7)
#define MAGIC (((hdrtype_t)0x2a) << (SIZE_BITS + 1))
#define FLAG_FREE (((hdrtype_t)0x01) << (SIZE_BITS + 0))
#define MAX_SIZE ((((hdrtype_t)0x01) << SIZE_BITS) - 1)

/* Extract the payload size from a header word. */
#define SIZE(_h) ((_h) & MAX_SIZE)

/* Build a header word from a size and a flag field. */
#define _HEADER(_s, _f) ((hdrtype_t) (MAGIC | (_f) | ((_s) & MAX_SIZE)))

#define FREE_BLOCK(_s) _HEADER(_s, FLAG_FREE)
#define USED_BLOCK(_s) _HEADER(_s, 0)

#define IS_FREE(_h) (((_h) & (MAGIC | FLAG_FREE)) == (MAGIC | FLAG_FREE))
#define HAS_MAGIC(_h) (((_h) & MAGIC) == MAGIC)

static int free_aligned(void* addr, struct memory_type *type);
void print_malloc_map(void);
87
Julius Wernerb8fad3d2013-08-27 15:48:32 -070088void init_dma_memory(void *start, u32 size)
89{
Julius Werner509c37e2013-08-28 12:29:28 -070090 if (dma_initialized()) {
Julius Werner9665d382013-09-13 18:21:46 -070091 printf("ERROR: %s called twice!\n", __func__);
Julius Wernerb8fad3d2013-08-27 15:48:32 -070092 return;
93 }
94
Julius Werner9665d382013-09-13 18:21:46 -070095 /*
Martin Rothe81ce042017-06-03 20:00:36 -060096 * DMA memory might not be zeroed by coreboot on stage loading, so make
Julius Werner9665d382013-09-13 18:21:46 -070097 * sure we clear the magic cookie from last boot.
98 */
99 *(hdrtype_t *)start = 0;
Julius Wernerb8fad3d2013-08-27 15:48:32 -0700100
101 dma = malloc(sizeof(*dma));
102 dma->start = start;
103 dma->end = start + size;
104 dma->align_regions = NULL;
Julius Werner9665d382013-09-13 18:21:46 -0700105
Julius Wernereab2a292019-03-05 16:55:15 -0800106#if CONFIG(LP_DEBUG_MALLOC)
Julius Werner9665d382013-09-13 18:21:46 -0700107 dma->minimal_free = 0;
108 dma->magic_initialized = 0;
109 dma->name = "DMA";
110
111 printf("Initialized cache-coherent DMA memory at [%p:%p]\n", start, start + size);
112#endif
Julius Wernerb8fad3d2013-08-27 15:48:32 -0700113}
114
Arthur Heymansae57f1d2023-08-25 13:07:25 +0200115int dma_initialized(void)
Julius Werner509c37e2013-08-28 12:29:28 -0700116{
117 return dma != heap;
118}
119
120/* For boards that don't initialize DMA we assume all locations are coherent */
Yu-Ping Wu30d8e722022-08-23 16:40:03 +0800121int dma_coherent(const void *ptr)
Julius Werner509c37e2013-08-28 12:29:28 -0700122{
123 return !dma_initialized() || (dma->start <= ptr && dma->end > ptr);
124}
125
/* Find free block of size >= len */
static hdrtype_t volatile *find_free_block(int len, struct memory_type *type)
{
	hdrtype_t header;
	hdrtype_t volatile *ptr = (hdrtype_t volatile *)type->start;

	/* Align the size. */
	len = ALIGN_UP(len, HDRSIZE);

	/* Reject zero-byte and over-large requests up front. */
	if (!len || len > MAX_SIZE)
		return (void *)NULL;

	/* Make sure the region is setup correctly: a missing magic cookie
	 * means this is the first allocation, so lay down one big free
	 * block spanning the whole region (minus its own header). */
	if (!HAS_MAGIC(*ptr)) {
		size_t size = (type->end - type->start) - HDRSIZE;
		*ptr = FREE_BLOCK(size);
#if CONFIG(LP_DEBUG_MALLOC)
		type->magic_initialized = 1;
		type->minimal_free = size;
#endif
	}

	/* Find some free space: linear first-fit walk over the block list. */
	do {
		header = *ptr;
		int size = SIZE(header);

		/* A corrupt header means the heap was overrun; there is no
		 * way to recover, so halt. */
		if (!HAS_MAGIC(header) || size == 0) {
			printf("memory allocator panic. (%s%s)\n",
			       !HAS_MAGIC(header) ? " no magic " : "",
			       size == 0 ? " size=0 " : "");
			halt();
		}

		if ((header & FLAG_FREE) && len <= size)
			return ptr;

		/* Skip over this block's header and payload. */
		ptr = (hdrtype_t volatile *)((uintptr_t)ptr + HDRSIZE + size);

	} while (ptr < (hdrtype_t *) type->end);

	/* Nothing available. */
	return NULL;
}
170
171/* Mark the block with length 'len' as used */
172static void use_block(hdrtype_t volatile *ptr, int len)
173{
174 /* Align the size. */
175 len = ALIGN_UP(len, HDRSIZE);
176
177 hdrtype_t volatile *nptr = (hdrtype_t volatile *)
178 ((uintptr_t)ptr + HDRSIZE + len);
179 int size = SIZE(*ptr);
180 int nsize = size - (HDRSIZE + len);
181
182 /*
183 * If there is still room in this block, then mark it as such otherwise
184 * account the whole space for that block.
185 */
186 if (nsize > 0) {
187 /* Mark the block as used. */
188 *ptr = USED_BLOCK(len);
189
190 /* Create a new free block. */
191 *nptr = FREE_BLOCK(nsize);
192 } else {
193 /* Mark the block as used. */
194 *ptr = USED_BLOCK(size);
195 }
196}
197
198static void *alloc(int len, struct memory_type *type)
199{
200 hdrtype_t volatile *ptr = find_free_block(len, type);
201
202 if (ptr == NULL)
203 return NULL;
204
205 use_block(ptr, len);
206 return (void *)((uintptr_t)ptr + HDRSIZE);
Jordan Crousef6145c32008-03-19 23:56:58 +0000207}
208
/*
 * Walk the whole region and merge every run of adjacent free blocks into
 * a single free block, reclaiming the intermediate headers.
 */
static void _consolidate(struct memory_type *type)
{
	void *ptr = type->start;

	while (ptr < type->end) {
		void *nptr;
		hdrtype_t hdr = *((hdrtype_t *) ptr);
		unsigned int size = 0;

		/* Used blocks are skipped untouched. */
		if (!IS_FREE(hdr)) {
			ptr += HDRSIZE + SIZE(hdr);
			continue;
		}

		size = SIZE(hdr);
		nptr = ptr + HDRSIZE + SIZE(hdr);

		/* Absorb every directly following free block, including
		 * its header, into this one. */
		while (nptr < type->end) {
			hdrtype_t nhdr = *((hdrtype_t *) nptr);

			if (!(IS_FREE(nhdr)))
				break;

			size += SIZE(nhdr) + HDRSIZE;

			/* Erase the absorbed header so it can never be
			 * mistaken for a live block. */
			*((hdrtype_t *) nptr) = 0;

			nptr += (HDRSIZE + SIZE(nhdr));
		}

		/* Rewrite this block with the merged size and continue
		 * past the merged run. */
		*((hdrtype_t *) ptr) = FREE_BLOCK(size);
		ptr = nptr;
	}
}
243
/*
 * Release a pointer previously returned by malloc()/dma_malloc()/
 * memalign()/dma_memalign(). Invalid, foreign, or already-freed pointers
 * are silently ignored.
 */
void free(void *ptr)
{
	hdrtype_t hdr;
	struct memory_type *type = heap;

	/* No action occurs on NULL. */
	if (ptr == NULL)
		return;

	/* Sanity check: figure out which region the pointer belongs to,
	 * or bail if it belongs to neither. */
	if (ptr < type->start || ptr >= type->end) {
		type = dma;
		if (ptr < type->start || ptr >= type->end)
			return;
	}

	/* memalign()ed pointers live in align regions and are handled there. */
	if (free_aligned(ptr, type)) return;

	/* The block header sits immediately before the payload. */
	ptr -= HDRSIZE;
	hdr = *((hdrtype_t *) ptr);

	/* Not our header (we're probably poisoned). */
	if (!HAS_MAGIC(hdr))
		return;

	/* Double free. */
	if (hdr & FLAG_FREE)
		return;

	*((hdrtype_t *) ptr) = FREE_BLOCK(SIZE(hdr));
	_consolidate(type);
}
276
277void *malloc(size_t size)
278{
Julius Wernerb8fad3d2013-08-27 15:48:32 -0700279 return alloc(size, heap);
280}
281
282void *dma_malloc(size_t size)
283{
284 return alloc(size, dma);
Jordan Crousef6145c32008-03-19 23:56:58 +0000285}
286
287void *calloc(size_t nmemb, size_t size)
288{
Jordan Crouse24a04042008-04-25 23:08:47 +0000289 size_t total = nmemb * size;
Julius Wernerb8fad3d2013-08-27 15:48:32 -0700290 void *ptr = alloc(total, heap);
Jordan Crousef6145c32008-03-19 23:56:58 +0000291
292 if (ptr)
293 memset(ptr, 0, total);
294
295 return ptr;
296}
297
/*
 * Resize an allocation. realloc(NULL, n) behaves like malloc(n). On
 * failure NULL is returned; note that in that case the original block
 * has already been freed (a quirk of this implementation).
 */
void *realloc(void *ptr, size_t size)
{
	void *ret, *pptr;
	hdrtype_t volatile *block;
	unsigned int osize;
	struct memory_type *type = heap;

	if (ptr == NULL)
		return alloc(size, type);

	pptr = ptr - HDRSIZE;

	/* Reject pointers that do not carry our header cookie. */
	if (!HAS_MAGIC(*((hdrtype_t *) pptr)))
		return NULL;

	/* Pointers outside the heap must come from the DMA region. */
	if (ptr < type->start || ptr >= type->end)
		type = dma;

	/* Get the original size of the block. */
	osize = SIZE(*((hdrtype_t *) pptr));

	/*
	 * Free the memory to update the tables - this won't touch the actual
	 * memory, so we can still use it for the copy after we have
	 * reallocated the new space.
	 */
	free(ptr);

	block = find_free_block(size, type);
	if (block == NULL)
		return NULL;

	ret = (void *)((uintptr_t)block + HDRSIZE);

	/*
	 * If ret == ptr, then no copy is needed. Otherwise, move the memory to
	 * the new location, which might be before the old one and overlap since
	 * the free() above includes a _consolidate().
	 */
	if (ret != ptr)
		memmove(ret, ptr, osize > size ? size : osize);

	/* Mark the block as used. */
	use_block(block, size);

	return ret;
}
345
/*
 * One region backing memalign() allocations of a single alignment, or a
 * single "large" allocation with no chunk metadata.
 */
struct align_region_t
{
	/* If alignment is 0 then the region represents a large region which
	 * has no metadata for tracking subelements. */
	int alignment;
	/* start in memory, and size in bytes */
	void* start;
	int size;
	/* layout within a region:
	   - num_elements bytes, 0: free, 1: used, 2: used, combines with next
	   - padding to alignment
	   - data section
	   - waste space

	   start_data points to the start of the data section
	*/
	void* start_data;
	/* number of free blocks sized "alignment" */
	int free;
	/* Next region in the per-memory_type singly linked list. */
	struct align_region_t *next;
};
367
Aaron Durbin8bbd04e2015-01-22 08:59:03 -0600368static inline int region_is_large(const struct align_region_t *r)
Stefan Reinauer1ff26a72009-04-29 19:11:18 +0000369{
Aaron Durbin8bbd04e2015-01-22 08:59:03 -0600370 return r->alignment == 0;
Stefan Reinauer1ff26a72009-04-29 19:11:18 +0000371}
372
Aaron Durbin8bbd04e2015-01-22 08:59:03 -0600373static inline int addr_in_region(const struct align_region_t *r, void *addr)
374{
375 return ((addr >= r->start_data) && (addr < r->start_data + r->size));
376}
377
378/* num_elements == 0 indicates a large aligned region instead of a smaller
379 * region comprised of alignment-sized chunks. */
380static struct align_region_t *allocate_region(int alignment, int num_elements,
381 size_t size, struct memory_type *type)
382{
383 struct align_region_t *r;
384 size_t extra_space;
385
Julius Wernereab2a292019-03-05 16:55:15 -0800386#if CONFIG(LP_DEBUG_MALLOC)
Aaron Durbin8bbd04e2015-01-22 08:59:03 -0600387 printf("%s(old align_regions=%p, alignment=%u, num_elements=%u, size=%zu)\n",
388 __func__, type->align_regions, alignment, num_elements, size);
389#endif
390
391 r = malloc(sizeof(*r));
392
393 if (r == NULL)
394 return NULL;
395
Jonathan Neuschäfera4fbc382016-04-05 21:36:34 +0200396 memset(r, 0, sizeof(*r));
Aaron Durbin8bbd04e2015-01-22 08:59:03 -0600397
398 if (num_elements != 0) {
399 r->alignment = alignment;
400 r->size = num_elements * alignment;
401 r->free = num_elements;
402 /* Allocate enough memory for alignment requirements and
403 * metadata for each chunk. */
404 extra_space = num_elements;
405 } else {
406 /* Large aligned allocation. Set alignment = 0. */
407 r->alignment = 0;
408 r->size = size;
409 extra_space = 0;
410 }
411
412 r->start = alloc(r->size + alignment + extra_space, type);
413
414 if (r->start == NULL) {
415 free(r);
416 return NULL;
417 }
418
419 r->start_data = (void *)ALIGN_UP((uintptr_t)r->start + extra_space,
420 alignment);
421
422 /* Clear any (if requested) metadata. */
423 memset(r->start, 0, extra_space);
424
425 /* Link the region with the rest. */
426 r->next = type->align_regions;
427 type->align_regions = r;
428
429 return r;
430}
431
432static void try_free_region(struct align_region_t **prev_link)
433{
434 struct align_region_t *r = *prev_link;
435
436 /* All large regions are immediately free-able. Non-large regions
437 * need to be checked for the fully freed state. */
438 if (!region_is_large(r)) {
439 if (r->free != r->size / r->alignment)
440 return;
441 }
442
443 /* Unlink region from link list. */
444 *prev_link = r->next;
445
446 /* Free the data and metadata. */
447 free(r->start);
448 free(r);
449}
Stefan Reinauer1ff26a72009-04-29 19:11:18 +0000450
/*
 * Try to free 'addr' as a memalign() allocation from one of this
 * memory_type's align regions. Returns 1 if the address belonged to an
 * align region (and was freed), 0 otherwise so the caller can fall back
 * to the regular block free path.
 */
static int free_aligned(void* addr, struct memory_type *type)
{
	struct align_region_t **prev_link = &type->align_regions;

	while (*prev_link != NULL)
	{
		if (!addr_in_region(*prev_link, addr)) {
			prev_link = &((*prev_link)->next);
			continue;
		}

		/* Large regions hold exactly one allocation, so finding the
		 * address is enough to release the whole region. */
		if (region_is_large(*prev_link)) {
			try_free_region(prev_link);
			return 1;
		}

		/* Chunked region: compute the first chunk index, then clear
		 * the metadata run — '2' marks chunks that continue into the
		 * next one, the final chunk is marked '1'. */
		int i = (addr-(*prev_link)->start_data)/(*prev_link)->alignment;
		u8 *meta = (*prev_link)->start;
		while (meta[i] == 2)
		{
			meta[i++] = 0;
			(*prev_link)->free++;
		}
		meta[i] = 0;
		(*prev_link)->free++;
		/* Release the whole region if this was its last allocation. */
		try_free_region(prev_link);
		return 1;
	}
	return 0;
}
481
/*
 * Back-end for memalign()/dma_memalign(): allocate 'size' bytes aligned
 * to 'align' from 'type'. Small requests are carved out of chunked align
 * regions; requests at or above 1024 bytes (size or alignment) each get
 * a dedicated "large" region. Returns NULL on failure.
 */
static void *alloc_aligned(size_t align, size_t size, struct memory_type *type)
{
	/* Define a large request to be 1024 bytes for either alignment or
	 * size of allocation. */
	const size_t large_request = 1024;

	if (size == 0) return 0;
	if (type->align_regions == 0) {
		type->align_regions = malloc(sizeof(struct align_region_t));
		if (type->align_regions == NULL)
			return NULL;
		memset(type->align_regions, 0, sizeof(struct align_region_t));
	}
	struct align_region_t *reg = type->align_regions;

	/* Large requests bypass the chunked regions entirely. */
	if (size >= large_request || align >= large_request) {
		reg = allocate_region(align, 0, size, type);
		if (reg == NULL)
			return NULL;
		return reg->start_data;
	}

look_further:
	/* Find a region with matching alignment and enough free chunks. */
	while (reg != 0)
	{
		if ((reg->alignment == align) && (reg->free >= (size + align - 1)/align))
		{
#if CONFIG(LP_DEBUG_MALLOC)
			printf("  found memalign region. %u free, %zu required\n", reg->free, (size + align - 1)/align);
#endif
			break;
		}
		reg = reg->next;
	}
	if (reg == 0)
	{
#if CONFIG(LP_DEBUG_MALLOC)
		printf("  need to allocate a new memalign region\n");
#endif
		/* get align regions */
		reg = allocate_region(align, large_request/align, size, type);
#if CONFIG(LP_DEBUG_MALLOC)
		printf("  ... returned %p\n", reg);
#endif
	}
	if (reg == 0) {
		/* Nothing available. */
		return (void *)NULL;
	}

	/* Scan the metadata for 'target' consecutive free chunks; mark the
	 * run as '2' (continues) except the last chunk, which gets '1'. */
	int i, count = 0, target = (size+align-1)/align;
	for (i = 0; i < (reg->size/align); i++)
	{
		if (((u8*)reg->start)[i] == 0)
		{
			count++;
			if (count == target) {
				/* 'count' becomes the index of the run's first chunk. */
				count = i+1-count;
				for (i=0; i<target-1; i++)
				{
					((u8*)reg->start)[count+i]=2;
				}
				((u8*)reg->start)[count+target-1]=1;
				reg->free -= target;
				return reg->start_data+(align*count);
			}
		} else {
			count = 0;
		}
	}
	/* The free space in this region is fragmented,
	   so we will move on and try the next one: */
	reg = reg->next;
	goto look_further; // end condition is once a new region is allocated - it always has enough space
}
557
Julius Wernerb8fad3d2013-08-27 15:48:32 -0700558void *memalign(size_t align, size_t size)
559{
560 return alloc_aligned(align, size, heap);
561}
562
563void *dma_memalign(size_t align, size_t size)
564{
565 return alloc_aligned(align, size, dma);
566}
567
Uwe Hermann6a441bf2008-03-20 19:54:59 +0000568/* This is for debugging purposes. */
Julius Wernereab2a292019-03-05 16:55:15 -0800569#if CONFIG(LP_DEBUG_MALLOC)
/*
 * Dump every block in the heap region, then the DMA region, and report
 * the maximum memory consumption seen so far. Debug builds only.
 */
void print_malloc_map(void)
{
	struct memory_type *type = heap;
	void *ptr;
	int free_memory;

again:
	ptr = type->start;
	free_memory = 0;

	while (ptr < type->end) {
		hdrtype_t hdr = *((hdrtype_t *) ptr);

		/* A bad cookie is either corruption or a region that has
		 * never been allocated from. */
		if (!HAS_MAGIC(hdr)) {
			if (type->magic_initialized)
				printf("%s: Poisoned magic - we're toast\n", type->name);
			else
				printf("%s: No magic yet - going to initialize\n", type->name);
			break;
		}

		/* FIXME: Verify the size of the block. */

		printf("%s %x: %s (%llx bytes)\n", type->name,
		       (unsigned int)(ptr - type->start),
		       hdr & FLAG_FREE ? "FREE" : "USED", SIZE(hdr));

		if (hdr & FLAG_FREE)
			free_memory += SIZE(hdr);

		ptr += HDRSIZE + SIZE(hdr);
	}

	/* Track the low-water mark of free memory for the summary line. */
	if (free_memory && (type->minimal_free > free_memory))
		type->minimal_free = free_memory;
	printf("%s: Maximum memory consumption: %zu bytes\n", type->name,
	       (type->end - type->start) - HDRSIZE - type->minimal_free);

	/* Repeat the walk once for the DMA region (if distinct). */
	if (type != dma) {
		type = dma;
		goto again;
	}
}
Jordan Crousef6145c32008-03-19 23:56:58 +0000613#endif