Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 1 | /* |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 2 | * |
| 3 | * Copyright (C) 2008 Advanced Micro Devices, Inc. |
Stefan Reinauer | e5d30b7 | 2010-03-25 22:15:19 +0000 | [diff] [blame] | 4 | * Copyright (C) 2008-2010 coresystems GmbH |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions |
| 8 | * are met: |
| 9 | * 1. Redistributions of source code must retain the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer. |
| 11 | * 2. Redistributions in binary form must reproduce the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer in the |
| 13 | * documentation and/or other materials provided with the distribution. |
| 14 | * 3. The name of the author may not be used to endorse or promote products |
| 15 | * derived from this software without specific prior written permission. |
| 16 | * |
| 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 | * SUCH DAMAGE. |
| 28 | */ |
| 29 | |
Uwe Hermann | 6a441bf | 2008-03-20 19:54:59 +0000 | [diff] [blame] | 30 | /* |
Uwe Hermann | 661e380 | 2008-03-21 18:37:23 +0000 | [diff] [blame] | 31 | * This is a classically weak malloc() implementation. We have a relatively |
Uwe Hermann | 6a441bf | 2008-03-20 19:54:59 +0000 | [diff] [blame] | 32 | * small and static heap, so we take the easy route with an O(N) loop |
| 33 | * through the tree for every malloc() and free(). Obviously, this doesn't |
| 34 | * scale past a few hundred KB (if that). |
| 35 | * |
Uwe Hermann | 661e380 | 2008-03-21 18:37:23 +0000 | [diff] [blame] | 36 | * We're also susceptible to the usual buffer overrun poisoning, though the |
Uwe Hermann | 6a441bf | 2008-03-20 19:54:59 +0000 | [diff] [blame] | 37 | * risk is within acceptable ranges for this implementation (don't overrun |
| 38 | * your buffers, kids!). |
| 39 | */ |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 40 | |
Stefan Reinauer | e5d30b7 | 2010-03-25 22:15:19 +0000 | [diff] [blame] | 41 | #define IN_MALLOC_C |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 42 | #include <libpayload.h> |
Furquan Shaikh | 79a591f | 2014-05-13 13:47:32 -0700 | [diff] [blame] | 43 | #include <stdint.h> |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 44 | |
/*
 * Describes one allocatable memory region (the default heap or the
 * optional cache-coherent DMA region registered via init_dma_memory()).
 */
struct memory_type {
	void *start;	/* First byte of the region. */
	void *end;	/* One past the last byte of the region. */
	/* Linked list of memalign() sub-regions carved out of this region. */
	struct align_region_t* align_regions;
#if CONFIG(LP_DEBUG_MALLOC)
	int magic_initialized;	/* Set once the first header cookie is written. */
	size_t minimal_free;	/* Low-water mark of free bytes (for stats). */
	const char *name;	/* Human-readable region name ("HEAP"/"DMA"). */
#endif
};
| 55 | |
extern char _heap, _eheap;	/* Defined in the ldscript. */

/* The default heap spans [_heap, _eheap) as laid out by the linker script. */
static struct memory_type default_type =
	{ (void *)&_heap, (void *)&_eheap, NULL
#if CONFIG(LP_DEBUG_MALLOC)
	, 0, 0, "HEAP"
#endif
	};
/* 'heap' always points at the default region; 'dma' aliases it until
   init_dma_memory() installs a dedicated cache-coherent region. */
static struct memory_type *const heap = &default_type;
static struct memory_type *dma = &default_type;
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 66 | |
/*
 * Every block is preceded by one 64-bit header word with this layout:
 *   bits 63..58  magic cookie 0x2a (validates the header / detects poisoning)
 *   bit  57      free flag (set = block is free)
 *   bits 56..0   payload size in bytes (SIZE_BITS = 57 bits)
 */
typedef u64 hdrtype_t;
#define HDRSIZE (sizeof(hdrtype_t))

#define SIZE_BITS ((HDRSIZE << 3) - 7)
#define MAGIC (((hdrtype_t)0x2a) << (SIZE_BITS + 1))
#define FLAG_FREE (((hdrtype_t)0x01) << (SIZE_BITS + 0))
#define MAX_SIZE ((((hdrtype_t)0x01) << SIZE_BITS) - 1)

/* Extract the payload size from a header word. */
#define SIZE(_h) ((_h) & MAX_SIZE)

/* Compose a header word from a size and flag bits. */
#define _HEADER(_s, _f) ((hdrtype_t) (MAGIC | (_f) | ((_s) & MAX_SIZE)))

#define FREE_BLOCK(_s) _HEADER(_s, FLAG_FREE)
#define USED_BLOCK(_s) _HEADER(_s, 0)

#define IS_FREE(_h) (((_h) & (MAGIC | FLAG_FREE)) == (MAGIC | FLAG_FREE))
#define HAS_MAGIC(_h) (((_h) & MAGIC) == MAGIC)

static int free_aligned(void* addr, struct memory_type *type);
void print_malloc_map(void);
| 87 | |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 88 | void init_dma_memory(void *start, u32 size) |
| 89 | { |
Julius Werner | 509c37e | 2013-08-28 12:29:28 -0700 | [diff] [blame] | 90 | if (dma_initialized()) { |
Julius Werner | 9665d38 | 2013-09-13 18:21:46 -0700 | [diff] [blame] | 91 | printf("ERROR: %s called twice!\n", __func__); |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 92 | return; |
| 93 | } |
| 94 | |
Julius Werner | 9665d38 | 2013-09-13 18:21:46 -0700 | [diff] [blame] | 95 | /* |
Martin Roth | e81ce04 | 2017-06-03 20:00:36 -0600 | [diff] [blame] | 96 | * DMA memory might not be zeroed by coreboot on stage loading, so make |
Julius Werner | 9665d38 | 2013-09-13 18:21:46 -0700 | [diff] [blame] | 97 | * sure we clear the magic cookie from last boot. |
| 98 | */ |
| 99 | *(hdrtype_t *)start = 0; |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 100 | |
| 101 | dma = malloc(sizeof(*dma)); |
| 102 | dma->start = start; |
| 103 | dma->end = start + size; |
| 104 | dma->align_regions = NULL; |
Julius Werner | 9665d38 | 2013-09-13 18:21:46 -0700 | [diff] [blame] | 105 | |
Julius Werner | eab2a29 | 2019-03-05 16:55:15 -0800 | [diff] [blame] | 106 | #if CONFIG(LP_DEBUG_MALLOC) |
Julius Werner | 9665d38 | 2013-09-13 18:21:46 -0700 | [diff] [blame] | 107 | dma->minimal_free = 0; |
| 108 | dma->magic_initialized = 0; |
| 109 | dma->name = "DMA"; |
| 110 | |
| 111 | printf("Initialized cache-coherent DMA memory at [%p:%p]\n", start, start + size); |
| 112 | #endif |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 113 | } |
| 114 | |
Arthur Heymans | ae57f1d | 2023-08-25 13:07:25 +0200 | [diff] [blame^] | 115 | int dma_initialized(void) |
Julius Werner | 509c37e | 2013-08-28 12:29:28 -0700 | [diff] [blame] | 116 | { |
| 117 | return dma != heap; |
| 118 | } |
| 119 | |
| 120 | /* For boards that don't initialize DMA we assume all locations are coherent */ |
Yu-Ping Wu | 30d8e72 | 2022-08-23 16:40:03 +0800 | [diff] [blame] | 121 | int dma_coherent(const void *ptr) |
Julius Werner | 509c37e | 2013-08-28 12:29:28 -0700 | [diff] [blame] | 122 | { |
| 123 | return !dma_initialized() || (dma->start <= ptr && dma->end > ptr); |
| 124 | } |
| 125 | |
/*
 * Find free block of size >= len.
 *
 * Walks the region's block list linearly from the start (O(N)) and returns
 * a pointer to the HEADER of the first free block big enough, or NULL if
 * the request is invalid or nothing fits.  Lazily initializes the region
 * as one giant free block on first use (detected by a missing magic cookie).
 */
static hdrtype_t volatile *find_free_block(int len, struct memory_type *type)
{
	hdrtype_t header;
	hdrtype_t volatile *ptr = (hdrtype_t volatile *)type->start;

	/* Align the size. */
	len = ALIGN_UP(len, HDRSIZE);

	/* Reject zero-length and over-sized requests (size must fit in the
	   header's 57 size bits). */
	if (!len || len > MAX_SIZE)
		return (void *)NULL;

	/* Make sure the region is setup correctly. */
	if (!HAS_MAGIC(*ptr)) {
		size_t size = (type->end - type->start) - HDRSIZE;
		*ptr = FREE_BLOCK(size);
#if CONFIG(LP_DEBUG_MALLOC)
		type->magic_initialized = 1;
		type->minimal_free = size;
#endif
	}

	/* Find some free space. */
	do {
		header = *ptr;
		int size = SIZE(header);

		/* A clobbered magic or a zero-size block means the header
		   chain is corrupt (buffer overrun) -- unrecoverable. */
		if (!HAS_MAGIC(header) || size == 0) {
			printf("memory allocator panic. (%s%s)\n",
			       !HAS_MAGIC(header) ? " no magic " : "",
			       size == 0 ? " size=0 " : "");
			halt();
		}

		if ((header & FLAG_FREE) && len <= size)
			return ptr;

		/* Hop to the next header: past this header plus its payload. */
		ptr = (hdrtype_t volatile *)((uintptr_t)ptr + HDRSIZE + size);

	} while (ptr < (hdrtype_t *) type->end);

	/* Nothing available. */
	return NULL;
}
| 170 | |
/*
 * Mark the block with length 'len' as used.
 *
 * 'ptr' must point at the header of a free block previously returned by
 * find_free_block(), so SIZE(*ptr) >= len.  If enough space remains after
 * the allocation for another header plus payload, the remainder is split
 * off as a new free block; otherwise the whole block is consumed.
 */
static void use_block(hdrtype_t volatile *ptr, int len)
{
	/* Align the size. */
	len = ALIGN_UP(len, HDRSIZE);

	/* Candidate header position for the split-off remainder. */
	hdrtype_t volatile *nptr = (hdrtype_t volatile *)
		((uintptr_t)ptr + HDRSIZE + len);
	int size = SIZE(*ptr);
	/* Bytes left over after carving out 'len' plus the new header. */
	int nsize = size - (HDRSIZE + len);

	/*
	 * If there is still room in this block, then mark it as such otherwise
	 * account the whole space for that block.
	 */
	if (nsize > 0) {
		/* Mark the block as used. */
		*ptr = USED_BLOCK(len);

		/* Create a new free block. */
		*nptr = FREE_BLOCK(nsize);
	} else {
		/* Mark the block as used. */
		*ptr = USED_BLOCK(size);
	}
}
| 197 | |
| 198 | static void *alloc(int len, struct memory_type *type) |
| 199 | { |
| 200 | hdrtype_t volatile *ptr = find_free_block(len, type); |
| 201 | |
| 202 | if (ptr == NULL) |
| 203 | return NULL; |
| 204 | |
| 205 | use_block(ptr, len); |
| 206 | return (void *)((uintptr_t)ptr + HDRSIZE); |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 207 | } |
| 208 | |
/*
 * Merge adjacent free blocks across the whole region.
 *
 * For every free block found, absorb all immediately-following free blocks
 * into it (zeroing their headers) and rewrite its header with the combined
 * size.  Called after each free() to fight fragmentation.
 */
static void _consolidate(struct memory_type *type)
{
	void *ptr = type->start;

	while (ptr < type->end) {
		void *nptr;
		hdrtype_t hdr = *((hdrtype_t *) ptr);
		unsigned int size = 0;

		/* Used blocks are skipped untouched. */
		if (!IS_FREE(hdr)) {
			ptr += HDRSIZE + SIZE(hdr);
			continue;
		}

		size = SIZE(hdr);
		nptr = ptr + HDRSIZE + SIZE(hdr);

		/* Swallow every directly adjacent free block. */
		while (nptr < type->end) {
			hdrtype_t nhdr = *((hdrtype_t *) nptr);

			if (!(IS_FREE(nhdr)))
				break;

			/* The absorbed block's header becomes payload too. */
			size += SIZE(nhdr) + HDRSIZE;

			*((hdrtype_t *) nptr) = 0;

			nptr += (HDRSIZE + SIZE(nhdr));
		}

		*((hdrtype_t *) ptr) = FREE_BLOCK(size);
		ptr = nptr;
	}
}
| 243 | |
/*
 * Release memory previously returned by malloc()/dma_malloc()/memalign().
 *
 * Determines which region (heap or DMA) the pointer belongs to, delegates
 * memalign-style allocations to free_aligned(), and otherwise marks the
 * block free and re-consolidates the region.  Invalid pointers, foreign
 * pointers and double frees are silently ignored.
 */
void free(void *ptr)
{
	hdrtype_t hdr;
	struct memory_type *type = heap;

	/* No action occurs on NULL. */
	if (ptr == NULL)
		return;

	/* Sanity check. */
	if (ptr < type->start || ptr >= type->end) {
		type = dma;
		if (ptr < type->start || ptr >= type->end)
			return;
	}

	/* memalign()ed blocks live in align regions and are freed there. */
	if (free_aligned(ptr, type)) return;

	/* Step back from the payload to the block header. */
	ptr -= HDRSIZE;
	hdr = *((hdrtype_t *) ptr);

	/* Not our header (we're probably poisoned). */
	if (!HAS_MAGIC(hdr))
		return;

	/* Double free. */
	if (hdr & FLAG_FREE)
		return;

	*((hdrtype_t *) ptr) = FREE_BLOCK(SIZE(hdr));
	_consolidate(type);
}
| 276 | |
| 277 | void *malloc(size_t size) |
| 278 | { |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 279 | return alloc(size, heap); |
| 280 | } |
| 281 | |
| 282 | void *dma_malloc(size_t size) |
| 283 | { |
| 284 | return alloc(size, dma); |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 285 | } |
| 286 | |
| 287 | void *calloc(size_t nmemb, size_t size) |
| 288 | { |
Jordan Crouse | 24a0404 | 2008-04-25 23:08:47 +0000 | [diff] [blame] | 289 | size_t total = nmemb * size; |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 290 | void *ptr = alloc(total, heap); |
Jordan Crouse | f6145c3 | 2008-03-19 23:56:58 +0000 | [diff] [blame] | 291 | |
| 292 | if (ptr) |
| 293 | memset(ptr, 0, total); |
| 294 | |
| 295 | return ptr; |
| 296 | } |
| 297 | |
/*
 * Resize an allocation to 'size' bytes, preserving min(old, new) bytes of
 * content.  The block may move; the returned pointer supersedes 'ptr'.
 *
 * Implementation trick: the old block is free()d FIRST (which consolidates
 * the region but does not touch the payload bytes), then a new block is
 * searched for.  This lets an allocation shrink/grow in place or slide
 * backwards into a preceding free block; memmove handles the possible
 * overlap.  NOTE(review): this relies on nothing allocating between the
 * free() and use_block() below -- single-threaded use is assumed.
 */
void *realloc(void *ptr, size_t size)
{
	void *ret, *pptr;
	hdrtype_t volatile *block;
	unsigned int osize;
	struct memory_type *type = heap;

	/* realloc(NULL, n) is plain malloc(n). */
	if (ptr == NULL)
		return alloc(size, type);

	pptr = ptr - HDRSIZE;

	/* Not one of our blocks -- refuse to touch it. */
	if (!HAS_MAGIC(*((hdrtype_t *) pptr)))
		return NULL;

	/* Route DMA-region pointers to the DMA allocator. */
	if (ptr < type->start || ptr >= type->end)
		type = dma;

	/* Get the original size of the block. */
	osize = SIZE(*((hdrtype_t *) pptr));

	/*
	 * Free the memory to update the tables - this won't touch the actual
	 * memory, so we can still use it for the copy after we have
	 * reallocated the new space.
	 */
	free(ptr);

	block = find_free_block(size, type);
	if (block == NULL)
		return NULL;

	ret = (void *)((uintptr_t)block + HDRSIZE);

	/*
	 * If ret == ptr, then no copy is needed. Otherwise, move the memory to
	 * the new location, which might be before the old one and overlap since
	 * the free() above includes a _consolidate().
	 */
	if (ret != ptr)
		memmove(ret, ptr, osize > size ? size : osize);

	/* Mark the block as used. */
	use_block(block, size);

	return ret;
}
| 345 | |
/*
 * Bookkeeping for one memalign() arena: a chunk of heap carved into
 * alignment-sized elements with one metadata byte per element, or (when
 * alignment == 0) a single large stand-alone aligned allocation.
 */
struct align_region_t
{
	/* If alignment is 0 then the region represents a large region which
	 * has no metadata for tracking subelements. */
	int alignment;
	/* start in memory, and size in bytes */
	void* start;
	int size;
	/* layout within a region:
	- num_elements bytes, 0: free, 1: used, 2: used, combines with next
	- padding to alignment
	- data section
	- waste space

	start_data points to the start of the data section
	*/
	void* start_data;
	/* number of free blocks sized "alignment" */
	int free;
	/* next arena in this memory_type's singly-linked list */
	struct align_region_t *next;
};
| 367 | |
Aaron Durbin | 8bbd04e | 2015-01-22 08:59:03 -0600 | [diff] [blame] | 368 | static inline int region_is_large(const struct align_region_t *r) |
Stefan Reinauer | 1ff26a7 | 2009-04-29 19:11:18 +0000 | [diff] [blame] | 369 | { |
Aaron Durbin | 8bbd04e | 2015-01-22 08:59:03 -0600 | [diff] [blame] | 370 | return r->alignment == 0; |
Stefan Reinauer | 1ff26a7 | 2009-04-29 19:11:18 +0000 | [diff] [blame] | 371 | } |
| 372 | |
Aaron Durbin | 8bbd04e | 2015-01-22 08:59:03 -0600 | [diff] [blame] | 373 | static inline int addr_in_region(const struct align_region_t *r, void *addr) |
| 374 | { |
| 375 | return ((addr >= r->start_data) && (addr < r->start_data + r->size)); |
| 376 | } |
| 377 | |
/* num_elements == 0 indicates a large aligned region instead of a smaller
 * region comprised of alignment-sized chunks.
 *
 * Allocates the backing storage from 'type', initializes the metadata
 * bytes (chunked case only), links the new region at the head of the
 * type's align_regions list and returns it, or NULL on failure.
 */
static struct align_region_t *allocate_region(int alignment, int num_elements,
					      size_t size, struct memory_type *type)
{
	struct align_region_t *r;
	size_t extra_space;

#if CONFIG(LP_DEBUG_MALLOC)
	printf("%s(old align_regions=%p, alignment=%u, num_elements=%u, size=%zu)\n",
			__func__, type->align_regions, alignment, num_elements, size);
#endif

	r = malloc(sizeof(*r));

	if (r == NULL)
		return NULL;

	memset(r, 0, sizeof(*r));

	if (num_elements != 0) {
		r->alignment = alignment;
		r->size = num_elements * alignment;
		r->free = num_elements;
		/* Allocate enough memory for alignment requirements and
		 * metadata for each chunk. */
		extra_space = num_elements;
	} else {
		/* Large aligned allocation. Set alignment = 0. */
		r->alignment = 0;
		r->size = size;
		extra_space = 0;
	}

	/* Over-allocate by 'alignment' so start_data can be rounded up, plus
	   room for the metadata bytes in front of the data section. */
	r->start = alloc(r->size + alignment + extra_space, type);

	if (r->start == NULL) {
		free(r);
		return NULL;
	}

	r->start_data = (void *)ALIGN_UP((uintptr_t)r->start + extra_space,
					 alignment);

	/* Clear any (if requested) metadata. */
	memset(r->start, 0, extra_space);

	/* Link the region with the rest. */
	r->next = type->align_regions;
	type->align_regions = r;

	return r;
}
| 431 | |
| 432 | static void try_free_region(struct align_region_t **prev_link) |
| 433 | { |
| 434 | struct align_region_t *r = *prev_link; |
| 435 | |
| 436 | /* All large regions are immediately free-able. Non-large regions |
| 437 | * need to be checked for the fully freed state. */ |
| 438 | if (!region_is_large(r)) { |
| 439 | if (r->free != r->size / r->alignment) |
| 440 | return; |
| 441 | } |
| 442 | |
| 443 | /* Unlink region from link list. */ |
| 444 | *prev_link = r->next; |
| 445 | |
| 446 | /* Free the data and metadata. */ |
| 447 | free(r->start); |
| 448 | free(r); |
| 449 | } |
Stefan Reinauer | 1ff26a7 | 2009-04-29 19:11:18 +0000 | [diff] [blame] | 450 | |
/*
 * Attempt to free a memalign()-style allocation.
 *
 * Scans the type's align-region list for the region containing 'addr'.
 * Returns 1 if the address was found and freed there, 0 if it is not a
 * memalign allocation (caller then frees it as a regular block).
 */
static int free_aligned(void* addr, struct memory_type *type)
{
	struct align_region_t **prev_link = &type->align_regions;

	while (*prev_link != NULL)
	{
		if (!addr_in_region(*prev_link, addr)) {
			prev_link = &((*prev_link)->next);
			continue;
		}

		/* Large regions hold exactly one allocation: drop the whole
		   region. */
		if (region_is_large(*prev_link)) {
			try_free_region(prev_link);
			return 1;
		}

		/* Chunked region: compute the element index, then walk the
		   metadata chain.  A byte of 2 means "used, continues into
		   the next element"; the final element of the allocation is
		   marked 1. */
		int i = (addr-(*prev_link)->start_data)/(*prev_link)->alignment;
		u8 *meta = (*prev_link)->start;
		while (meta[i] == 2)
		{
			meta[i++] = 0;
			(*prev_link)->free++;
		}
		meta[i] = 0;
		(*prev_link)->free++;
		/* Release the whole arena if it just became empty. */
		try_free_region(prev_link);
		return 1;
	}
	return 0;
}
| 481 | |
/*
 * Allocate 'size' bytes aligned to 'align' from the given region.
 *
 * Requests where either size or alignment reaches 1024 bytes get their own
 * dedicated "large" region.  Smaller requests are served from chunked
 * arenas of matching alignment: a first-fit scan over the per-element
 * metadata looks for 'target' consecutive free elements; if no existing
 * arena can satisfy the request, a fresh one is allocated (which by
 * construction has enough room, terminating the goto loop).
 */
static void *alloc_aligned(size_t align, size_t size, struct memory_type *type)
{
	/* Define a large request to be 1024 bytes for either alignment or
	 * size of allocation. */
	const size_t large_request = 1024;

	if (size == 0) return 0;
	if (type->align_regions == 0) {
		type->align_regions = malloc(sizeof(struct align_region_t));
		if (type->align_regions == NULL)
			return NULL;
		memset(type->align_regions, 0, sizeof(struct align_region_t));
	}
	struct align_region_t *reg = type->align_regions;

	if (size >= large_request || align >= large_request) {
		reg = allocate_region(align, 0, size, type);
		if (reg == NULL)
			return NULL;
		return reg->start_data;
	}

look_further:
	/* Find an arena with the exact alignment and enough free elements
	   (fragmentation may still defeat us -- checked below). */
	while (reg != 0)
	{
		if ((reg->alignment == align) && (reg->free >= (size + align - 1)/align))
		{
#if CONFIG(LP_DEBUG_MALLOC)
			printf("  found memalign region. %u free, %zu required\n", reg->free, (size + align - 1)/align);
#endif
			break;
		}
		reg = reg->next;
	}
	if (reg == 0)
	{
#if CONFIG(LP_DEBUG_MALLOC)
		printf("  need to allocate a new memalign region\n");
#endif
		/* get align regions */
		reg = allocate_region(align, large_request/align, size, type);
#if CONFIG(LP_DEBUG_MALLOC)
		printf("  ... returned %p\n", reg);
#endif
	}
	if (reg == 0) {
		/* Nothing available. */
		return (void *)NULL;
	}

	/* First-fit scan for 'target' consecutive free metadata bytes; mark
	   them 2 (continuation) with the last one 1 (end of allocation). */
	int i, count = 0, target = (size+align-1)/align;
	for (i = 0; i < (reg->size/align); i++)
	{
		if (((u8*)reg->start)[i] == 0)
		{
			count++;
			if (count == target) {
				count = i+1-count;
				for (i=0; i<target-1; i++)
				{
					((u8*)reg->start)[count+i]=2;
				}
				((u8*)reg->start)[count+target-1]=1;
				reg->free -= target;
				return reg->start_data+(align*count);
			}
		} else {
			count = 0;
		}
	}
	/* The free space in this region is fragmented,
	   so we will move on and try the next one: */
	reg = reg->next;
	goto look_further; // end condition is once a new region is allocated - it always has enough space
}
| 557 | |
Julius Werner | b8fad3d | 2013-08-27 15:48:32 -0700 | [diff] [blame] | 558 | void *memalign(size_t align, size_t size) |
| 559 | { |
| 560 | return alloc_aligned(align, size, heap); |
| 561 | } |
| 562 | |
| 563 | void *dma_memalign(size_t align, size_t size) |
| 564 | { |
| 565 | return alloc_aligned(align, size, dma); |
| 566 | } |
| 567 | |
/* This is for debugging purposes. */
#if CONFIG(LP_DEBUG_MALLOC)
/*
 * Dump every block (offset, FREE/USED, size) of the heap and, if distinct,
 * the DMA region, and track/report the high-water memory consumption via
 * each region's minimal_free statistic.
 */
void print_malloc_map(void)
{
	struct memory_type *type = heap;
	void *ptr;
	int free_memory;

again:
	ptr = type->start;
	free_memory = 0;

	while (ptr < type->end) {
		hdrtype_t hdr = *((hdrtype_t *) ptr);

		/* A missing cookie either means the region was never used
		   (fine) or that a header was overwritten (fatal for us). */
		if (!HAS_MAGIC(hdr)) {
			if (type->magic_initialized)
				printf("%s: Poisoned magic - we're toast\n", type->name);
			else
				printf("%s: No magic yet - going to initialize\n", type->name);
			break;
		}

		/* FIXME: Verify the size of the block. */

		printf("%s %x: %s (%llx bytes)\n", type->name,
		       (unsigned int)(ptr - type->start),
		       hdr & FLAG_FREE ? "FREE" : "USED", SIZE(hdr));

		if (hdr & FLAG_FREE)
			free_memory += SIZE(hdr);

		ptr += HDRSIZE + SIZE(hdr);
	}

	/* Update the low-water mark of free bytes for this region. */
	if (free_memory && (type->minimal_free > free_memory))
		type->minimal_free = free_memory;
	printf("%s: Maximum memory consumption: %zu bytes\n", type->name,
		(type->end - type->start) - HDRSIZE - type->minimal_free);

	/* Run the same dump once more for the DMA region, if separate. */
	if (type != dma) {
		type = dma;
		goto again;
	}
}
#endif