/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <cbmem.h>
#include <console/console.h>
#include <imd.h>
#include <stdlib.h>
#include <string.h>
#include <types.h>

/* For more details on implementation and usage please see the imd.h header. */
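
/*
 * Rough layout sketch (illustrative), addresses decreasing from the top:
 *
 *   upper limit (aligned down to LIMIT_ALIGN)
 *   struct imd_root_pointer      - sits just below the limit
 *   entries[max_entries - 1]
 *   ...
 *   entries[0]                   - covers the root region itself
 *   struct imd_root              - at limit minus root_size
 *   entry data blocks            - negative start_offsets relative to the
 *   ...                            root, each allocation below the previous
 *   max_offset                   - optional lower bound, relative to the root
 *
 * Illustrative call sequence (the sizes below are example values, not
 * requirements; imd.h remains the authoritative reference):
 *
 *   struct imd imd;
 *   imd_handle_init(&imd, upper_limit);
 *   if (imd_recover(&imd) != 0)
 *           imd_create_empty(&imd, 4096, 4096);
 *   const struct imd_entry *e = imd_entry_find_or_add(&imd, id, size);
 *   void *p = imd_entry_at(&imd, e);
 */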

static const uint32_t IMD_ROOT_PTR_MAGIC = 0xc0389481;
static const uint32_t IMD_ENTRY_MAGIC = ~0xc0389481;
static const uint32_t SMALL_REGION_ID = CBMEM_ID_IMD_SMALL;
static const size_t LIMIT_ALIGN = 4096;

/* In-memory data structures. */
struct imd_root_pointer {
	uint32_t magic;
	/* Relative to upper limit/offset. */
	int32_t root_offset;
} __packed;

struct imd_entry {
	uint32_t magic;
	/* start is located relative to imd_root */
	int32_t start_offset;
	uint32_t size;
	uint32_t id;
} __packed;

struct imd_root {
	uint32_t max_entries;
	uint32_t num_entries;
	uint32_t flags;
	uint32_t entry_align;
	/* Used for fixing the size of an imd. Relative to the root. */
	int32_t max_offset;
	struct imd_entry entries[0];
} __packed;

#define IMD_FLAG_LOCKED 1

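/*
 * Compute a pointer at a signed offset from base. Offsets within an imd are
 * typically negative since the structures grow down from the upper limit.
 */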
static void *relative_pointer(void *base, ssize_t offset)
{
	intptr_t b = (intptr_t)base;
	b += offset;
	return (void *)b;
}

static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
{
	return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
}

static struct imd_root *imdr_root(const struct imdr *imdr)
{
	return imdr->r;
}

/*
 * The root pointer is relative to the upper limit of the imd. i.e. It sits
 * just below the upper limit.
 */
static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
{
	struct imd_root_pointer *rp;

	rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));

	return rp;
}

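/* Point the root pointer at the root structure using a self-relative offset. */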
static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
{
	rp->magic = IMD_ROOT_PTR_MAGIC;
	rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
}

static struct imd_entry *root_last_entry(struct imd_root *r)
{
	return &r->entries[r->num_entries - 1];
}

static size_t root_num_entries(size_t root_size)
{
	size_t entries_size;

	entries_size = root_size;
	entries_size -= sizeof(struct imd_root_pointer);
	entries_size -= sizeof(struct imd_root);

	return entries_size / sizeof(struct imd_entry);
}

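/*
 * Bytes available between the lowest allocated entry and max_offset.
 * Effectively unbounded when no max_offset has been set.
 */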
static size_t imd_root_data_left(struct imd_root *r)
{
	struct imd_entry *last_entry;

	last_entry = root_last_entry(r);

	if (r->max_offset != 0)
		return last_entry->start_offset - r->max_offset;

	return ~(size_t)0;
}

static bool root_is_locked(const struct imd_root *r)
{
	return !!(r->flags & IMD_FLAG_LOCKED);
}

static void imd_entry_assign(struct imd_entry *e, uint32_t id,
				ssize_t offset, size_t size)
{
	e->magic = IMD_ENTRY_MAGIC;
	e->start_offset = offset;
	e->size = size;
	e->id = id;
}

static void imdr_init(struct imdr *ir, void *upper_limit)
{
	uintptr_t limit = (uintptr_t)upper_limit;
	/* Upper limit is aligned down to 4KiB */
	ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
	ir->r = NULL;
}

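/*
 * Create a new, empty imd just below imdr->limit with a root region of
 * root_size bytes and entries aligned to entry_align. Returns 0 on success.
 */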
static int imdr_create_empty(struct imdr *imdr, size_t root_size,
				size_t entry_align)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	struct imd_entry *e;
	ssize_t root_offset;

	if (!imdr->limit)
		return -1;

	/* root_size and entry_align should be a power of 2. */
	assert(IS_POWER_OF_2(root_size));
	assert(IS_POWER_OF_2(entry_align));

	if (!imdr->limit)
		return -1;

	/*
	 * root_size needs to be large enough to accommodate the root pointer
	 * and the root bookkeeping structure. The caller needs to ensure
	 * there's enough room for tracking individual allocations.
	 */
	if (root_size < (sizeof(*rp) + sizeof(*r)))
		return -1;

	/* For simplicity, don't allow sizes or alignments to exceed LIMIT_ALIGN. */
	if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
		return -1;

	/* Additionally, don't handle an entry alignment > root_size. */
	if (entry_align > root_size)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	root_offset = -(ssize_t)root_size;
	/* Set root pointer. */
	imdr->r = relative_pointer((void *)imdr->limit, root_offset);
	r = imdr_root(imdr);
	imd_link_root(rp, r);

	memset(r, 0, sizeof(*r));
	r->entry_align = entry_align;

	/* Calculate size left for entries. */
	r->max_entries = root_num_entries(root_size);

	/* Fill in first entry covering the root region. */
	r->num_entries = 1;
	e = &r->entries[0];
	imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);

	printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);

	return 0;
}

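/*
 * Validate an imd previously created below imdr->limit and attach to it.
 * Returns -1 if the root pointer, root, or any entry fails a sanity check.
 */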
static int imdr_recover(struct imdr *imdr)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	uintptr_t low_limit;
	size_t i;

	if (!imdr->limit)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	if (!imd_root_pointer_valid(rp))
		return -1;

	r = relative_pointer(rp, rp->root_offset);

	/* Confirm the root and root pointer are just under the limit. */
	if (ALIGN_UP((uintptr_t)&r->entries[r->max_entries], LIMIT_ALIGN) !=
			imdr->limit)
		return -1;

	if (r->num_entries > r->max_entries)
		return -1;

	/* Entry alignment should be power of 2. */
	if (!IS_POWER_OF_2(r->entry_align))
		return -1;

	low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

	/* If no max_offset then lowest limit is 0. */
	if (low_limit == (uintptr_t)r)
		low_limit = 0;

	for (i = 0; i < r->num_entries; i++) {
		uintptr_t start_addr;
		const struct imd_entry *e = &r->entries[i];

		if (e->magic != IMD_ENTRY_MAGIC)
			return -1;

		start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
		if (start_addr < low_limit)
			return -1;
		if (start_addr >= imdr->limit ||
				(start_addr + e->size) > imdr->limit)
			return -1;
	}

	/* Set root pointer. */
	imdr->r = r;

	return 0;
}

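/* Look up the entry with the given id; returns NULL if it is not present. */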
static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
						uint32_t id)
{
	struct imd_root *r;
	struct imd_entry *e;
	size_t i;

	r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	e = NULL;
	/* Skip first entry covering the root. */
	for (i = 1; i < r->num_entries; i++) {
		if (id != r->entries[i].id)
			continue;
		e = &r->entries[i];
		break;
	}

	return e;
}

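/*
 * Cap the total size of the imd (root region plus entry data) at max_size
 * by recording the corresponding max_offset in the root.
 */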
static int imdr_limit_size(struct imdr *imdr, size_t max_size)
{
	struct imd_root *r;
	ssize_t smax_size;
	size_t root_size;

	r = imdr_root(imdr);
	if (r == NULL)
		return -1;

	root_size = imdr->limit - (uintptr_t)r;

	if (max_size < root_size)
		return -1;

	/* Take into account the root size. */
	smax_size = max_size - root_size;
	smax_size = -smax_size;

	r->max_offset = smax_size;

	return 0;
}

static size_t imdr_entry_size(const struct imdr *imdr,
				const struct imd_entry *e)
{
	return e->size;
}

static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
{
	return relative_pointer(imdr_root(imdr), e->start_offset);
}

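/*
 * Allocate a new entry growing downward from the last allocated entry.
 * Returns NULL when the root is out of entry slots or data space.
 */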
static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
						size_t size)
{
	struct imd_entry *entry;
	struct imd_entry *last_entry;
	ssize_t e_offset;
	size_t used_size;

	if (r->num_entries == r->max_entries)
		return NULL;

	/* Determine total size taken up by entry. */
	used_size = ALIGN_UP(size, r->entry_align);

	/* See if size overflows imd total size. */
	if (used_size > imd_root_data_left(r))
		return NULL;

	/*
	 * Determine if offset field overflows. All offsets should be lower
	 * than the previous one.
	 */
	last_entry = root_last_entry(r);
	e_offset = last_entry->start_offset;
	e_offset -= (ssize_t)used_size;
	if (e_offset > last_entry->start_offset)
		return NULL;

	entry = root_last_entry(r) + 1;
	r->num_entries++;

	imd_entry_assign(entry, id, e_offset, size);

	return entry;
}

static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
						uint32_t id, size_t size)
{
	struct imd_root *r;

	r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	if (root_is_locked(r))
		return NULL;

	return imd_entry_add_to_root(r, id, size);
}

static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
{
	struct imd_root *r;
	size_t idx;

	r = imdr_root(imdr);
	if (r == NULL)
		return false;

	/* Determine if the entry is within this root structure. */
	idx = e - &r->entries[0];
	if (idx >= r->num_entries)
		return false;

	return true;
}

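/* Determine whether an entry belongs to the large or the small region. */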
static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
						const struct imd_entry *entry)
{
	if (imdr_has_entry(&imd->lg, entry))
		return &imd->lg;

	if (imdr_has_entry(&imd->sm, entry))
		return &imd->sm;

	return NULL;
}

/* Initialize imd handle. */
void imd_handle_init(struct imd *imd, void *upper_limit)
{
	imdr_init(&imd->lg, upper_limit);
	imdr_init(&imd->sm, NULL);
}

void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large regions. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(imdr, e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}

int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
{
	return imdr_create_empty(&imd->lg, root_size, entry_align);
}

int imd_create_tiered_empty(struct imd *imd,
				size_t lg_root_size, size_t lg_entry_align,
				size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
			imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}

int imd_recover(struct imd *imd)
{
	const struct imd_entry *e;
	uintptr_t small_upper_limit;
	struct imdr *imdr;

	imdr = &imd->lg;
	if (imdr_recover(imdr) != 0)
		return -1;

	/* Determine if the small region is present. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return 0;

	small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
	small_upper_limit += imdr_entry_size(imdr, e);

	imd->sm.limit = small_upper_limit;

	/* Tear down any changes on failure. */
	if (imdr_recover(&imd->sm) != 0) {
		imd_handle_init(imd, (void *)imd->lg.limit);
		return -1;
	}

	return 0;
}

int imd_limit_size(struct imd *imd, size_t max_size)
{
	return imdr_limit_size(&imd->lg, max_size);
}

int imd_lockdown(struct imd *imd)
{
	struct imd_root *r;

	r = imdr_root(&imd->lg);
	if (r == NULL)
		return -1;

	r->flags |= IMD_FLAG_LOCKED;

	r = imdr_root(&imd->sm);
	if (r != NULL)
		r->flags |= IMD_FLAG_LOCKED;

	return 0;
}

int imd_region_used(struct imd *imd, void **base, size_t *size)
{
	struct imd_root *r;
	struct imd_entry *e;
	void *low_addr;
	size_t sz_used;

	if (!imd->lg.limit)
		return -1;

	r = imdr_root(&imd->lg);

	if (r == NULL)
		return -1;

	/* Use last entry to obtain lowest address. */
	e = root_last_entry(r);

	low_addr = relative_pointer(r, e->start_offset);

	/* Total size used is the last entry's base up to the limit. */
	sz_used = imd->lg.limit - (uintptr_t)low_addr;

	*base = low_addr;
	*size = sz_used;

	return 0;
}

const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
					size_t size)
{
	struct imd_root *r;
	const struct imdr *imdr;
	const struct imd_entry *e = NULL;

	/*
	 * Use the small region if the requested size is at most the entry
	 * alignment or no more than 1/4 of the data left in the small region.
	 */
	imdr = &imd->sm;
	r = imdr_root(imdr);

	/* No small region. Use the large region. */
	if (r == NULL)
		return imdr_entry_add(&imd->lg, id, size);
	else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
		e = imdr_entry_add(imdr, id, size);

	/* Fall back on large region allocation. */
	if (e == NULL)
		e = imdr_entry_add(&imd->lg, id, size);

	return e;
}

const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
{
	const struct imd_entry *e;

	/* Many of the smaller allocations are used a lot. Therefore, try
	 * the small region first. */
	e = imdr_entry_find(&imd->sm, id);

	if (e == NULL)
		e = imdr_entry_find(&imd->lg, id);

	return e;
}

const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
						uint32_t id, size_t size)
{
	const struct imd_entry *e;

	e = imd_entry_find(imd, id);

	if (e != NULL)
		return e;

	return imd_entry_add(imd, id, size);
}

size_t imd_entry_size(const struct imd *imd, const struct imd_entry *entry)
{
	return imdr_entry_size(NULL, entry);
}

void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return NULL;

	return imdr_entry_at(imdr, entry);
}

uint32_t imd_entry_id(const struct imd *imd, const struct imd_entry *entry)
{
	return entry->id;
}

int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
{
	struct imd_root *r;
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return -1;

	r = imdr_root(imdr);

	if (r == NULL)
		return -1;

	if (root_is_locked(r))
		return -1;

	if (entry != root_last_entry(r))
		return -1;

	r->num_entries--;

	return 0;
}

static void imdr_print_entries(const struct imdr *imdr, const char *indent,
				const struct imd_lookup *lookup, size_t size)
{
	struct imd_root *r;
	size_t i;
	size_t j;

	if (imdr == NULL)
		return;

	r = imdr_root(imdr);

	for (i = 0; i < r->num_entries; i++) {
		const char *name = NULL;
		const struct imd_entry *e = &r->entries[i];

		for (j = 0; j < size; j++) {
			if (lookup[j].id == e->id) {
				name = lookup[j].name;
				break;
			}
		}

		printk(BIOS_DEBUG, "%s", indent);

		if (name == NULL)
			printk(BIOS_DEBUG, "%08x ", e->id);
		else
			printk(BIOS_DEBUG, "%s", name);
		printk(BIOS_DEBUG, "%2zu. ", i);
		printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
		printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(imdr, e));
	}
}

int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
			size_t size)
{
	if (imdr_root(&imd->lg) == NULL)
		return -1;

	imdr_print_entries(&imd->lg, "", lookup, size);
	if (imdr_root(&imd->sm) != NULL) {
		printk(BIOS_DEBUG, "IMD small region:\n");
		imdr_print_entries(&imd->sm, " ", lookup, size);
	}

	return 0;
}

int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
{
	if (imd == NULL || cursor == NULL)
		return -1;

	memset(cursor, 0, sizeof(*cursor));

	cursor->imdr[0] = &imd->lg;
	cursor->imdr[1] = &imd->sm;

	return 0;
}

const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
{
	struct imd_root *r;
	const struct imd_entry *e;

	if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
		return NULL;

	r = imdr_root(cursor->imdr[cursor->current_imdr]);

	if (r == NULL)
		return NULL;

	if (cursor->current_entry >= r->num_entries) {
		/* Try next imdr. */
		cursor->current_imdr++;
		cursor->current_entry = 0;
		return imd_cursor_next(cursor);
	}

	e = &r->entries[cursor->current_entry];
	cursor->current_entry++;

	return e;
}