blob: 5cba121df2e06bd71891f0b59d454ec95ccf2920 [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin20686d82015-03-05 14:11:27 -06002
3#include <assert.h>
4#include <cbmem.h>
5#include <console/console.h>
6#include <imd.h>
Aaron Durbin20686d82015-03-05 14:11:27 -06007#include <string.h>
Julius Wernera2148372019-11-13 19:50:33 -08008#include <types.h>
Jakub Czapigaa01138b2020-10-05 10:20:29 +02009#include <imd_private.h>
10
Aaron Durbin20686d82015-03-05 14:11:27 -060011
12/* For more details on implementation and usage please see the imd.h header. */
13
/* Return base displaced by offset bytes (offset may be negative). */
static void *relative_pointer(void *base, ssize_t offset)
{
	return (void *)((intptr_t)base + offset);
}
20
21static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
22{
23 return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
24}
25
/* Return the root structure tracked by this region handle, or NULL if the
 * region has not been created or recovered yet. */
static struct imd_root *imdr_root(const struct imdr *imdr)
{
	return imdr->r;
}
30
31/*
32 * The root pointer is relative to the upper limit of the imd. i.e. It sits
33 * just below the upper limit.
34 */
Aaron Durbincac50502015-03-24 23:14:46 -050035static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
Aaron Durbin20686d82015-03-05 14:11:27 -060036{
37 struct imd_root_pointer *rp;
38
Aaron Durbincac50502015-03-24 23:14:46 -050039 rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));
Aaron Durbin20686d82015-03-05 14:11:27 -060040
41 return rp;
42}
43
44static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
45{
46 rp->magic = IMD_ROOT_PTR_MAGIC;
47 rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
48}
49
/* Return the most recently added entry, i.e. the one with the lowest
 * start_offset. num_entries is always >= 1 once the root exists because
 * entry 0 covers the root region itself. */
static struct imd_entry *root_last_entry(struct imd_root *r)
{
	return &r->entries[r->num_entries - 1];
}
54
55static size_t root_num_entries(size_t root_size)
56{
57 size_t entries_size;
58
59 entries_size = root_size;
60 entries_size -= sizeof(struct imd_root_pointer);
61 entries_size -= sizeof(struct imd_root);
62
63 return entries_size / sizeof(struct imd_entry);
64}
65
66static size_t imd_root_data_left(struct imd_root *r)
67{
68 struct imd_entry *last_entry;
69
70 last_entry = root_last_entry(r);
71
72 if (r->max_offset != 0)
73 return last_entry->start_offset - r->max_offset;
74
75 return ~(size_t)0;
76}
77
/* A locked root accepts no further additions or removals. */
static bool root_is_locked(const struct imd_root *r)
{
	return !!(r->flags & IMD_FLAG_LOCKED);
}
82
Aaron Durbin20686d82015-03-05 14:11:27 -060083static void imd_entry_assign(struct imd_entry *e, uint32_t id,
84 ssize_t offset, size_t size)
85{
86 e->magic = IMD_ENTRY_MAGIC;
87 e->start_offset = offset;
88 e->size = size;
89 e->id = id;
90}
91
Aaron Durbincac50502015-03-24 23:14:46 -050092static void imdr_init(struct imdr *ir, void *upper_limit)
Aaron Durbin20686d82015-03-05 14:11:27 -060093{
94 uintptr_t limit = (uintptr_t)upper_limit;
95 /* Upper limit is aligned down to 4KiB */
Aaron Durbincac50502015-03-24 23:14:46 -050096 ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
97 ir->r = NULL;
Aaron Durbin20686d82015-03-05 14:11:27 -060098}
99
Aaron Durbincac50502015-03-24 23:14:46 -0500100static int imdr_create_empty(struct imdr *imdr, size_t root_size,
101 size_t entry_align)
Aaron Durbin20686d82015-03-05 14:11:27 -0600102{
103 struct imd_root_pointer *rp;
104 struct imd_root *r;
105 struct imd_entry *e;
106 ssize_t root_offset;
Aaron Durbin20686d82015-03-05 14:11:27 -0600107
Aaron Durbincac50502015-03-24 23:14:46 -0500108 if (!imdr->limit)
Aaron Durbin20686d82015-03-05 14:11:27 -0600109 return -1;
110
111 /* root_size and entry_align should be a power of 2. */
112 assert(IS_POWER_OF_2(root_size));
113 assert(IS_POWER_OF_2(entry_align));
114
115 /*
Martin Roth2ed0aa22016-01-05 20:58:58 -0700116 * root_size needs to be large enough to accommodate root pointer and
Jan Dabrosfc835882020-08-19 15:06:10 +0200117 * root book keeping structure. Furthermore, there needs to be a space
118 * for at least one entry covering root region. The caller needs to
119 * ensure there's enough room for tracking individual allocations.
Aaron Durbin20686d82015-03-05 14:11:27 -0600120 */
Jan Dabrosfc835882020-08-19 15:06:10 +0200121 if (root_size < (sizeof(*rp) + sizeof(*r) + sizeof(*e)))
Aaron Durbin20686d82015-03-05 14:11:27 -0600122 return -1;
123
Lee Leahy73402172017-03-10 15:23:24 -0800124 /* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
125 */
Aaron Durbin20686d82015-03-05 14:11:27 -0600126 if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
127 return -1;
128
129 /* Additionally, don't handle an entry alignment > root_size. */
130 if (entry_align > root_size)
131 return -1;
132
Aaron Durbincac50502015-03-24 23:14:46 -0500133 rp = imdr_get_root_pointer(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600134
135 root_offset = -(ssize_t)root_size;
136 /* Set root pointer. */
Aaron Durbincac50502015-03-24 23:14:46 -0500137 imdr->r = relative_pointer((void *)imdr->limit, root_offset);
138 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600139 imd_link_root(rp, r);
140
141 memset(r, 0, sizeof(*r));
142 r->entry_align = entry_align;
143
144 /* Calculate size left for entries. */
Aaron Durbincac50502015-03-24 23:14:46 -0500145 r->max_entries = root_num_entries(root_size);
Aaron Durbin20686d82015-03-05 14:11:27 -0600146
147 /* Fill in first entry covering the root region. */
148 r->num_entries = 1;
149 e = &r->entries[0];
150 imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);
151
152 printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);
153
154 return 0;
155}
156
/*
 * Validate and adopt a previously created imd below imdr->limit.
 * Every structural invariant is checked before imdr->r is set, so a
 * corrupted or absent imd leaves the handle untouched. Returns 0 on
 * success, -1 on any validation failure.
 */
static int imdr_recover(struct imdr *imdr)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	uintptr_t low_limit;
	size_t i;

	if (!imdr->limit)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	/* Root pointer must carry the expected magic. */
	if (!imd_root_pointer_valid(rp))
		return -1;

	r = relative_pointer(rp, rp->root_offset);

	/* Ensure that root is just under the root pointer */
	/* NOTE(review): signed difference compared against unsigned sizeof;
	 * a root recorded above rp would wrap to a huge unsigned value and
	 * (correctly) fail, but confirm this is intentional. */
	if ((intptr_t)rp - (intptr_t)&r->entries[r->max_entries] > sizeof(struct imd_entry))
		return -1;

	if (r->num_entries > r->max_entries)
		return -1;

	/* Entry alignment should be power of 2. */
	if (!IS_POWER_OF_2(r->entry_align))
		return -1;

	low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

	/* If no max_offset then lowest limit is 0. */
	if (low_limit == (uintptr_t)r)
		low_limit = 0;

	/* Each entry must carry the entry magic and lie entirely within
	 * [low_limit, imdr->limit). */
	for (i = 0; i < r->num_entries; i++) {
		uintptr_t start_addr;
		const struct imd_entry *e = &r->entries[i];

		if (e->magic != IMD_ENTRY_MAGIC)
			return -1;

		start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
		if (start_addr < low_limit)
			return -1;
		if (start_addr >= imdr->limit ||
				(start_addr + e->size) > imdr->limit)
			return -1;
	}

	/* Set root pointer. */
	imdr->r = r;

	return 0;
}
211
Aaron Durbincac50502015-03-24 23:14:46 -0500212static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
213 uint32_t id)
Aaron Durbin20686d82015-03-05 14:11:27 -0600214{
215 struct imd_root *r;
216 struct imd_entry *e;
Aaron Durbincac50502015-03-24 23:14:46 -0500217 size_t i;
Aaron Durbin20686d82015-03-05 14:11:27 -0600218
Aaron Durbincac50502015-03-24 23:14:46 -0500219 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600220
Aaron Durbincac50502015-03-24 23:14:46 -0500221 if (r == NULL)
222 return NULL;
Aaron Durbin20686d82015-03-05 14:11:27 -0600223
Aaron Durbincac50502015-03-24 23:14:46 -0500224 e = NULL;
225 /* Skip first entry covering the root. */
226 for (i = 1; i < r->num_entries; i++) {
227 if (id != r->entries[i].id)
228 continue;
229 e = &r->entries[i];
230 break;
231 }
232
233 return e;
234}
235
236static int imdr_limit_size(struct imdr *imdr, size_t max_size)
237{
238 struct imd_root *r;
239 ssize_t smax_size;
240 size_t root_size;
241
242 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600243 if (r == NULL)
244 return -1;
245
Aaron Durbincac50502015-03-24 23:14:46 -0500246 root_size = imdr->limit - (uintptr_t)r;
Aaron Durbin20686d82015-03-05 14:11:27 -0600247
Aaron Durbincac50502015-03-24 23:14:46 -0500248 if (max_size < root_size)
249 return -1;
Aaron Durbin20686d82015-03-05 14:11:27 -0600250
Aaron Durbincac50502015-03-24 23:14:46 -0500251 /* Take into account the root size. */
252 smax_size = max_size - root_size;
253 smax_size = -smax_size;
Aaron Durbin20686d82015-03-05 14:11:27 -0600254
Aaron Durbincac50502015-03-24 23:14:46 -0500255 r->max_offset = smax_size;
Aaron Durbin20686d82015-03-05 14:11:27 -0600256
257 return 0;
258}
259
/* Size in bytes of the memory tracked by entry e. */
static size_t imdr_entry_size(const struct imd_entry *e)
{
	return e->size;
}
264
/* Translate an entry's root-relative start_offset into an absolute pointer. */
static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
{
	return relative_pointer(imdr_root(imdr), e->start_offset);
}
269
Aaron Durbin20686d82015-03-05 14:11:27 -0600270static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
271 size_t size)
272{
273 struct imd_entry *entry;
274 struct imd_entry *last_entry;
275 ssize_t e_offset;
276 size_t used_size;
277
278 if (r->num_entries == r->max_entries)
279 return NULL;
280
281 /* Determine total size taken up by entry. */
282 used_size = ALIGN_UP(size, r->entry_align);
283
Aaron Durbin20686d82015-03-05 14:11:27 -0600284 /* See if size overflows imd total size. */
Aaron Durbincac50502015-03-24 23:14:46 -0500285 if (used_size > imd_root_data_left(r))
286 return NULL;
Aaron Durbin20686d82015-03-05 14:11:27 -0600287
288 /*
289 * Determine if offset field overflows. All offsets should be lower
290 * than the previous one.
291 */
Aaron Durbincac50502015-03-24 23:14:46 -0500292 last_entry = root_last_entry(r);
Aaron Durbin20686d82015-03-05 14:11:27 -0600293 e_offset = last_entry->start_offset;
294 e_offset -= (ssize_t)used_size;
Jan Dabros93d56f52020-08-20 08:29:49 +0200295 if (e_offset >= last_entry->start_offset)
Aaron Durbin20686d82015-03-05 14:11:27 -0600296 return NULL;
297
298 entry = root_last_entry(r) + 1;
299 r->num_entries++;
300
301 imd_entry_assign(entry, id, e_offset, size);
302
303 return entry;
304}
305
Aaron Durbincac50502015-03-24 23:14:46 -0500306static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
307 uint32_t id, size_t size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600308{
309 struct imd_root *r;
310
Aaron Durbincac50502015-03-24 23:14:46 -0500311 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600312
313 if (r == NULL)
314 return NULL;
315
316 if (root_is_locked(r))
317 return NULL;
318
319 return imd_entry_add_to_root(r, id, size);
320}
321
Aaron Durbincac50502015-03-24 23:14:46 -0500322static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
323{
324 struct imd_root *r;
325 size_t idx;
326
327 r = imdr_root(imdr);
328 if (r == NULL)
329 return false;
330
331 /* Determine if the entry is within this root structure. */
332 idx = e - &r->entries[0];
333 if (idx >= r->num_entries)
334 return false;
335
336 return true;
337}
338
339static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
340 const struct imd_entry *entry)
341{
342 if (imdr_has_entry(&imd->lg, entry))
343 return &imd->lg;
344
345 if (imdr_has_entry(&imd->sm, entry))
346 return &imd->sm;
347
348 return NULL;
349}
350
/* Initialize imd handle. The large region is bounded above by upper_limit;
 * the small region starts out absent (limit 0) until created/recovered. */
void imd_handle_init(struct imd *imd, void *upper_limit)
{
	imdr_init(&imd->lg, upper_limit);
	imdr_init(&imd->sm, NULL);
}
357
/*
 * Re-attach to an existing imd without the validation performed by
 * imd_recover(): root pointers are followed blindly. Callers must know
 * the imd below lg.limit is intact. No-op when no limit was ever set.
 */
void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large regions. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	/* The small region, when present, lives inside a large-region entry. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	/* Small region's upper limit is the end of that entry. */
	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}
385
/* Create an empty imd consisting of only the large region. See imd.h. */
int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
{
	return imdr_create_empty(&imd->lg, root_size, entry_align);
}
390
/*
 * Create a two-tier imd: a large region plus a small region nested inside
 * one of the large region's entries. On any failure the handle is reset
 * to its freshly-initialized state and -1 is returned.
 */
int imd_create_tiered_empty(struct imd *imd,
				size_t lg_root_size, size_t lg_entry_align,
				size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request: room for a
	 * full entry table plus the small root, rounded to the large
	 * region's alignment. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	/* Bound the small region so it can never outgrow its entry. */
	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
		imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}
427
428int imd_recover(struct imd *imd)
429{
430 const struct imd_entry *e;
431 uintptr_t small_upper_limit;
432 struct imdr *imdr;
433
434 imdr = &imd->lg;
435 if (imdr_recover(imdr) != 0)
436 return -1;
437
Anna Karasb44b68b2020-08-04 13:35:29 +0200438 /* Determine if small region is present. */
Aaron Durbincac50502015-03-24 23:14:46 -0500439 e = imdr_entry_find(imdr, SMALL_REGION_ID);
440
441 if (e == NULL)
442 return 0;
443
444 small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
Anna Karas215e7fc2020-07-16 14:12:30 +0200445 small_upper_limit += imdr_entry_size(e);
Aaron Durbincac50502015-03-24 23:14:46 -0500446
447 imd->sm.limit = small_upper_limit;
448
449 /* Tear down any changes on failure. */
450 if (imdr_recover(&imd->sm) != 0) {
451 imd_handle_init(imd, (void *)imd->lg.limit);
452 return -1;
453 }
454
455 return 0;
456}
457
/* Bound the large region to max_size total bytes (root included). */
int imd_limit_size(struct imd *imd, size_t max_size)
{
	return imdr_limit_size(&imd->lg, max_size);
}
462
463int imd_lockdown(struct imd *imd)
464{
465 struct imd_root *r;
466
467 r = imdr_root(&imd->lg);
468 if (r == NULL)
469 return -1;
470
471 r->flags |= IMD_FLAG_LOCKED;
472
473 r = imdr_root(&imd->sm);
474 if (r != NULL)
475 r->flags |= IMD_FLAG_LOCKED;
476
477 return 0;
478}
479
480int imd_region_used(struct imd *imd, void **base, size_t *size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600481{
482 struct imd_root *r;
483 struct imd_entry *e;
Aaron Durbincac50502015-03-24 23:14:46 -0500484 void *low_addr;
485 size_t sz_used;
Aaron Durbin20686d82015-03-05 14:11:27 -0600486
Aaron Durbincac50502015-03-24 23:14:46 -0500487 if (!imd->lg.limit)
488 return -1;
489
490 r = imdr_root(&imd->lg);
Aaron Durbin20686d82015-03-05 14:11:27 -0600491
492 if (r == NULL)
Aaron Durbincac50502015-03-24 23:14:46 -0500493 return -1;
Aaron Durbin20686d82015-03-05 14:11:27 -0600494
Aaron Durbincac50502015-03-24 23:14:46 -0500495 /* Use last entry to obtain lowest address. */
496 e = root_last_entry(r);
497
498 low_addr = relative_pointer(r, e->start_offset);
499
500 /* Total size used is the last entry's base up to the limit. */
501 sz_used = imd->lg.limit - (uintptr_t)low_addr;
502
503 *base = low_addr;
504 *size = sz_used;
505
506 return 0;
507}
508
509const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
510 size_t size)
511{
512 struct imd_root *r;
513 const struct imdr *imdr;
514 const struct imd_entry *e = NULL;
515
516 /*
517 * Determine if requested size is less than 1/4 of small data
518 * region is left.
519 */
520 imdr = &imd->sm;
521 r = imdr_root(imdr);
522
523 /* No small region. Use the large region. */
524 if (r == NULL)
525 return imdr_entry_add(&imd->lg, id, size);
526 else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
527 e = imdr_entry_add(imdr, id, size);
528
529 /* Fall back on large region allocation. */
530 if (e == NULL)
531 e = imdr_entry_add(&imd->lg, id, size);
532
533 return e;
534}
535
536const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
537{
538 const struct imd_entry *e;
539
540 /* Many of the smaller allocations are used a lot. Therefore, try
541 * the small region first. */
542 e = imdr_entry_find(&imd->sm, id);
543
544 if (e == NULL)
545 e = imdr_entry_find(&imd->lg, id);
Aaron Durbin20686d82015-03-05 14:11:27 -0600546
547 return e;
548}
549
550const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
551 uint32_t id, size_t size)
552{
553 const struct imd_entry *e;
554
555 e = imd_entry_find(imd, id);
556
557 if (e != NULL)
558 return e;
559
560 return imd_entry_add(imd, id, size);
561}
562
/* Size in bytes of the memory covered by entry. */
size_t imd_entry_size(const struct imd_entry *entry)
{
	return imdr_entry_size(entry);
}
567
568void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
569{
Aaron Durbincac50502015-03-24 23:14:46 -0500570 const struct imdr *imdr;
Aaron Durbin20686d82015-03-05 14:11:27 -0600571
Aaron Durbincac50502015-03-24 23:14:46 -0500572 imdr = imd_entry_to_imdr(imd, entry);
Aaron Durbin20686d82015-03-05 14:11:27 -0600573
Aaron Durbincac50502015-03-24 23:14:46 -0500574 if (imdr == NULL)
Aaron Durbin20686d82015-03-05 14:11:27 -0600575 return NULL;
576
Aaron Durbincac50502015-03-24 23:14:46 -0500577 return imdr_entry_at(imdr, entry);
Aaron Durbin20686d82015-03-05 14:11:27 -0600578}
579
/* Return the id the entry was allocated under. */
uint32_t imd_entry_id(const struct imd_entry *entry)
{
	return entry->id;
}
584
Aaron Durbin20686d82015-03-05 14:11:27 -0600585int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
586{
587 struct imd_root *r;
Aaron Durbincac50502015-03-24 23:14:46 -0500588 const struct imdr *imdr;
Aaron Durbin20686d82015-03-05 14:11:27 -0600589
Aaron Durbincac50502015-03-24 23:14:46 -0500590 imdr = imd_entry_to_imdr(imd, entry);
591
592 if (imdr == NULL)
Lee Leahy35af5c42017-03-09 17:35:28 -0800593 return -1;
Aaron Durbincac50502015-03-24 23:14:46 -0500594
595 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600596
Aaron Durbin20686d82015-03-05 14:11:27 -0600597 if (root_is_locked(r))
598 return -1;
599
600 if (entry != root_last_entry(r))
601 return -1;
602
Jan Dabros3b0d0402020-08-21 12:20:45 +0200603 /* Don't remove entry covering root region */
604 if (r->num_entries == 1)
605 return -1;
606
Aaron Durbin20686d82015-03-05 14:11:27 -0600607 r->num_entries--;
608
609 return 0;
610}
611
Aaron Durbincac50502015-03-24 23:14:46 -0500612static void imdr_print_entries(const struct imdr *imdr, const char *indent,
613 const struct imd_lookup *lookup, size_t size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600614{
615 struct imd_root *r;
616 size_t i;
617 size_t j;
618
Aaron Durbincac50502015-03-24 23:14:46 -0500619 if (imdr == NULL)
620 return;
Aaron Durbin20686d82015-03-05 14:11:27 -0600621
Aaron Durbincac50502015-03-24 23:14:46 -0500622 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600623
624 for (i = 0; i < r->num_entries; i++) {
625 const char *name = NULL;
626 const struct imd_entry *e = &r->entries[i];
627
628 for (j = 0; j < size; j++) {
629 if (lookup[j].id == e->id) {
630 name = lookup[j].name;
631 break;
632 }
633 }
634
Aaron Durbincac50502015-03-24 23:14:46 -0500635 printk(BIOS_DEBUG, "%s", indent);
636
Aaron Durbin20686d82015-03-05 14:11:27 -0600637 if (name == NULL)
638 printk(BIOS_DEBUG, "%08x ", e->id);
639 else
640 printk(BIOS_DEBUG, "%s", name);
641 printk(BIOS_DEBUG, "%2zu. ", i);
Aaron Durbincac50502015-03-24 23:14:46 -0500642 printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
Anna Karas215e7fc2020-07-16 14:12:30 +0200643 printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(e));
Aaron Durbincac50502015-03-24 23:14:46 -0500644 }
645}
646
/* Print all entries of both regions; the small region, when present, is
 * printed under its own header with extra indent. Fails only when the
 * large region has no root. */
int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
			size_t size)
{
	if (imdr_root(&imd->lg) == NULL)
		return -1;

	imdr_print_entries(&imd->lg, "", lookup, size);
	if (imdr_root(&imd->sm) != NULL) {
		printk(BIOS_DEBUG, "IMD small region:\n");
		imdr_print_entries(&imd->sm, " ", lookup, size);
	}

	return 0;
}
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500661
/* Prepare a cursor for iterating all entries of the imd. Returns -1 on
 * NULL arguments. */
int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
{
	if (imd == NULL || cursor == NULL)
		return -1;

	memset(cursor, 0, sizeof(*cursor));

	/* Iterate over the large region first, then the small one. */
	cursor->imdr[0] = &imd->lg;
	cursor->imdr[1] = &imd->sm;

	return 0;
}
674
675const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
676{
677 struct imd_root *r;
678 const struct imd_entry *e;
679
680 if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
681 return NULL;
682
683 r = imdr_root(cursor->imdr[cursor->current_imdr]);
684
685 if (r == NULL)
686 return NULL;
687
688 if (cursor->current_entry >= r->num_entries) {
689 /* Try next imdr. */
690 cursor->current_imdr++;
691 cursor->current_entry = 0;
692 return imd_cursor_next(cursor);
693 }
694
695 e = &r->entries[cursor->current_entry];
696 cursor->current_entry++;
697
698 return e;
699}