blob: ac19d76f528714e6790f62a2f10e392e49f3b6c1 [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin20686d82015-03-05 14:11:27 -06002
3#include <assert.h>
4#include <cbmem.h>
5#include <console/console.h>
6#include <imd.h>
7#include <stdlib.h>
8#include <string.h>
Julius Wernera2148372019-11-13 19:50:33 -08009#include <types.h>
Jakub Czapigaa01138b2020-10-05 10:20:29 +020010#include <imd_private.h>
11
Aaron Durbin20686d82015-03-05 14:11:27 -060012
13/* For more details on implementation and usage please see the imd.h header. */
14
/* Return base displaced by offset bytes, as a void pointer. */
static void *relative_pointer(void *base, ssize_t offset)
{
	return (void *)((intptr_t)base + offset);
}
21
22static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
23{
24 return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
25}
26
Aaron Durbincac50502015-03-24 23:14:46 -050027static struct imd_root *imdr_root(const struct imdr *imdr)
Aaron Durbin20686d82015-03-05 14:11:27 -060028{
Aaron Durbincac50502015-03-24 23:14:46 -050029 return imdr->r;
Aaron Durbin20686d82015-03-05 14:11:27 -060030}
31
32/*
33 * The root pointer is relative to the upper limit of the imd. i.e. It sits
34 * just below the upper limit.
35 */
Aaron Durbincac50502015-03-24 23:14:46 -050036static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
Aaron Durbin20686d82015-03-05 14:11:27 -060037{
38 struct imd_root_pointer *rp;
39
Aaron Durbincac50502015-03-24 23:14:46 -050040 rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));
Aaron Durbin20686d82015-03-05 14:11:27 -060041
42 return rp;
43}
44
45static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
46{
47 rp->magic = IMD_ROOT_PTR_MAGIC;
48 rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
49}
50
Aaron Durbincac50502015-03-24 23:14:46 -050051static struct imd_entry *root_last_entry(struct imd_root *r)
52{
53 return &r->entries[r->num_entries - 1];
54}
55
56static size_t root_num_entries(size_t root_size)
57{
58 size_t entries_size;
59
60 entries_size = root_size;
61 entries_size -= sizeof(struct imd_root_pointer);
62 entries_size -= sizeof(struct imd_root);
63
64 return entries_size / sizeof(struct imd_entry);
65}
66
67static size_t imd_root_data_left(struct imd_root *r)
68{
69 struct imd_entry *last_entry;
70
71 last_entry = root_last_entry(r);
72
73 if (r->max_offset != 0)
74 return last_entry->start_offset - r->max_offset;
75
76 return ~(size_t)0;
77}
78
79static bool root_is_locked(const struct imd_root *r)
80{
81 return !!(r->flags & IMD_FLAG_LOCKED);
82}
83
Aaron Durbin20686d82015-03-05 14:11:27 -060084static void imd_entry_assign(struct imd_entry *e, uint32_t id,
85 ssize_t offset, size_t size)
86{
87 e->magic = IMD_ENTRY_MAGIC;
88 e->start_offset = offset;
89 e->size = size;
90 e->id = id;
91}
92
Aaron Durbincac50502015-03-24 23:14:46 -050093static void imdr_init(struct imdr *ir, void *upper_limit)
Aaron Durbin20686d82015-03-05 14:11:27 -060094{
95 uintptr_t limit = (uintptr_t)upper_limit;
96 /* Upper limit is aligned down to 4KiB */
Aaron Durbincac50502015-03-24 23:14:46 -050097 ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
98 ir->r = NULL;
Aaron Durbin20686d82015-03-05 14:11:27 -060099}
100
/*
 * Carve out an empty imd directly below imdr->limit: place the root
 * pointer at the very top, the root bookkeeping structure root_size
 * bytes below it, and seed the entry table with one entry covering the
 * root region itself. Returns 0 on success, -1 on invalid parameters.
 */
static int imdr_create_empty(struct imdr *imdr, size_t root_size,
				size_t entry_align)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	struct imd_entry *e;
	ssize_t root_offset;

	if (!imdr->limit)
		return -1;

	/* root_size and entry_align should be a power of 2. */
	assert(IS_POWER_OF_2(root_size));
	assert(IS_POWER_OF_2(entry_align));

	/*
	 * root_size needs to be large enough to accommodate root pointer and
	 * root book keeping structure. Furthermore, there needs to be a space
	 * for at least one entry covering root region. The caller needs to
	 * ensure there's enough room for tracking individual allocations.
	 */
	if (root_size < (sizeof(*rp) + sizeof(*r) + sizeof(*e)))
		return -1;

	/* For simplicity don't allow sizes or alignments to exceed LIMIT_ALIGN.
	 */
	if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
		return -1;

	/* Additionally, don't handle an entry alignment > root_size. */
	if (entry_align > root_size)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	root_offset = -(ssize_t)root_size;
	/* Set root pointer. */
	imdr->r = relative_pointer((void *)imdr->limit, root_offset);
	r = imdr_root(imdr);
	imd_link_root(rp, r);

	memset(r, 0, sizeof(*r));
	r->entry_align = entry_align;

	/* Calculate size left for entries. */
	r->max_entries = root_num_entries(root_size);

	/* Fill in first entry covering the root region. */
	r->num_entries = 1;
	e = &r->entries[0];
	imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);

	printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);

	return 0;
}
157
/*
 * Validate and attach an existing imd found below imdr->limit. The root
 * pointer, the root structure, and every entry are sanity-checked
 * before the root is accepted; returns -1 on any inconsistency so a
 * corrupted imd is never adopted.
 */
static int imdr_recover(struct imdr *imdr)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	uintptr_t low_limit;
	size_t i;

	if (!imdr->limit)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	if (!imd_root_pointer_valid(rp))
		return -1;

	r = relative_pointer(rp, rp->root_offset);

	/* Ensure that root is just under the root pointer */
	if ((intptr_t)rp - (intptr_t)&r->entries[r->max_entries] > sizeof(struct imd_entry))
		return -1;

	if (r->num_entries > r->max_entries)
		return -1;

	/* Entry alignment should be power of 2. */
	if (!IS_POWER_OF_2(r->entry_align))
		return -1;

	low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

	/* If no max_offset then lowest limit is 0. */
	if (low_limit == (uintptr_t)r)
		low_limit = 0;

	/* Every entry must carry the magic and lie within
	   [low_limit, imdr->limit). */
	for (i = 0; i < r->num_entries; i++) {
		uintptr_t start_addr;
		const struct imd_entry *e = &r->entries[i];

		if (e->magic != IMD_ENTRY_MAGIC)
			return -1;

		start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
		if (start_addr < low_limit)
			return -1;
		if (start_addr >= imdr->limit ||
				(start_addr + e->size) > imdr->limit)
			return -1;
	}

	/* Set root pointer. */
	imdr->r = r;

	return 0;
}
212
Aaron Durbincac50502015-03-24 23:14:46 -0500213static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
214 uint32_t id)
Aaron Durbin20686d82015-03-05 14:11:27 -0600215{
216 struct imd_root *r;
217 struct imd_entry *e;
Aaron Durbincac50502015-03-24 23:14:46 -0500218 size_t i;
Aaron Durbin20686d82015-03-05 14:11:27 -0600219
Aaron Durbincac50502015-03-24 23:14:46 -0500220 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600221
Aaron Durbincac50502015-03-24 23:14:46 -0500222 if (r == NULL)
223 return NULL;
Aaron Durbin20686d82015-03-05 14:11:27 -0600224
Aaron Durbincac50502015-03-24 23:14:46 -0500225 e = NULL;
226 /* Skip first entry covering the root. */
227 for (i = 1; i < r->num_entries; i++) {
228 if (id != r->entries[i].id)
229 continue;
230 e = &r->entries[i];
231 break;
232 }
233
234 return e;
235}
236
237static int imdr_limit_size(struct imdr *imdr, size_t max_size)
238{
239 struct imd_root *r;
240 ssize_t smax_size;
241 size_t root_size;
242
243 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600244 if (r == NULL)
245 return -1;
246
Aaron Durbincac50502015-03-24 23:14:46 -0500247 root_size = imdr->limit - (uintptr_t)r;
Aaron Durbin20686d82015-03-05 14:11:27 -0600248
Aaron Durbincac50502015-03-24 23:14:46 -0500249 if (max_size < root_size)
250 return -1;
Aaron Durbin20686d82015-03-05 14:11:27 -0600251
Aaron Durbincac50502015-03-24 23:14:46 -0500252 /* Take into account the root size. */
253 smax_size = max_size - root_size;
254 smax_size = -smax_size;
Aaron Durbin20686d82015-03-05 14:11:27 -0600255
Aaron Durbincac50502015-03-24 23:14:46 -0500256 r->max_offset = smax_size;
Aaron Durbin20686d82015-03-05 14:11:27 -0600257
258 return 0;
259}
260
Anna Karas215e7fc2020-07-16 14:12:30 +0200261static size_t imdr_entry_size(const struct imd_entry *e)
Aaron Durbincac50502015-03-24 23:14:46 -0500262{
263 return e->size;
264}
265
266static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
267{
268 return relative_pointer(imdr_root(imdr), e->start_offset);
269}
270
/*
 * Append a new entry to the root's entry table, allocating size bytes
 * (rounded up to the root's entry alignment) directly below the
 * previous entry. Returns NULL when the table is full, the data area
 * is exhausted, or the new offset would not decrease (overflow).
 */
static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
					size_t size)
{
	struct imd_entry *entry;
	struct imd_entry *last_entry;
	ssize_t e_offset;
	size_t used_size;

	if (r->num_entries == r->max_entries)
		return NULL;

	/* Determine total size taken up by entry. */
	used_size = ALIGN_UP(size, r->entry_align);

	/* See if size overflows imd total size. */
	if (used_size > imd_root_data_left(r))
		return NULL;

	/*
	 * Determine if offset field overflows. All offsets should be lower
	 * than the previous one.
	 */
	last_entry = root_last_entry(r);
	e_offset = last_entry->start_offset;
	e_offset -= (ssize_t)used_size;
	if (e_offset >= last_entry->start_offset)
		return NULL;

	entry = root_last_entry(r) + 1;
	r->num_entries++;

	imd_entry_assign(entry, id, e_offset, size);

	return entry;
}
306
Aaron Durbincac50502015-03-24 23:14:46 -0500307static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
308 uint32_t id, size_t size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600309{
310 struct imd_root *r;
311
Aaron Durbincac50502015-03-24 23:14:46 -0500312 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600313
314 if (r == NULL)
315 return NULL;
316
317 if (root_is_locked(r))
318 return NULL;
319
320 return imd_entry_add_to_root(r, id, size);
321}
322
Aaron Durbincac50502015-03-24 23:14:46 -0500323static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
324{
325 struct imd_root *r;
326 size_t idx;
327
328 r = imdr_root(imdr);
329 if (r == NULL)
330 return false;
331
332 /* Determine if the entry is within this root structure. */
333 idx = e - &r->entries[0];
334 if (idx >= r->num_entries)
335 return false;
336
337 return true;
338}
339
340static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
341 const struct imd_entry *entry)
342{
343 if (imdr_has_entry(&imd->lg, entry))
344 return &imd->lg;
345
346 if (imdr_has_entry(&imd->sm, entry))
347 return &imd->sm;
348
349 return NULL;
350}
351
/* Initialize imd handle. The large region is bounded above by
 * upper_limit; the small region starts out absent (NULL limit) until it
 * is created or recovered. */
void imd_handle_init(struct imd *imd, void *upper_limit)
{
	imdr_init(&imd->lg, upper_limit);
	imdr_init(&imd->sm, NULL);
}
358
/* Attach the root structures of both regions without any validation.
 * NOTE(review): unlike imd_recover(), nothing here checks magic values
 * or entry bounds — the caller must know the imd below imd->lg.limit
 * is intact. */
void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large regions. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	/* The small region's limit is the end of the entry covering it. */
	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}
386
/* Create an empty imd consisting of the large region only. */
int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
{
	return imdr_create_empty(&imd->lg, root_size, entry_align);
}
391
/* Create a two-tier imd: a large region plus a nested small region for
 * fine-grained allocations. On any failure the handle is reset to the
 * empty state and -1 is returned. */
int imd_create_tiered_empty(struct imd *imd,
			size_t lg_root_size, size_t lg_entry_align,
			size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	/* Cap the small region so it can never outgrow its covering entry. */
	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
			imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}
428
/* Recover a previously created imd: validate the large region first,
 * then the optional small region nested inside it. On failure the
 * handle is reinitialized to the empty state and -1 is returned. */
int imd_recover(struct imd *imd)
{
	const struct imd_entry *e;
	uintptr_t small_upper_limit;
	struct imdr *imdr;

	imdr = &imd->lg;
	if (imdr_recover(imdr) != 0)
		return -1;

	/* Determine if small region is present. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return 0;

	/* The small region's limit is the end of the entry covering it. */
	small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
	small_upper_limit += imdr_entry_size(e);

	imd->sm.limit = small_upper_limit;

	/* Tear down any changes on failure. */
	if (imdr_recover(&imd->sm) != 0) {
		imd_handle_init(imd, (void *)imd->lg.limit);
		return -1;
	}

	return 0;
}
458
/* Limit the total footprint of the large region to max_size bytes. */
int imd_limit_size(struct imd *imd, size_t max_size)
{
	return imdr_limit_size(&imd->lg, max_size);
}
463
464int imd_lockdown(struct imd *imd)
465{
466 struct imd_root *r;
467
468 r = imdr_root(&imd->lg);
469 if (r == NULL)
470 return -1;
471
472 r->flags |= IMD_FLAG_LOCKED;
473
474 r = imdr_root(&imd->sm);
475 if (r != NULL)
476 r->flags |= IMD_FLAG_LOCKED;
477
478 return 0;
479}
480
481int imd_region_used(struct imd *imd, void **base, size_t *size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600482{
483 struct imd_root *r;
484 struct imd_entry *e;
Aaron Durbincac50502015-03-24 23:14:46 -0500485 void *low_addr;
486 size_t sz_used;
Aaron Durbin20686d82015-03-05 14:11:27 -0600487
Aaron Durbincac50502015-03-24 23:14:46 -0500488 if (!imd->lg.limit)
489 return -1;
490
491 r = imdr_root(&imd->lg);
Aaron Durbin20686d82015-03-05 14:11:27 -0600492
493 if (r == NULL)
Aaron Durbincac50502015-03-24 23:14:46 -0500494 return -1;
Aaron Durbin20686d82015-03-05 14:11:27 -0600495
Aaron Durbincac50502015-03-24 23:14:46 -0500496 /* Use last entry to obtain lowest address. */
497 e = root_last_entry(r);
498
499 low_addr = relative_pointer(r, e->start_offset);
500
501 /* Total size used is the last entry's base up to the limit. */
502 sz_used = imd->lg.limit - (uintptr_t)low_addr;
503
504 *base = low_addr;
505 *size = sz_used;
506
507 return 0;
508}
509
510const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
511 size_t size)
512{
513 struct imd_root *r;
514 const struct imdr *imdr;
515 const struct imd_entry *e = NULL;
516
517 /*
518 * Determine if requested size is less than 1/4 of small data
519 * region is left.
520 */
521 imdr = &imd->sm;
522 r = imdr_root(imdr);
523
524 /* No small region. Use the large region. */
525 if (r == NULL)
526 return imdr_entry_add(&imd->lg, id, size);
527 else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
528 e = imdr_entry_add(imdr, id, size);
529
530 /* Fall back on large region allocation. */
531 if (e == NULL)
532 e = imdr_entry_add(&imd->lg, id, size);
533
534 return e;
535}
536
537const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
538{
539 const struct imd_entry *e;
540
541 /* Many of the smaller allocations are used a lot. Therefore, try
542 * the small region first. */
543 e = imdr_entry_find(&imd->sm, id);
544
545 if (e == NULL)
546 e = imdr_entry_find(&imd->lg, id);
Aaron Durbin20686d82015-03-05 14:11:27 -0600547
548 return e;
549}
550
551const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
552 uint32_t id, size_t size)
553{
554 const struct imd_entry *e;
555
556 e = imd_entry_find(imd, id);
557
558 if (e != NULL)
559 return e;
560
561 return imd_entry_add(imd, id, size);
562}
563
/* Public wrapper returning the usable size of an entry's region. */
size_t imd_entry_size(const struct imd_entry *entry)
{
	return imdr_entry_size(entry);
}
568
569void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
570{
Aaron Durbincac50502015-03-24 23:14:46 -0500571 const struct imdr *imdr;
Aaron Durbin20686d82015-03-05 14:11:27 -0600572
Aaron Durbincac50502015-03-24 23:14:46 -0500573 imdr = imd_entry_to_imdr(imd, entry);
Aaron Durbin20686d82015-03-05 14:11:27 -0600574
Aaron Durbincac50502015-03-24 23:14:46 -0500575 if (imdr == NULL)
Aaron Durbin20686d82015-03-05 14:11:27 -0600576 return NULL;
577
Aaron Durbincac50502015-03-24 23:14:46 -0500578 return imdr_entry_at(imdr, entry);
Aaron Durbin20686d82015-03-05 14:11:27 -0600579}
580
/* Public accessor for an entry's id. */
uint32_t imd_entry_id(const struct imd_entry *entry)
{
	return entry->id;
}
585
/* Remove an entry. Allocation is stack-like, so only the most recently
 * added entry can be removed. Fails when the entry is unknown, the root
 * is locked, the entry is not the last one, or it is the entry covering
 * the root region itself. */
int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
{
	struct imd_root *r;
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return -1;

	r = imdr_root(imdr);

	if (root_is_locked(r))
		return -1;

	/* Only the last entry can go; removing any other would orphan the
	   lower-addressed regions below it. */
	if (entry != root_last_entry(r))
		return -1;

	/* Don't remove entry covering root region */
	if (r->num_entries == 1)
		return -1;

	r->num_entries--;

	return 0;
}
612
Aaron Durbincac50502015-03-24 23:14:46 -0500613static void imdr_print_entries(const struct imdr *imdr, const char *indent,
614 const struct imd_lookup *lookup, size_t size)
Aaron Durbin20686d82015-03-05 14:11:27 -0600615{
616 struct imd_root *r;
617 size_t i;
618 size_t j;
619
Aaron Durbincac50502015-03-24 23:14:46 -0500620 if (imdr == NULL)
621 return;
Aaron Durbin20686d82015-03-05 14:11:27 -0600622
Aaron Durbincac50502015-03-24 23:14:46 -0500623 r = imdr_root(imdr);
Aaron Durbin20686d82015-03-05 14:11:27 -0600624
625 for (i = 0; i < r->num_entries; i++) {
626 const char *name = NULL;
627 const struct imd_entry *e = &r->entries[i];
628
629 for (j = 0; j < size; j++) {
630 if (lookup[j].id == e->id) {
631 name = lookup[j].name;
632 break;
633 }
634 }
635
Aaron Durbincac50502015-03-24 23:14:46 -0500636 printk(BIOS_DEBUG, "%s", indent);
637
Aaron Durbin20686d82015-03-05 14:11:27 -0600638 if (name == NULL)
639 printk(BIOS_DEBUG, "%08x ", e->id);
640 else
641 printk(BIOS_DEBUG, "%s", name);
642 printk(BIOS_DEBUG, "%2zu. ", i);
Aaron Durbincac50502015-03-24 23:14:46 -0500643 printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
Anna Karas215e7fc2020-07-16 14:12:30 +0200644 printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(e));
Aaron Durbincac50502015-03-24 23:14:46 -0500645 }
646}
647
648int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
649 size_t size)
650{
651 if (imdr_root(&imd->lg) == NULL)
652 return -1;
653
654 imdr_print_entries(&imd->lg, "", lookup, size);
655 if (imdr_root(&imd->sm) != NULL) {
656 printk(BIOS_DEBUG, "IMD small region:\n");
657 imdr_print_entries(&imd->sm, " ", lookup, size);
Aaron Durbin20686d82015-03-05 14:11:27 -0600658 }
659
660 return 0;
661}
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500662
663int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
664{
665 if (imd == NULL || cursor == NULL)
666 return -1;
667
668 memset(cursor, 0, sizeof(*cursor));
669
670 cursor->imdr[0] = &imd->lg;
671 cursor->imdr[1] = &imd->sm;
672
673 return 0;
674}
675
676const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
677{
678 struct imd_root *r;
679 const struct imd_entry *e;
680
681 if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
682 return NULL;
683
684 r = imdr_root(cursor->imdr[cursor->current_imdr]);
685
686 if (r == NULL)
687 return NULL;
688
689 if (cursor->current_entry >= r->num_entries) {
690 /* Try next imdr. */
691 cursor->current_imdr++;
692 cursor->current_entry = 0;
693 return imd_cursor_next(cursor);
694 }
695
696 e = &r->entries[cursor->current_entry];
697 cursor->current_entry++;
698
699 return e;
700}