/*
 * This file is part of the coreboot project.
 *
 * Copyright 2015 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <cbmem.h>
#include <console/console.h>
#include <imd.h>
#include <stdlib.h>
#include <string.h>
#include <types.h>

/* For more details on implementation and usage please see the imd.h header. */

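/*
 * Minimal usage sketch (illustrative only; "upper_limit", "id" and "size"
 * are placeholders, and the 4KiB root size/alignment are arbitrary powers
 * of 2 picked for the example):
 *
 *	struct imd imd;
 *
 *	imd_handle_init(&imd, upper_limit);
 *	if (imd_create_empty(&imd, 4096, 4096) == 0) {
 *		const struct imd_entry *e = imd_entry_add(&imd, id, size);
 *		if (e != NULL)
 *			memset(imd_entry_at(&imd, e), 0,
 *				imd_entry_size(&imd, e));
 *	}
 */
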
static const uint32_t IMD_ROOT_PTR_MAGIC = 0xc0389481;
static const uint32_t IMD_ENTRY_MAGIC = ~0xc0389481;
static const uint32_t SMALL_REGION_ID = CBMEM_ID_IMD_SMALL;
static const size_t LIMIT_ALIGN = 4096;

/* In-memory data structures. */
struct imd_root_pointer {
	uint32_t magic;
	/* Relative to upper limit/offset. */
	int32_t root_offset;
} __packed;

struct imd_entry {
	uint32_t magic;
	/* start is located relative to imd_root */
	int32_t start_offset;
	uint32_t size;
	uint32_t id;
} __packed;

struct imd_root {
	uint32_t max_entries;
	uint32_t num_entries;
	uint32_t flags;
	uint32_t entry_align;
	/* Used for fixing the size of an imd. Relative to the root. */
	int32_t max_offset;
	struct imd_entry entries[0];
} __packed;

#define IMD_FLAG_LOCKED 1

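/*
 * Rough layout implied by the structures above (addresses grow upward; the
 * stored offsets are zero or negative, i.e. they point downward from the
 * object holding them):
 *
 *	+--------------------------+ <- upper limit, aligned to LIMIT_ALIGN
 *	| imd_root_pointer         |    sits at the top, just below the limit
 *	| imd_root + entries[]     |    root block of root_size bytes;
 *	+--------------------------+    entry 0 covers this block
 *	| entry 1 data             |    allocations are handed out downward,
 *	+--------------------------+    each start_offset below the previous
 *	| ...                      |
 *	+--------------------------+ <- root + max_offset (when max_offset != 0)
 */
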
static void *relative_pointer(void *base, ssize_t offset)
{
	intptr_t b = (intptr_t)base;
	b += offset;
	return (void *)b;
}

static bool imd_root_pointer_valid(const struct imd_root_pointer *rp)
{
	return !!(rp->magic == IMD_ROOT_PTR_MAGIC);
}

static struct imd_root *imdr_root(const struct imdr *imdr)
{
	return imdr->r;
}

/*
 * The root pointer is relative to the upper limit of the imd, i.e. it sits
 * just below the upper limit.
 */
static struct imd_root_pointer *imdr_get_root_pointer(const struct imdr *imdr)
{
	struct imd_root_pointer *rp;

	rp = relative_pointer((void *)imdr->limit, -sizeof(*rp));

	return rp;
}

static void imd_link_root(struct imd_root_pointer *rp, struct imd_root *r)
{
	rp->magic = IMD_ROOT_PTR_MAGIC;
	rp->root_offset = (int32_t)((intptr_t)r - (intptr_t)rp);
}

static struct imd_entry *root_last_entry(struct imd_root *r)
{
	return &r->entries[r->num_entries - 1];
}

static size_t root_num_entries(size_t root_size)
{
	size_t entries_size;

	entries_size = root_size;
	entries_size -= sizeof(struct imd_root_pointer);
	entries_size -= sizeof(struct imd_root);

	return entries_size / sizeof(struct imd_entry);
}

static size_t imd_root_data_left(struct imd_root *r)
{
	struct imd_entry *last_entry;

	last_entry = root_last_entry(r);

	if (r->max_offset != 0)
		return last_entry->start_offset - r->max_offset;

	return ~(size_t)0;
}

static bool root_is_locked(const struct imd_root *r)
{
	return !!(r->flags & IMD_FLAG_LOCKED);
}

static void imd_entry_assign(struct imd_entry *e, uint32_t id,
				ssize_t offset, size_t size)
{
	e->magic = IMD_ENTRY_MAGIC;
	e->start_offset = offset;
	e->size = size;
	e->id = id;
}

static void imdr_init(struct imdr *ir, void *upper_limit)
{
	uintptr_t limit = (uintptr_t)upper_limit;
	/* Upper limit is aligned down to 4KiB */
	ir->limit = ALIGN_DOWN(limit, LIMIT_ALIGN);
	ir->r = NULL;
}

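/*
 * Carve an empty imd out of the top of the region: the root pointer sits at
 * the very top of the root_size block, the root bookkeeping structure and
 * entry table fill the rest of it, and entry 0 is seeded to cover the root
 * block itself. root_size and entry_align must be powers of 2 no larger than
 * LIMIT_ALIGN, with entry_align <= root_size.
 */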
static int imdr_create_empty(struct imdr *imdr, size_t root_size,
				size_t entry_align)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	struct imd_entry *e;
	ssize_t root_offset;

	if (!imdr->limit)
		return -1;

	/* root_size and entry_align should be a power of 2. */
	assert(IS_POWER_OF_2(root_size));
	assert(IS_POWER_OF_2(entry_align));

	/*
	 * root_size needs to be large enough to accommodate the root pointer
	 * and root bookkeeping structure. The caller needs to ensure there's
	 * enough room for tracking individual allocations.
	 */
	if (root_size < (sizeof(*rp) + sizeof(*r)))
		return -1;

	/*
	 * For simplicity don't allow sizes or alignments to exceed
	 * LIMIT_ALIGN.
	 */
	if (root_size > LIMIT_ALIGN || entry_align > LIMIT_ALIGN)
		return -1;

	/* Additionally, don't handle an entry alignment > root_size. */
	if (entry_align > root_size)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	root_offset = -(ssize_t)root_size;
	/* Set root pointer. */
	imdr->r = relative_pointer((void *)imdr->limit, root_offset);
	r = imdr_root(imdr);
	imd_link_root(rp, r);

	memset(r, 0, sizeof(*r));
	r->entry_align = entry_align;

	/* Calculate size left for entries. */
	r->max_entries = root_num_entries(root_size);

	/* Fill in first entry covering the root region. */
	r->num_entries = 1;
	e = &r->entries[0];
	imd_entry_assign(e, CBMEM_ID_IMD_ROOT, 0, root_size);

	printk(BIOS_DEBUG, "IMD: root @ %p %u entries.\n", r, r->max_entries);

	return 0;
}

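/*
 * Locate and validate an imd previously placed just below imdr->limit:
 * check the root pointer magic, confirm the root lines up with the limit,
 * and bounds check every entry before adopting the root. Returns 0 on
 * success, -1 if anything looks inconsistent.
 */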
static int imdr_recover(struct imdr *imdr)
{
	struct imd_root_pointer *rp;
	struct imd_root *r;
	uintptr_t low_limit;
	size_t i;

	if (!imdr->limit)
		return -1;

	rp = imdr_get_root_pointer(imdr);

	if (!imd_root_pointer_valid(rp))
		return -1;

	r = relative_pointer(rp, rp->root_offset);

	/* Confirm the root and root pointer are just under the limit. */
	if (ALIGN_UP((uintptr_t)&r->entries[r->max_entries], LIMIT_ALIGN) !=
			imdr->limit)
		return -1;

	if (r->num_entries > r->max_entries)
		return -1;

	/* Entry alignment should be power of 2. */
	if (!IS_POWER_OF_2(r->entry_align))
		return -1;

	low_limit = (uintptr_t)relative_pointer(r, r->max_offset);

	/* If no max_offset then lowest limit is 0. */
	if (low_limit == (uintptr_t)r)
		low_limit = 0;

	for (i = 0; i < r->num_entries; i++) {
		uintptr_t start_addr;
		const struct imd_entry *e = &r->entries[i];

		if (e->magic != IMD_ENTRY_MAGIC)
			return -1;

		start_addr = (uintptr_t)relative_pointer(r, e->start_offset);
		if (start_addr < low_limit)
			return -1;
		if (start_addr >= imdr->limit ||
				(start_addr + e->size) > imdr->limit)
			return -1;
	}

	/* Set root pointer. */
	imdr->r = r;

	return 0;
}

static const struct imd_entry *imdr_entry_find(const struct imdr *imdr,
						uint32_t id)
{
	struct imd_root *r;
	struct imd_entry *e;
	size_t i;

	r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	e = NULL;
	/* Skip first entry covering the root. */
	for (i = 1; i < r->num_entries; i++) {
		if (id != r->entries[i].id)
			continue;
		e = &r->entries[i];
		break;
	}

	return e;
}

static int imdr_limit_size(struct imdr *imdr, size_t max_size)
{
	struct imd_root *r;
	ssize_t smax_size;
	size_t root_size;

	r = imdr_root(imdr);
	if (r == NULL)
		return -1;

	root_size = imdr->limit - (uintptr_t)r;

	if (max_size < root_size)
		return -1;

	/* Take into account the root size. */
	smax_size = max_size - root_size;
	smax_size = -smax_size;

	r->max_offset = smax_size;

	return 0;
}

static size_t imdr_entry_size(const struct imdr *imdr,
				const struct imd_entry *e)
{
	return e->size;
}

static void *imdr_entry_at(const struct imdr *imdr, const struct imd_entry *e)
{
	return relative_pointer(imdr_root(imdr), e->start_offset);
}

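/*
 * Allocate a new entry below the last one: the requested size is aligned up
 * to entry_align and the new start_offset grows downward. Fails when the
 * entry table is full, the region is out of space, or the offset would
 * overflow.
 */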
static struct imd_entry *imd_entry_add_to_root(struct imd_root *r, uint32_t id,
						size_t size)
{
	struct imd_entry *entry;
	struct imd_entry *last_entry;
	ssize_t e_offset;
	size_t used_size;

	if (r->num_entries == r->max_entries)
		return NULL;

	/* Determine total size taken up by entry. */
	used_size = ALIGN_UP(size, r->entry_align);

	/* See if size overflows imd total size. */
	if (used_size > imd_root_data_left(r))
		return NULL;

	/*
	 * Determine if offset field overflows. All offsets should be lower
	 * than the previous one.
	 */
	last_entry = root_last_entry(r);
	e_offset = last_entry->start_offset;
	e_offset -= (ssize_t)used_size;
	if (e_offset > last_entry->start_offset)
		return NULL;

	entry = root_last_entry(r) + 1;
	r->num_entries++;

	imd_entry_assign(entry, id, e_offset, size);

	return entry;
}

static const struct imd_entry *imdr_entry_add(const struct imdr *imdr,
						uint32_t id, size_t size)
{
	struct imd_root *r;

	r = imdr_root(imdr);

	if (r == NULL)
		return NULL;

	if (root_is_locked(r))
		return NULL;

	return imd_entry_add_to_root(r, id, size);
}

static bool imdr_has_entry(const struct imdr *imdr, const struct imd_entry *e)
{
	struct imd_root *r;
	size_t idx;

	r = imdr_root(imdr);
	if (r == NULL)
		return false;

	/* Determine if the entry is within this root structure. */
	idx = e - &r->entries[0];
	if (idx >= r->num_entries)
		return false;

	return true;
}

static const struct imdr *imd_entry_to_imdr(const struct imd *imd,
						const struct imd_entry *entry)
{
	if (imdr_has_entry(&imd->lg, entry))
		return &imd->lg;

	if (imdr_has_entry(&imd->sm, entry))
		return &imd->sm;

	return NULL;
}

/* Initialize imd handle. */
void imd_handle_init(struct imd *imd, void *upper_limit)
{
	imdr_init(&imd->lg, upper_limit);
	imdr_init(&imd->sm, NULL);
}

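/*
 * Reattach the root pointers of both regions without the full validation
 * done by imd_recover(): trust the root pointer below the large region's
 * limit, then locate the small region via its SMALL_REGION_ID entry.
 */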
void imd_handle_init_partial_recovery(struct imd *imd)
{
	const struct imd_entry *e;
	struct imd_root_pointer *rp;
	struct imdr *imdr;

	if (imd->lg.limit == 0)
		return;

	imd_handle_init(imd, (void *)imd->lg.limit);

	/* Initialize root pointer for the large region. */
	imdr = &imd->lg;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);

	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += imdr_entry_size(imdr, e);
	imdr = &imd->sm;
	rp = imdr_get_root_pointer(imdr);
	imdr->r = relative_pointer(rp, rp->root_offset);
}

int imd_create_empty(struct imd *imd, size_t root_size, size_t entry_align)
{
	return imdr_create_empty(&imd->lg, root_size, entry_align);
}

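/*
 * The tiered layout nests a second imd inside the large one: a single
 * SMALL_REGION_ID entry is added to the large region, sized to hold the
 * small root plus roughly one sm_entry_align-sized allocation per small
 * entry slot, and a separate root is then created within that entry. For
 * example, with sm_root_size = 1024 and sm_entry_align = 32 the request is
 * root_num_entries(1024) * 32 + 1024, rounded up to lg_entry_align.
 */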
int imd_create_tiered_empty(struct imd *imd,
				size_t lg_root_size, size_t lg_entry_align,
				size_t sm_root_size, size_t sm_entry_align)
{
	size_t sm_region_size;
	const struct imd_entry *e;
	struct imdr *imdr;

	imdr = &imd->lg;

	if (imdr_create_empty(imdr, lg_root_size, lg_entry_align) != 0)
		return -1;

	/* Calculate the size of the small region to request. */
	sm_region_size = root_num_entries(sm_root_size) * sm_entry_align;
	sm_region_size += sm_root_size;
	sm_region_size = ALIGN_UP(sm_region_size, lg_entry_align);

	/* Add a new entry to the large region to cover the root and entries. */
	e = imdr_entry_add(imdr, SMALL_REGION_ID, sm_region_size);

	if (e == NULL)
		goto fail;

	imd->sm.limit = (uintptr_t)imdr_entry_at(imdr, e);
	imd->sm.limit += sm_region_size;

	if (imdr_create_empty(&imd->sm, sm_root_size, sm_entry_align) != 0 ||
			imdr_limit_size(&imd->sm, sm_region_size))
		goto fail;

	return 0;
fail:
	imd_handle_init(imd, (void *)imdr->limit);
	return -1;
}

int imd_recover(struct imd *imd)
{
	const struct imd_entry *e;
	uintptr_t small_upper_limit;
	struct imdr *imdr;

	imdr = &imd->lg;
	if (imdr_recover(imdr) != 0)
		return -1;

	/* Determine if the small region is present. */
	e = imdr_entry_find(imdr, SMALL_REGION_ID);

	if (e == NULL)
		return 0;

	small_upper_limit = (uintptr_t)imdr_entry_at(imdr, e);
	small_upper_limit += imdr_entry_size(imdr, e);

	imd->sm.limit = small_upper_limit;

	/* Tear down any changes on failure. */
	if (imdr_recover(&imd->sm) != 0) {
		imd_handle_init(imd, (void *)imd->lg.limit);
		return -1;
	}

	return 0;
}

int imd_limit_size(struct imd *imd, size_t max_size)
{
	return imdr_limit_size(&imd->lg, max_size);
}

int imd_lockdown(struct imd *imd)
{
	struct imd_root *r;

	r = imdr_root(&imd->lg);
	if (r == NULL)
		return -1;

	r->flags |= IMD_FLAG_LOCKED;

	r = imdr_root(&imd->sm);
	if (r != NULL)
		r->flags |= IMD_FLAG_LOCKED;

	return 0;
}

int imd_region_used(struct imd *imd, void **base, size_t *size)
{
	struct imd_root *r;
	struct imd_entry *e;
	void *low_addr;
	size_t sz_used;

	if (!imd->lg.limit)
		return -1;

	r = imdr_root(&imd->lg);

	if (r == NULL)
		return -1;

	/* Use last entry to obtain lowest address. */
	e = root_last_entry(r);

	low_addr = relative_pointer(r, e->start_offset);

	/* Total size used is the last entry's base up to the limit. */
	sz_used = imd->lg.limit - (uintptr_t)low_addr;

	*base = low_addr;
	*size = sz_used;

	return 0;
}

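/*
 * Allocation policy for new entries: small requests (no bigger than the
 * small region's entry_align, or no more than 1/4 of the data left there)
 * are tried in the small region first; everything else, and any small
 * region failure, falls back to the large region.
 */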
const struct imd_entry *imd_entry_add(const struct imd *imd, uint32_t id,
					size_t size)
{
	struct imd_root *r;
	const struct imdr *imdr;
	const struct imd_entry *e = NULL;

	/*
	 * Determine if the requested size is less than 1/4 of the data
	 * left in the small region.
	 */
	imdr = &imd->sm;
	r = imdr_root(imdr);

	/* No small region. Use the large region. */
	if (r == NULL)
		return imdr_entry_add(&imd->lg, id, size);
	else if (size <= r->entry_align || size <= imd_root_data_left(r) / 4)
		e = imdr_entry_add(imdr, id, size);

	/* Fall back on large region allocation. */
	if (e == NULL)
		e = imdr_entry_add(&imd->lg, id, size);

	return e;
}

const struct imd_entry *imd_entry_find(const struct imd *imd, uint32_t id)
{
	const struct imd_entry *e;

	/* Many of the smaller allocations are used a lot. Therefore, try
	 * the small region first. */
	e = imdr_entry_find(&imd->sm, id);

	if (e == NULL)
		e = imdr_entry_find(&imd->lg, id);

	return e;
}

const struct imd_entry *imd_entry_find_or_add(const struct imd *imd,
						uint32_t id, size_t size)
{
	const struct imd_entry *e;

	e = imd_entry_find(imd, id);

	if (e != NULL)
		return e;

	return imd_entry_add(imd, id, size);
}

size_t imd_entry_size(const struct imd *imd, const struct imd_entry *entry)
{
	return imdr_entry_size(NULL, entry);
}

void *imd_entry_at(const struct imd *imd, const struct imd_entry *entry)
{
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return NULL;

	return imdr_entry_at(imdr, entry);
}

uint32_t imd_entry_id(const struct imd *imd, const struct imd_entry *entry)
{
	return entry->id;
}

int imd_entry_remove(const struct imd *imd, const struct imd_entry *entry)
{
	struct imd_root *r;
	const struct imdr *imdr;

	imdr = imd_entry_to_imdr(imd, entry);

	if (imdr == NULL)
		return -1;

	r = imdr_root(imdr);

	if (r == NULL)
		return -1;

	if (root_is_locked(r))
		return -1;

	if (entry != root_last_entry(r))
		return -1;

	r->num_entries--;

	return 0;
}

static void imdr_print_entries(const struct imdr *imdr, const char *indent,
				const struct imd_lookup *lookup, size_t size)
{
	struct imd_root *r;
	size_t i;
	size_t j;

	if (imdr == NULL)
		return;

	r = imdr_root(imdr);

	for (i = 0; i < r->num_entries; i++) {
		const char *name = NULL;
		const struct imd_entry *e = &r->entries[i];

		for (j = 0; j < size; j++) {
			if (lookup[j].id == e->id) {
				name = lookup[j].name;
				break;
			}
		}

		printk(BIOS_DEBUG, "%s", indent);

		if (name == NULL)
			printk(BIOS_DEBUG, "%08x ", e->id);
		else
			printk(BIOS_DEBUG, "%s", name);
		printk(BIOS_DEBUG, "%2zu. ", i);
		printk(BIOS_DEBUG, "%p ", imdr_entry_at(imdr, e));
		printk(BIOS_DEBUG, "0x%08zx\n", imdr_entry_size(imdr, e));
	}
}

int imd_print_entries(const struct imd *imd, const struct imd_lookup *lookup,
			size_t size)
{
	if (imdr_root(&imd->lg) == NULL)
		return -1;

	imdr_print_entries(&imd->lg, "", lookup, size);
	if (imdr_root(&imd->sm) != NULL) {
		printk(BIOS_DEBUG, "IMD small region:\n");
		imdr_print_entries(&imd->sm, " ", lookup, size);
	}

	return 0;
}

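/*
 * A cursor visits every entry of both regions, large then small. Typical
 * loop (illustrative; the printk body is just an example):
 *
 *	struct imd_cursor cursor;
 *	const struct imd_entry *e;
 *
 *	if (imd_cursor_init(imd, &cursor) == 0)
 *		while ((e = imd_cursor_next(&cursor)) != NULL)
 *			printk(BIOS_DEBUG, "id %08x\n", imd_entry_id(imd, e));
 */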
int imd_cursor_init(const struct imd *imd, struct imd_cursor *cursor)
{
	if (imd == NULL || cursor == NULL)
		return -1;

	memset(cursor, 0, sizeof(*cursor));

	cursor->imdr[0] = &imd->lg;
	cursor->imdr[1] = &imd->sm;

	return 0;
}

const struct imd_entry *imd_cursor_next(struct imd_cursor *cursor)
{
	struct imd_root *r;
	const struct imd_entry *e;

	if (cursor->current_imdr >= ARRAY_SIZE(cursor->imdr))
		return NULL;

	r = imdr_root(cursor->imdr[cursor->current_imdr]);

	if (r == NULL)
		return NULL;

	if (cursor->current_entry >= r->num_entries) {
		/* Try next imdr. */
		cursor->current_imdr++;
		cursor->current_entry = 0;
		return imd_cursor_next(cursor);
	}

	e = &r->entries[cursor->current_entry];
	cursor->current_entry++;

	return e;
}