/* SPDX-License-Identifier: GPL-2.0-only */

#include <stdlib.h>
#include <types.h>
#include <string.h>
#include <tests/test.h>
#include <imd.h>
#include <imd_private.h>
#include <cbmem.h>
#include <commonlib/bsd/helpers.h>
#include <lib.h>

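/*
 * Typical imd call sequence exercised by these tests (a sketch; error
 * handling omitted, signatures taken from the calls below):
 *
 *	struct imd imd;
 *	imd_handle_init(&imd, upper_limit);
 *	imd_create_empty(&imd, root_size, entry_align);
 *	const struct imd_entry *e = imd_entry_add(&imd, id, size);
 *	void *data = imd_entry_at(&imd, e);
 */
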
/* Auxiliary functions and definitions. */

#define LG_ROOT_SIZE \
	align_up_pow2(sizeof(struct imd_root_pointer) + sizeof(struct imd_root) \
		      + 3 * sizeof(struct imd_entry))
#define LG_ENTRY_ALIGN (2 * sizeof(int32_t))
#define LG_ENTRY_SIZE (2 * sizeof(int32_t))
#define LG_ENTRY_ID 0xA001

#define SM_ROOT_SIZE LG_ROOT_SIZE
#define SM_ENTRY_ALIGN sizeof(uint32_t)
#define SM_ENTRY_SIZE sizeof(uint32_t)
#define SM_ENTRY_ID 0xB001

#define INVALID_REGION_ID 0xC001

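/* Round up to the next power of two, e.g. align_up_pow2(80) == 128. */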
static uint32_t align_up_pow2(uint32_t x)
{
	return (1 << log2_ceil(x));
}

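/* Number of struct imd_entry slots that fit in a root region of root_size bytes. */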
static size_t max_entries(size_t root_size)
{
	return (root_size - sizeof(struct imd_root_pointer) - sizeof(struct imd_root))
	       / sizeof(struct imd_entry);
}

/*
 * Check that imd_handle_init() aligns the upper limit properly for various
 * inputs. The upper limit is the _exclusive_ end address, so we expect
 * ALIGN_DOWN.
 */
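/*
 * For example, assuming LIMIT_ALIGN is 4 KiB (as the expectations below
 * imply), 0xDEAA is rounded down to 0xD000 while 0xA000 stays unchanged.
 */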
static void test_imd_handle_init(void **state)
{
	int i;
	void *base;
	struct imd imd;
	uintptr_t test_inputs[] = {
		0,		     /* Lowest possible address */
		0xA000,		     /* Fits in 16 bits, should not get rounded down */
		0xDEAA,		     /* Fits in 16 bits */
		0xB0B0B000,	     /* Fits in 32 bits, should not get rounded down */
		0xF0F0F0F0,	     /* Fits in 32 bits */
		((1ULL << 32) + 4),  /* Just above 32-bit limit */
		0x6666777788889000,  /* Fits in 64 bits, should not get rounded down */
		((1ULL << 60) - 100) /* Very large address, fitting in 64 bits */
	};

	for (i = 0; i < ARRAY_SIZE(test_inputs); i++) {
		base = (void *)test_inputs[i];

		imd_handle_init(&imd, (void *)base);

		assert_int_equal(imd.lg.limit % LIMIT_ALIGN, 0);
		assert_int_equal(imd.lg.limit, ALIGN_DOWN(test_inputs[i], LIMIT_ALIGN));
		assert_ptr_equal(imd.lg.r, NULL);

		/* Small allocations not initialized */
		assert_ptr_equal(imd.sm.limit, NULL);
		assert_ptr_equal(imd.sm.r, NULL);
	}
}

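/*
 * Partial recovery should find the large root via the root pointer, and
 * attach the small region only once a SMALL_REGION_ID entry exists.
 */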
static void test_imd_handle_init_partial_recovery(void **state)
{
	void *base;
	struct imd imd = {0};
	const struct imd_entry *entry;

	imd_handle_init_partial_recovery(&imd);
	assert_null(imd.lg.limit);
	assert_null(imd.sm.limit);

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));
	imd_handle_init_partial_recovery(&imd);

	assert_non_null(imd.lg.r);
	assert_null(imd.sm.limit);

	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));
	entry = imd_entry_add(&imd, SMALL_REGION_ID, LG_ENTRY_SIZE);
	assert_non_null(entry);

	imd_handle_init_partial_recovery(&imd);

	assert_non_null(imd.lg.r);
	assert_non_null(imd.sm.limit);
	assert_ptr_equal(imd.lg.r + entry->start_offset + LG_ENTRY_SIZE, imd.sm.limit);
	assert_non_null(imd.sm.r);

	free(base);
}

static void test_imd_create_empty(void **state)
{
	struct imd imd = {0};
	void *base;
	struct imd_root *r;
	struct imd_entry *e;

	/* Expect imd_create_empty to fail, since imd handle is not initialized */
	assert_int_equal(-1, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));
	base = malloc(sizeof(struct imd_root_pointer) + sizeof(struct imd_root));
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	/* Try incorrect sizes */
	assert_int_equal(
		-1, imd_create_empty(&imd, sizeof(struct imd_root_pointer), LG_ENTRY_ALIGN));
	assert_int_equal(-1, imd_create_empty(&imd, LG_ROOT_SIZE, 2 * LG_ROOT_SIZE));

	/* Working case */
	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));

	/* Only the large allocation is initialized, with one entry for the root region */
	r = (struct imd_root *)(imd.lg.r);
	assert_non_null(r);

	e = &r->entries[r->num_entries - 1];

	assert_int_equal(max_entries(LG_ROOT_SIZE), r->max_entries);
	assert_int_equal(1, r->num_entries);
	assert_int_equal(0, r->flags);
	assert_int_equal(LG_ENTRY_ALIGN, r->entry_align);
	assert_int_equal(0, r->max_offset);
	assert_ptr_equal(e, &r->entries);

	assert_int_equal(IMD_ENTRY_MAGIC, e->magic);
	assert_int_equal(0, e->start_offset);
	assert_int_equal(LG_ROOT_SIZE, e->size);
	assert_int_equal(CBMEM_ID_IMD_ROOT, e->id);

	free(base);
}

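/*
 * imd_create_tiered_empty() creates the large root region and carves a
 * nested region for small allocations out of it, growing downward (note
 * the negative start_offset checked below).
 */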
static void test_imd_create_tiered_empty(void **state)
{
	void *base;
	size_t sm_region_size, lg_region_wrong_size;
	struct imd imd = {0};
	struct imd_root *r;
	struct imd_entry *fst_lg_entry, *snd_lg_entry, *sm_entry;

	/* Uninitialized imd handle */
	assert_int_equal(-1, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						     LG_ROOT_SIZE, SM_ENTRY_ALIGN));

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	/* Too small root_size for small region */
	assert_int_equal(-1, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						     sizeof(int32_t), 2 * sizeof(int32_t)));

	/* Fail when large region doesn't have capacity for more than 1 entry */
	lg_region_wrong_size = sizeof(struct imd_root_pointer) + sizeof(struct imd_root)
			       + sizeof(struct imd_entry);
	expect_assert_failure(imd_create_tiered_empty(
		&imd, lg_region_wrong_size, LG_ENTRY_ALIGN, SM_ROOT_SIZE, SM_ENTRY_ALIGN));

	assert_int_equal(0, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						    SM_ROOT_SIZE, SM_ENTRY_ALIGN));

	r = imd.lg.r;

	/* One entry for root_region and one for small allocations */
	assert_int_equal(2, r->num_entries);

	fst_lg_entry = &r->entries[0];
	assert_int_equal(IMD_ENTRY_MAGIC, fst_lg_entry->magic);
	assert_int_equal(0, fst_lg_entry->start_offset);
	assert_int_equal(LG_ROOT_SIZE, fst_lg_entry->size);
	assert_int_equal(CBMEM_ID_IMD_ROOT, fst_lg_entry->id);

	/* Calculated like in imd_create_tiered_empty */
	sm_region_size = max_entries(SM_ROOT_SIZE) * SM_ENTRY_ALIGN;
	sm_region_size += SM_ROOT_SIZE;
	sm_region_size = ALIGN_UP(sm_region_size, LG_ENTRY_ALIGN);

	snd_lg_entry = &r->entries[1];
	assert_int_equal(IMD_ENTRY_MAGIC, snd_lg_entry->magic);
	assert_int_equal(-sm_region_size, snd_lg_entry->start_offset);
	assert_int_equal(CBMEM_ID_IMD_SMALL, snd_lg_entry->id);

	assert_int_equal(sm_region_size, snd_lg_entry->size);

	r = imd.sm.r;
	assert_int_equal(1, r->num_entries);

	sm_entry = &r->entries[0];
	assert_int_equal(IMD_ENTRY_MAGIC, sm_entry->magic);
	assert_int_equal(0, sm_entry->start_offset);
	assert_int_equal(SM_ROOT_SIZE, sm_entry->size);
	assert_int_equal(CBMEM_ID_IMD_ROOT, sm_entry->id);

	free(base);
}

/* Tests for imdr_recover. */
static void test_imd_recover(void **state)
{
	int32_t offset_copy, max_offset_copy;
	uint32_t rp_magic_copy, num_entries_copy;
	uint32_t e_align_copy, e_magic_copy, e_id_copy;
	uint32_t size_copy, diff;
	void *base;
	struct imd imd = {0};
	struct imd_root_pointer *rp;
	struct imd_root *r;
	struct imd_entry *lg_root_entry, *sm_root_entry, *ptr;
	const struct imd_entry *lg_entry;

	/* Fail when the limit for lg was not set. */
	imd.lg.limit = (uintptr_t)NULL;
	assert_int_equal(-1, imd_recover(&imd));

	/* Set the limit for lg. */
	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	/* Fail when the root pointer is not valid. */
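	/* The root pointer sits at the very top of the region, just below the limit. */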
	rp = (void *)imd.lg.limit - sizeof(struct imd_root_pointer);
	assert_non_null(rp);
	assert_int_equal(IMD_ROOT_PTR_MAGIC, rp->magic);

	rp_magic_copy = rp->magic;
	rp->magic = 0;
	assert_int_equal(-1, imd_recover(&imd));
	rp->magic = rp_magic_copy;

	/* Set the root pointer. */
	assert_int_equal(0, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						    SM_ROOT_SIZE, SM_ENTRY_ALIGN));
	assert_int_equal(2, ((struct imd_root *)imd.lg.r)->num_entries);
	assert_int_equal(1, ((struct imd_root *)imd.sm.r)->num_entries);

	/* Fail if the number of entries exceeds the maximum number of entries. */
	r = imd.lg.r;
	num_entries_copy = r->num_entries;
	r->num_entries = r->max_entries + 1;
	assert_int_equal(-1, imd_recover(&imd));
	r->num_entries = num_entries_copy;

	/* Fail if entry align is not a power of 2. */
	e_align_copy = r->entry_align;
	r->entry_align++;
	assert_int_equal(-1, imd_recover(&imd));
	r->entry_align = e_align_copy;

	/* Fail when an entry is not valid. */
	lg_root_entry = &r->entries[0];
	e_magic_copy = lg_root_entry->magic;
	lg_root_entry->magic = 0;
	assert_int_equal(-1, imd_recover(&imd));
	lg_root_entry->magic = e_magic_copy;

	/* Add new entries: large and small. */
	lg_entry = imd_entry_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE);
	assert_non_null(lg_entry);
	assert_int_equal(3, r->num_entries);

	assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, SM_ENTRY_SIZE));
	assert_int_equal(2, ((struct imd_root *)imd.sm.r)->num_entries);

	/* Fail when start_addr is lower than low_limit. */
	r = imd.lg.r;
	max_offset_copy = r->max_offset;
	r->max_offset = lg_entry->start_offset + sizeof(int32_t);
	assert_int_equal(-1, imd_recover(&imd));
	r->max_offset = max_offset_copy;

	/* Fail when start_addr is at least imdr->limit. */
	offset_copy = lg_entry->start_offset;
	ptr = (struct imd_entry *)lg_entry;
	ptr->start_offset = (void *)imd.lg.limit - (void *)r;
	assert_int_equal(-1, imd_recover(&imd));
	ptr->start_offset = offset_copy;

	/* Fail when (start_addr + e->size) is higher than imdr->limit. */
	size_copy = lg_entry->size;
	diff = (void *)imd.lg.limit - ((void *)r + lg_entry->start_offset);
	ptr->size = diff + 1;
	assert_int_equal(-1, imd_recover(&imd));
	ptr->size = size_copy;

	/* Succeed if small region is not present. */
	sm_root_entry = &r->entries[1];
	e_id_copy = sm_root_entry->id;
	sm_root_entry->id = 0;
	assert_int_equal(0, imd_recover(&imd));
	sm_root_entry->id = e_id_copy;

	assert_int_equal(0, imd_recover(&imd));

	free(base);
}

static void test_imd_limit_size(void **state)
{
	void *base;
	struct imd imd = {0};
	size_t root_size, max_size;

	max_size = align_up_pow2(sizeof(struct imd_root_pointer) + sizeof(struct imd_root)
				 + 3 * sizeof(struct imd_entry));

	assert_int_equal(-1, imd_limit_size(&imd, max_size));

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	root_size = align_up_pow2(sizeof(struct imd_root_pointer) + sizeof(struct imd_root)
				  + 2 * sizeof(struct imd_entry));
	imd.lg.r = (void *)imd.lg.limit - root_size;

	imd_create_empty(&imd, root_size, LG_ENTRY_ALIGN);
	assert_int_equal(-1, imd_limit_size(&imd, root_size - 1));
	assert_int_equal(0, imd_limit_size(&imd, max_size));

	/* Cannot create such a big entry */
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, max_size - root_size + 1));

	free(base);
}

static void test_imd_lockdown(void **state)
{
	struct imd imd = {0};
	struct imd_root *r_lg, *r_sm;

	assert_int_equal(-1, imd_lockdown(&imd));

	imd.lg.r = malloc(sizeof(struct imd_root));
	if (imd.lg.r == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	r_lg = (struct imd_root *)(imd.lg.r);

	assert_int_equal(0, imd_lockdown(&imd));
	assert_true(r_lg->flags & IMD_FLAG_LOCKED);

	imd.sm.r = malloc(sizeof(struct imd_root));
	if (imd.sm.r == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	r_sm = (struct imd_root *)(imd.sm.r);

	assert_int_equal(0, imd_lockdown(&imd));
	assert_true(r_sm->flags & IMD_FLAG_LOCKED);

	free(imd.lg.r);
	free(imd.sm.r);
}

static void test_imd_region_used(void **state)
{
	struct imd imd = {0};
	struct imd_entry *first_entry, *new_entry;
	struct imd_root *r;
	size_t size;
	void *imd_base;
	void *base;

	assert_int_equal(-1, imd_region_used(&imd, &base, &size));

	imd_base = malloc(LIMIT_ALIGN);
	if (imd_base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)imd_base));

	assert_int_equal(-1, imd_region_used(&imd, &base, &size));
	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));
	assert_int_equal(0, imd_region_used(&imd, &base, &size));

	r = (struct imd_root *)imd.lg.r;
	first_entry = &r->entries[r->num_entries - 1];

	assert_int_equal((uintptr_t)r + first_entry->start_offset, (uintptr_t)base);
	assert_int_equal(first_entry->size, size);

	assert_non_null(imd_entry_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE));
	assert_int_equal(2, r->num_entries);

	assert_int_equal(0, imd_region_used(&imd, &base, &size));

	new_entry = &r->entries[r->num_entries - 1];

	assert_true((void *)r + new_entry->start_offset == base);
	assert_int_equal(first_entry->size + new_entry->size, size);

	free(imd_base);
}

static void test_imd_entry_add(void **state)
{
	int i;
	struct imd imd = {0};
	size_t entry_size = 0;
	size_t used_size;
	ssize_t entry_offset;
	void *base;
	struct imd_root *r, *sm_r, *lg_r;
	struct imd_entry *first_entry, *new_entry;
	uint32_t num_entries_copy;
	int32_t max_offset_copy;

	/* No small region case. */
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, entry_size));

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));

	r = (struct imd_root *)imd.lg.r;
	first_entry = &r->entries[r->num_entries - 1];

	/* Cannot add an entry when root is locked. */
	r->flags = IMD_FLAG_LOCKED;
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, entry_size));
	r->flags = 0;

	/* Fail when the maximum number of entries has been reached. */
	num_entries_copy = r->num_entries;
	r->num_entries = r->max_entries;
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, entry_size));
	r->num_entries = num_entries_copy;

	/* Fail when entry size is 0 */
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, 0));

	/* Fail when entry size (after alignment) overflows imd total size. */
	entry_size = 2049;
	max_offset_copy = r->max_offset;
	r->max_offset = -entry_size;
	assert_null(imd_entry_add(&imd, LG_ENTRY_ID, entry_size));
	r->max_offset = max_offset_copy;

	/* Finally succeed. */
	entry_size = 2 * sizeof(int32_t);
	assert_non_null(imd_entry_add(&imd, LG_ENTRY_ID, entry_size));
	assert_int_equal(2, r->num_entries);

	new_entry = &r->entries[r->num_entries - 1];
	assert_int_equal(sizeof(struct imd_entry), (void *)new_entry - (void *)first_entry);

	assert_int_equal(IMD_ENTRY_MAGIC, new_entry->magic);
	assert_int_equal(LG_ENTRY_ID, new_entry->id);
	assert_int_equal(entry_size, new_entry->size);

	used_size = ALIGN_UP(entry_size, r->entry_align);
	entry_offset = first_entry->start_offset - used_size;
	assert_int_equal(entry_offset, new_entry->start_offset);

	/* Use small region case. */
	imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN, SM_ROOT_SIZE,
				SM_ENTRY_ALIGN);

	lg_r = imd.lg.r;
	sm_r = imd.sm.r;

	/* All five new entries should be added to small allocations */
	for (i = 0; i < 5; i++) {
		assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, SM_ENTRY_SIZE));
		assert_int_equal(i + 2, sm_r->num_entries);
		assert_int_equal(2, lg_r->num_entries);
	}

	/* But the next one should fall back on the large region */
	assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, SM_ENTRY_SIZE));
	assert_int_equal(6, sm_r->num_entries);
	assert_int_equal(3, lg_r->num_entries);

	/*
	 * A small allocation is only created when it occupies less than 1/4
	 * of the available small region. Verify this.
	 */
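	/*
	 * Below: a request one byte over the 1/4 threshold lands in the large
	 * region, while requests of 1/4 and 3/16 of the remaining space land
	 * in the small region.
	 */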
	imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN, SM_ROOT_SIZE,
				SM_ENTRY_ALIGN);

	assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, -sm_r->max_offset / 4 + 1));
	assert_int_equal(1, sm_r->num_entries);
	assert_int_equal(3, lg_r->num_entries);

	/* Next two should go into small region */
	assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, -sm_r->max_offset / 4));
	assert_int_equal(2, sm_r->num_entries);
	assert_int_equal(3, lg_r->num_entries);

	/* (1/4 * 3/4) */
	assert_non_null(imd_entry_add(&imd, SM_ENTRY_ID, -sm_r->max_offset / 16 * 3));
	assert_int_equal(3, sm_r->num_entries);
	assert_int_equal(3, lg_r->num_entries);

	free(base);
}

static void test_imd_entry_find(void **state)
{
	struct imd imd = {0};
	void *base;

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_int_equal(0, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						    SM_ROOT_SIZE, SM_ENTRY_ALIGN));

	assert_non_null(imd_entry_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE));

	assert_non_null(imd_entry_find(&imd, LG_ENTRY_ID));
	assert_non_null(imd_entry_find(&imd, SMALL_REGION_ID));

	/* Try invalid id, should fail */
	assert_null(imd_entry_find(&imd, INVALID_REGION_ID));

	free(base);
}

static void test_imd_entry_find_or_add(void **state)
{
	struct imd imd = {0};
	const struct imd_entry *entry;
	struct imd_root *r;
	void *base;

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_null(imd_entry_find_or_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE));

	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));
	entry = imd_entry_find_or_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE);
	assert_non_null(entry);

	r = (struct imd_root *)imd.lg.r;

	assert_int_equal(entry->id, LG_ENTRY_ID);
	assert_int_equal(2, r->num_entries);
	assert_non_null(imd_entry_find_or_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE));
	assert_int_equal(2, r->num_entries);

	free(base);
}

static void test_imd_entry_size(void **state)
{
	struct imd_entry entry = {.size = LG_ENTRY_SIZE};

	assert_int_equal(LG_ENTRY_SIZE, imd_entry_size(&entry));

	entry.size = 0;
	assert_int_equal(0, imd_entry_size(&entry));
}

static void test_imd_entry_at(void **state)
{
	struct imd imd = {0};
	struct imd_root *r;
	struct imd_entry *e = NULL;
	const struct imd_entry *entry;
	void *base;

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_int_equal(0, imd_create_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN));

	/* Fail when entry is NULL */
	assert_null(imd_entry_at(&imd, e));

	entry = imd_entry_add(&imd, LG_ENTRY_ID, LG_ENTRY_SIZE);
	assert_non_null(entry);

	r = (struct imd_root *)imd.lg.r;
	assert_ptr_equal((void *)r + entry->start_offset, imd_entry_at(&imd, entry));

	free(base);
}

static void test_imd_entry_id(void **state)
{
	struct imd_entry entry = {.id = LG_ENTRY_ID};

	assert_int_equal(LG_ENTRY_ID, imd_entry_id(&entry));
}

static void test_imd_entry_remove(void **state)
{
	void *base;
	struct imd imd = {0};
	struct imd_root *r;
	const struct imd_entry *fst_lg_entry, *snd_lg_entry, *fst_sm_entry;
	const struct imd_entry *e = NULL;

	/* Uninitialized handle */
	assert_int_equal(-1, imd_entry_remove(&imd, e));

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");

	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_int_equal(0, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						    SM_ROOT_SIZE, SM_ENTRY_ALIGN));

	r = imd.lg.r;
	assert_int_equal(2, r->num_entries);
	fst_lg_entry = &r->entries[0];
	snd_lg_entry = &r->entries[1];

	/* Only last entry can be removed */
	assert_int_equal(-1, imd_entry_remove(&imd, fst_lg_entry));
	r->flags = IMD_FLAG_LOCKED;
	assert_int_equal(-1, imd_entry_remove(&imd, snd_lg_entry));
	r->flags = 0;

	r = imd.sm.r;
	assert_int_equal(1, r->num_entries);
	fst_sm_entry = &r->entries[0];

	/* Fail trying to remove root entry */
	assert_int_equal(-1, imd_entry_remove(&imd, fst_sm_entry));
	assert_int_equal(1, r->num_entries);

	r = imd.lg.r;
	assert_int_equal(0, imd_entry_remove(&imd, snd_lg_entry));
	assert_int_equal(1, r->num_entries);

	/* Fail trying to remove root entry */
	assert_int_equal(-1, imd_entry_remove(&imd, fst_lg_entry));
	assert_int_equal(1, r->num_entries);

	free(base);
}

static void test_imd_cursor_init(void **state)
{
	struct imd imd = {0};
	struct imd_cursor cursor;

	assert_int_equal(-1, imd_cursor_init(NULL, NULL));
	assert_int_equal(-1, imd_cursor_init(NULL, &cursor));
	assert_int_equal(-1, imd_cursor_init(&imd, NULL));
	assert_int_equal(0, imd_cursor_init(&imd, &cursor));

	assert_ptr_equal(cursor.imdr[0], &imd.lg);
	assert_ptr_equal(cursor.imdr[1], &imd.sm);
}

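/* The cursor visits all large-region entries first, then the small region's. */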
static void test_imd_cursor_next(void **state)
{
	void *base;
	struct imd imd = {0};
	struct imd_cursor cursor;
	struct imd_root *r;
	const struct imd_entry *entry;
	struct imd_entry *fst_lg_entry, *snd_lg_entry, *fst_sm_entry;

	assert_int_equal(0, imd_cursor_init(&imd, &cursor));

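	/* An out-of-range imdr index must terminate iteration. */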
	cursor.current_imdr = 3;
	cursor.current_entry = 0;
	assert_null(imd_cursor_next(&cursor));

	cursor.current_imdr = 0;
	assert_null(imd_cursor_next(&cursor));

	base = malloc(LIMIT_ALIGN);
	if (base == NULL)
		fail_msg("Cannot allocate enough memory - fail test");
	imd_handle_init(&imd, (void *)(LIMIT_ALIGN + (uintptr_t)base));

	assert_int_equal(0, imd_create_tiered_empty(&imd, LG_ROOT_SIZE, LG_ENTRY_ALIGN,
						    SM_ROOT_SIZE, SM_ENTRY_ALIGN));

	r = imd.lg.r;
	entry = imd_cursor_next(&cursor);
	assert_non_null(entry);

	fst_lg_entry = &r->entries[0];
	assert_int_equal(fst_lg_entry->id, entry->id);
	assert_ptr_equal(fst_lg_entry, entry);

	entry = imd_cursor_next(&cursor);
	assert_non_null(entry);

	snd_lg_entry = &r->entries[1];
	assert_int_equal(snd_lg_entry->id, entry->id);
	assert_ptr_equal(snd_lg_entry, entry);

	entry = imd_cursor_next(&cursor);
	assert_non_null(entry);

	r = imd.sm.r;
	fst_sm_entry = &r->entries[0];
	assert_int_equal(fst_sm_entry->id, entry->id);
	assert_ptr_equal(fst_sm_entry, entry);

	entry = imd_cursor_next(&cursor);
	assert_null(entry);

	free(base);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_imd_handle_init),
		cmocka_unit_test(test_imd_handle_init_partial_recovery),
		cmocka_unit_test(test_imd_create_empty),
		cmocka_unit_test(test_imd_create_tiered_empty),
		cmocka_unit_test(test_imd_recover),
		cmocka_unit_test(test_imd_limit_size),
		cmocka_unit_test(test_imd_lockdown),
		cmocka_unit_test(test_imd_region_used),
		cmocka_unit_test(test_imd_entry_add),
		cmocka_unit_test(test_imd_entry_find),
		cmocka_unit_test(test_imd_entry_find_or_add),
		cmocka_unit_test(test_imd_entry_size),
		cmocka_unit_test(test_imd_entry_at),
		cmocka_unit_test(test_imd_entry_id),
		cmocka_unit_test(test_imd_entry_remove),
		cmocka_unit_test(test_imd_cursor_init),
		cmocka_unit_test(test_imd_cursor_next),
	};

	return cb_run_group_tests(tests, NULL, NULL);
}