/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <device/resource.h>
#include <commonlib/helpers.h>
#include <memrange.h>

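/* Default alignment expected from the memrange library in these tests: 4 KiB (2^12). */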
#define MEMRANGE_ALIGN (POWER_OF_2(12))

enum mem_types {
	/* Avoid using 0 to make sure that the UUT really sets this memory,
	   but keep the values small, as they are used as indices into a table */
	CACHEABLE_TAG = 0x10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	END_OF_RESOURCES
};

/* The indices of the entries matter, since they must reflect the mem_types enum */
struct resource res_mock_1[] = {
	[CACHEABLE_TAG] = {.base = 0xE000,
			   .size = 0xF2000,
			   .next = &res_mock_1[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM},
	[RESERVED_TAG] = {.base = 4ULL * GiB,
			  .size = 4ULL * KiB,
			  .next = &res_mock_1[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM}
};

/* Boundary 1 byte below 4GiB and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
	[CACHEABLE_TAG] = {.base = 0x1000000,
			   .size = 4ULL * GiB - 0x1000001ULL,
			   .next = &res_mock_2[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM},
	[RESERVED_TAG] = {.base = 4ULL * GiB + 1ULL,
			  .size = 4ULL * GiB,
			  .next = &res_mock_2[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM},
	[READONLY_TAG] = {.base = 0,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM}
};

/* Boundary crossing 4GiB. */
struct resource res_mock_3[] = {
	[CACHEABLE_TAG] = {.base = 0xD000,
			   .size = 0xF3000,
			   .next = &res_mock_3[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM},
	[RESERVED_TAG] = {.base = 1ULL * GiB,
			  .size = 4ULL * GiB,
			  .next = &res_mock_3[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM}
};


struct device mock_device = {.enabled = 1};

/* Fake handle for memory devices */
struct device *all_devices = &mock_device;

int setup_test_1(void **state)
{
	*state = res_mock_1;
	mock_device.resource_list = &res_mock_1[CACHEABLE_TAG];

	return 0;
}

int setup_test_2(void **state)
{
	*state = res_mock_2;
	mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

	return 0;
}

int setup_test_3(void **state)
{
	*state = res_mock_3;
	mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

	return 0;
}

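/*
 * Helpers computing the expected MEMRANGE_ALIGN-aligned boundaries of a range entry,
 * based on the mock resource it was created from.
 */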
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
	return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
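	/* Compensate for the base having been aligned down before rounding the end up. */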
	resource_t end = res[range_entry_tag(entry)].base + res[range_entry_tag(entry)].size
			 + (res[range_entry_tag(entry)].base - range_entry_base(entry));
	return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies the memranges_init(), memranges_add_resources() and memranges_teardown()
 * functions. It covers the basic functionality of the memrange library: creating a memrange
 * structure from the resources available on the platform and freeing the allocated memory.
 *
 * Example memory ranges (res_mock_1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	resource_t prev_base = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* There should be two entries, since the cacheable and
	   reserved regions are not neighbors */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		/* Ranges have to be returned in increasing order */
		assert_true(prev_base <= range_entry_base(ptr));

		prev_base = range_entry_base(ptr);
		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Remove the initial memrange */
	memranges_teardown(&test_memrange);
	memranges_each_entry(ptr, &test_memrange) counter++;
	assert_int_equal(counter, 0);
}

/*
 * This test verifies the memranges_clone(), memranges_insert() and memranges_update_tag()
 * functions. All operations are performed on the cloned memrange. One of the most important
 * things to check is that memranges_insert() removes all ranges which are covered by the
 * newly inserted one.
 *
 * Example memory ranges (res_mock_1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag value changed during the test.
 *
 *        +--------CACHEABLE_TAG--------+ <-0xE000
 * +------|--------INSERTED_TAG---------|---+ <-0xF000
 * |      |      (^READONLY_TAG^)       |   |
 * |      |                             |   |
 * |      +-----------------------------+   | <-0x100000
 * +----------------------------------------+ <-0x101000
 *
 *
 *        +-----***READONLY_TAG***------+ <-0xFF0000
 *        |                             |
 *        |                             |
 *        |                             |
 *        +-----------------------------+ <-0x1000000
 *
 *
 * +------+--------RESERVED_TAG---------+---+ <-0x100000000
 * |      |                             |   |
 * |      +-----------------------------+   | <-0x100001000
 * +-------------INSERTED_TAG---------------+ <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange, clone_memrange;
	struct resource *res_mock = *state;
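	/* 4 KiB offset (one MEMRANGE_ALIGN unit) used to shift the inserted range. */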
	const resource_t new_range_begin_offset = 1ULL << 12;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	memranges_clone(&clone_memrange, &test_memrange);
	memranges_teardown(&test_memrange);

	/* Verify that the new memrange is really a clone */
	memranges_each_entry(ptr, &clone_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Insert a new range, which will overlap the first region. */
	memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
			 res_mock[CACHEABLE_TAG].size, INSERTED_TAG);

	/* Three ranges should be there: CACHEABLE (shrunk), INSERTED and RESERVED */
	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
		counter++;
	}
	assert_int_equal(counter, 3);
	counter = 0;

	/* Change the tag of the inserted region so that it shadows the read-only range.
	 * Additionally, this verifies the API for updating tags. */
	memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
	}

	/* Check that the alignment (4 KiB) is properly applied, that is,
	   the begin is aligned down and the end is aligned up */
	memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
			 res_mock[RESERVED_TAG].size, INSERTED_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(
				range_entry_base(ptr),
				ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN));

			expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN)
				       + new_range_begin_offset + res_mock[RESERVED_TAG].size;
			expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	/* Free the clone */
	memranges_teardown(&clone_memrange);
}

/*
 * This test verifies memranges_fill_holes_up_to() and memranges_create_hole(). The idea of
 * the test is to fill all holes, so that we end up with a contiguous address space fully
 * covered by entries. Then a hole is created on the border of two different regions.
 *
 * Example memory ranges (res_mock_1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct range_entry *hole_ptr = NULL;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	int holes_found = 0;
	resource_t last_range_end = 0;
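	/* Fill holes only up to the beginning of the RESERVED region. */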
	const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* Count holes in ranges */
	memranges_each_entry(ptr, &test_memrange)
	{
		if (!last_range_end) {
			last_range_end = range_entry_end(ptr);
			continue;
		}

		if (range_entry_base(ptr) != last_range_end) {
			holes_found++;
			last_range_end = range_entry_end(ptr);
		}

		if (range_entry_base(ptr) >= holes_fill_end)
			break;
	}

	/* Create range entries covering a contiguous memory range
	   (but with different tags) */
	memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == HOLE_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_UP(res_mock[CACHEABLE_TAG].base
							  + res_mock[CACHEABLE_TAG].size,
						  MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr), holes_fill_end);
			/* Store a pointer to the HOLE_TAG region for later use */
			hole_ptr = ptr;
		}
		counter++;
	}
	assert_int_equal(counter, 2 + holes_found);

	/* If the test data does not have any holes in it, then terminate this test */
	if (holes_found == 0)
		return;

	assert_non_null(hole_ptr);
	counter = 0;

	/* Create a hole crossing the border of two range entries */
	const resource_t new_cacheable_end = ALIGN_DOWN(
		res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
		MEMRANGE_ALIGN);
	const resource_t new_hole_begin =
		ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB, MEMRANGE_ALIGN);
	const resource_t ranges_diff = new_hole_begin - new_cacheable_end;

	memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);

	memranges_each_entry(ptr, &test_memrange)
	{
		switch (range_entry_tag(ptr)) {
		case CACHEABLE_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
			assert_int_equal(range_entry_end(ptr), new_cacheable_end);
			break;
		case RESERVED_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
			assert_int_equal(range_entry_end(ptr),
					 res_mock[RESERVED_TAG].base
						 + res_mock[RESERVED_TAG].size);
			break;
		case HOLE_TAG:
			assert_int_equal(range_entry_base(ptr), new_hole_begin);
			assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
			break;
		default:
			break;
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	memranges_teardown(&test_memrange);
}

/*
 * This test verifies the memranges_steal() function. A simple check is done by an attempt
 * to steal some memory from the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock_1) for test_memrange_steal.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +--------READONLY_TAG---------+ <-0xFF0000
 * |                             |
 * |/////////////////////////////| <-stolen_base
 * |/////////////////////////////| <-stolen_base + 0x4000
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
	bool status = false;
	resource_t stolen;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
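	/* Steal a 16 KiB (0x4000) range from the region tagged READONLY_TAG. */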
	const resource_t stolen_range_size = 0x4000;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	struct range_entry *ptr;
	size_t count = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, READONLY_TAG, &stolen);
	assert_true(status);
	assert_in_range(stolen, res_mock[READONLY_TAG].base,
			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
						 + stolen_range_size);
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	/* Check if inserting a range into the previously stolen area will merge it. */
	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
			 READONLY_TAG);
	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(
				range_entry_base(ptr),
				ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN));
			assert_int_equal(
				range_entry_end(ptr),
				ALIGN_UP(range_entry_base(ptr) + res_mock[READONLY_TAG].size,
					 MEMRANGE_ALIGN));
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their
   base and end pointers */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
						    size_t ranges_count, resource_t alignment)
{
	size_t count = 0;
	struct range_entry *ptr;

	memranges_each_entry(ptr, ranges)
	{
		assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
		assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

		count++;
	}
	assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	struct memranges test_memrange;
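	/* Backing storage for the memranges_init_empty*() variants below. */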
	struct range_entry range_entries[4] = {0};

	/* Test memranges_init() correctness */
	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	/* Expect all entries to be aligned to 4KiB (2^12) */
	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	/* Expect the ranges list to be empty after teardown */
	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_with_alignment() correctness with alignment of 1KiB (2^10) */
	memranges_init_with_alignment(&test_memrange, cacheable, cacheable, CACHEABLE_TAG, 10);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty() correctness */
	memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty_with_alignment() correctness
	   with alignment of 8KiB (2^13) */
	memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
					    ARRAY_SIZE(range_entries), 13);
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting only ranges that have the memory resource flag */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
	/* Accept only memory resources */
	return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting ranges that have the memory resource flag */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
	/* Reject memory resources */
	return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject ranges. */
static void test_memrange_add_resources_filter(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges test_memrange;
	struct range_entry *ptr;
	size_t count = 0;
	size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

	/* Check if the filter accepts a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_mem_only);

	/* Check if the filter accepted the desired ranges. */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
		assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
		assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(2, count);
	count = 0;
	memranges_teardown(&test_memrange);

	/* Check if the filter rejects a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_non_mem);

	check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_memrange_basic),
		cmocka_unit_test(test_memrange_clone_insert),
		cmocka_unit_test(test_memrange_holes),
		cmocka_unit_test(test_memrange_steal),
		cmocka_unit_test(test_memrange_init_and_teardown),
		cmocka_unit_test(test_memrange_add_resources_filter),
	};

	return cmocka_run_group_tests_name(__TEST_NAME__ "(Boundary on 4GiB)", tests,
					   setup_test_1, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Boundaries 1 byte from 4GiB)",
					     tests, setup_test_2, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Range over 4GiB boundary)", tests,
					     setup_test_3, NULL);
}