/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <device/resource.h>
#include <commonlib/helpers.h>
#include <memrange.h>

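/* Default alignment of memrange entries; the tests expect all range bases and ends
   to be rounded to this 4KiB (2^12) boundary. */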
#define MEMRANGE_ALIGN (POWER_OF_2(12))

enum mem_types {
	/* Avoid using 0, to verify that the UUT really sets this memory,
	   but keep the values small, as they are used as indices into a table. */
	CACHEABLE_TAG = 0x10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	END_OF_RESOURCES
};

/* Indices of entries matter, since they must reflect the mem_types enum. */
struct resource res_mock_1[] = {
	[CACHEABLE_TAG] = { .base = 0xE000, .size = 0xF2000,
		.next = &res_mock_1[RESERVED_TAG], .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM },
	[RESERVED_TAG] = { .base = 4ULL * GiB, .size = 4ULL * KiB,
		.next = &res_mock_1[READONLY_TAG], .flags = IORESOURCE_RESERVE | IORESOURCE_MEM },
	[READONLY_TAG] = { .base = 0xFF0000, .size = 0x10000, .next = NULL,
		.flags = IORESOURCE_READONLY | IORESOURCE_MEM }
};

/* Boundaries 1 byte below 4GiB and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
	[CACHEABLE_TAG] = { .base = 0x1000000, .size = 4ULL * GiB - 0x1000001ULL,
		.next = &res_mock_2[RESERVED_TAG], .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM },
	[RESERVED_TAG] = { .base = 4ULL * GiB + 1ULL, .size = 4ULL * GiB,
		.next = &res_mock_2[READONLY_TAG], .flags = IORESOURCE_RESERVE | IORESOURCE_MEM },
	[READONLY_TAG] = { .base = 0, .size = 0x10000, .next = NULL,
		.flags = IORESOURCE_READONLY | IORESOURCE_MEM }
};

/* Range crossing the 4GiB boundary. */
struct resource res_mock_3[] = {
	[CACHEABLE_TAG] = { .base = 0xD000, .size = 0xF3000,
		.next = &res_mock_3[RESERVED_TAG], .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM },
	[RESERVED_TAG] = { .base = 1ULL * GiB, .size = 4ULL * GiB,
		.next = &res_mock_3[READONLY_TAG], .flags = IORESOURCE_RESERVE | IORESOURCE_MEM },
	[READONLY_TAG] = { .base = 0xFF0000, .size = 0x10000, .next = NULL,
		.flags = IORESOURCE_READONLY | IORESOURCE_MEM }
};

struct device mock_device = { .enabled = 1 };

/* Fake memory device handle */
struct device *all_devices = &mock_device;

int setup_test_1(void **state)
{
	*state = res_mock_1;
	mock_device.resource_list = &res_mock_1[CACHEABLE_TAG];

	return 0;
}

int setup_test_2(void **state)
{
	*state = res_mock_2;
	mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

	return 0;
}

int setup_test_3(void **state)
{
	*state = res_mock_3;
	mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

	return 0;
}

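/* Expected base of a range entry: the base of the matching mock resource,
   aligned down to MEMRANGE_ALIGN. */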
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
	return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

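/* Expected end of a range entry: the end of the matching mock resource plus the offset
   between the resource base and the aligned-down entry base, aligned up to
   MEMRANGE_ALIGN. */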
resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
	resource_t end = res[range_entry_tag(entry)].base +
			 res[range_entry_tag(entry)].size +
			 (res[range_entry_tag(entry)].base - range_entry_base(entry));
	return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies the memranges_init(), memranges_add_resources() and
 * memranges_teardown() functions. It covers the basic functionality of the memrange
 * library - creating a memrange structure from the resources available on the platform,
 * and freeing the allocated memory.
 *
 * Example memory ranges (res_mock1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	resource_t prev_base = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* There should be two entries, since the cacheable and
	   reserved regions are not neighbors. */
	memranges_each_entry(ptr, &test_memrange) {
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		/* Ranges have to be returned in increasing order */
		assert_true(prev_base <= range_entry_base(ptr));

		prev_base = range_entry_base(ptr);
		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Remove the initial memrange */
	memranges_teardown(&test_memrange);
	memranges_each_entry(ptr, &test_memrange)
		counter++;
	assert_int_equal(counter, 0);
}

/*
 * This test verifies the memranges_clone(), memranges_insert() and memranges_update_tag()
 * functions. All operations are performed on a cloned memrange. One of the most important
 * things to check is that memranges_insert() removes all ranges which are covered by the
 * newly inserted one.
 *
 * Example memory ranges (res_mock1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag value changed during the test.
 *
 *        +--------CACHEABLE_TAG--------+ <-0xE000
 * +------|----INSERTED_TAG---------+   | <-0xF000
 * |      |   (^READONLY_TAG^)      |   |
 * |      |                         |   |
 * |      +-----------------------------+ <-0x100000
 * +--------------------------------+ <-0x101000
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +------+--------RESERVED_TAG---------+---+ <-0x100000000
 * |      |                             |   |
 * |      +-----------------------------+   | <-0x100001000
 * +-----------INSERTED_TAG-----------------+ <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange, clone_memrange;
	struct resource *res_mock = *state;
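	/* Offset equal to MEMRANGE_ALIGN (4KiB), by which inserted ranges are shifted. */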
	const resource_t new_range_begin_offset = 1ULL << 12;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	memranges_clone(&clone_memrange, &test_memrange);
	memranges_teardown(&test_memrange);

	/* Verify that the new memrange is really a clone */
	memranges_each_entry(ptr, &clone_memrange) {
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Insert a new range, which will overlap with the first region. */
	memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
			 res_mock[CACHEABLE_TAG].size, INSERTED_TAG);

	/* Three ranges should be there - CACHEABLE (shrunk), INSERTED and RESERVED */
	memranges_each_entry(ptr, &clone_memrange) {
		resource_t expected_end;

		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset +
				       res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
		counter++;
	}
	assert_int_equal(counter, 3);
	counter = 0;

	/* Change the tag of the inserted region, which shadows the readonly range.
	 * This additionally verifies the API for updating tags. */
	memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

	memranges_each_entry(ptr, &clone_memrange) {
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset +
				       res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
	}

	/* Check that the alignment (4KiB) is properly applied, that is,
	   the base is aligned down and the end is aligned up. */
	memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
			 res_mock[RESERVED_TAG].size, INSERTED_TAG);

	memranges_each_entry(ptr, &clone_memrange) {
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[RESERVED_TAG].base,
						    MEMRANGE_ALIGN));

			expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN) +
				       new_range_begin_offset + res_mock[RESERVED_TAG].size;
			expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	/* Free the clone */
	memranges_teardown(&clone_memrange);
}

/*
 * This test verifies the memranges_fill_holes_up_to() and memranges_create_hole()
 * functions. The idea of the test is to fill all holes, so that we end up with a
 * contiguous address space fully covered by entries. Then, a hole is created on the
 * border of two different regions.
 *
 * Example memory ranges (res_mock1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct range_entry *hole_ptr = NULL;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	int holes_found = 0;
	resource_t last_range_end = 0;
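	/* Holes are filled only up to the base of the RESERVED_TAG region. */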
	const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* Count holes in ranges */
	memranges_each_entry(ptr, &test_memrange) {
		if (!last_range_end) {
			last_range_end = range_entry_end(ptr);
			continue;
		}

		if (range_entry_base(ptr) != last_range_end) {
			holes_found++;
			last_range_end = range_entry_end(ptr);
		}

		if (range_entry_base(ptr) >= holes_fill_end)
			break;
	}

	/* Create range entries which cover a contiguous memory range
	   (but with different tags). */
	memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

	memranges_each_entry(ptr, &test_memrange) {
		if (range_entry_tag(ptr) == HOLE_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_UP(res_mock[CACHEABLE_TAG].base +
						  res_mock[CACHEABLE_TAG].size,
						  MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr), holes_fill_end);
			/* Store a pointer to the HOLE_TAG region for future use */
			hole_ptr = ptr;
		}
		counter++;
	}
	assert_int_equal(counter, 2 + holes_found);

	/* If the test data does not have any holes in it, then terminate this test */
	if (holes_found == 0)
		return;

	assert_non_null(hole_ptr);
	counter = 0;

	/* Create hole crossing the border of two range entries */
	const resource_t new_cacheable_end = ALIGN_DOWN(
		res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
		MEMRANGE_ALIGN);
	const resource_t new_hole_begin = ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB,
						   MEMRANGE_ALIGN);
	const resource_t ranges_diff = new_hole_begin - new_cacheable_end;

	memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);

	memranges_each_entry(ptr, &test_memrange) {
		switch (range_entry_tag(ptr)) {
		case CACHEABLE_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
			assert_int_equal(range_entry_end(ptr), new_cacheable_end);
			break;
		case RESERVED_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
			assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base +
							       res_mock[RESERVED_TAG].size);
			break;
		case HOLE_TAG:
			assert_int_equal(range_entry_base(ptr), new_hole_begin);
			assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
			break;
		default:
			break;
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	memranges_teardown(&test_memrange);
}

/*
 * This test verifies the memranges_steal() function. A simple check is done by attempting
 * to steal some memory from the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock1) for test_memrange_steal.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +--------READONLY_TAG---------+ <-0xFF0000
 * |                             |
 * |/////////////////////////////| <-stolen_base
 * |/////////////////////////////| <-stolen_base + 0x4000
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
	bool status = false;
	resource_t stolen;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	const resource_t stolen_range_size = 0x4000;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	struct range_entry *ptr;
	size_t count = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

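	/* Steal a 0x4000-byte range, aligned to 2^12, lying below the end of the
	   RESERVED_TAG region; it is expected to be carved out of the READONLY_TAG
	   region. */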
	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, READONLY_TAG, &stolen);
	assert_true(status);
	assert_in_range(stolen, res_mock[READONLY_TAG].base,
			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

	memranges_each_entry(ptr, &test_memrange) {
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
					 + stolen_range_size);
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	/* Check if inserting a range into the previously stolen area will merge it. */
	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
			 READONLY_TAG);
	memranges_each_entry(ptr, &test_memrange) {
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base,
						    MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(range_entry_base(ptr) +
						  res_mock[READONLY_TAG].size,
						  MEMRANGE_ALIGN));
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their base and end
   addresses */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
						    size_t ranges_count, resource_t alignment)
{
	size_t count = 0;
	struct range_entry *ptr;

	memranges_each_entry(ptr, ranges) {
		assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
		assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

		count++;
	}
	assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	struct memranges test_memrange;
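	/* Backing storage for the memranges_init_empty*() variants used below. */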
	struct range_entry range_entries[4] = { 0 };

	/* Test memranges_init() correctness */
	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	/* Expect all entries to be aligned to 4KiB (2^12) */
	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	/* Expect ranges list to be empty after teardown */
	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_with_alignment() correctness with alignment of 1KiB (2^10) */
	memranges_init_with_alignment(&test_memrange, cacheable, cacheable,
				      CACHEABLE_TAG, 10);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_empty() correctness */
	memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_empty_with_alignment() correctness with alignment of
	   8KiB (2^13) */
	memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
					    ARRAY_SIZE(range_entries), 13);
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting only ranges with the memory resource flag */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
	/* Accept only memory resources */
	return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting ranges with the memory resource flag */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
	/* Accept only non-memory resources */
	return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject ranges. */
static void test_memrange_add_resources_filter(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges test_memrange;
	struct range_entry *ptr;
	size_t count = 0;
	size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

	/* Check if the filter accepts a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_mem_only);

	/* Check if the filter accepted the desired range. */
	memranges_each_entry(ptr, &test_memrange) {
		assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
		assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
		assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(2, count);
	count = 0;
	memranges_teardown(&test_memrange);

	/* Check if the filter rejects a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_non_mem);

	check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_memrange_basic),
		cmocka_unit_test(test_memrange_clone_insert),
		cmocka_unit_test(test_memrange_holes),
		cmocka_unit_test(test_memrange_steal),
		cmocka_unit_test(test_memrange_init_and_teardown),
		cmocka_unit_test(test_memrange_add_resources_filter),
	};

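	/* Run the same suite once per mock resource set; each set places its ranges
	   differently relative to the 4GiB boundary. */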
	return cmocka_run_group_tests_name("Boundary on 4GiB",
					   tests, setup_test_1, NULL) +
	       cmocka_run_group_tests_name("Boundaries 1 byte from 4GiB",
					   tests, setup_test_2, NULL) +
	       cmocka_run_group_tests_name("Range over 4GiB boundary",
					   tests, setup_test_3, NULL);
}