/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <commonlib/helpers.h>
#include <memrange.h>

#define MEMRANGE_ALIGN (POWER_OF_2(12))
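
/*
 * Note: unless a different alignment is requested explicitly (see
 * test_memrange_init_and_teardown()), the tests below expect the memrange library to round
 * range bases down and range ends up to this 4 KiB granularity. For example, the library is
 * expected to report res_mock_1[CACHEABLE_TAG] (base 0xE000, size 0xF2000) as the range
 * [0xE000, 0x100000), both endpoints already 4 KiB aligned.
 */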

enum mem_types {
	/* Avoid using 0 to verify that the UUT really sets this memory,
	   but keep the value small, as it is used as an index into the tables below */
	CACHEABLE_TAG = 0x10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	UNASSIGNED_TAG,
	END_OF_RESOURCES
};

/* The indices of the entries matter, since they must reflect the mem_types enum */
struct resource res_mock_1[] = {
	[UNASSIGNED_TAG] = {.base = 0x0,
			    .size = 0x8000,
			    .next = &res_mock_1[CACHEABLE_TAG],
			    .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH},
	[CACHEABLE_TAG] = {.base = 0xE000,
			   .size = 0xF2000,
			   .next = &res_mock_1[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB,
			  .size = 4ULL * KiB,
			  .next = &res_mock_1[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Boundaries 1 byte below 4GiB and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
	[CACHEABLE_TAG] = {.base = 0x1000000,
			   .size = 4ULL * GiB - 0x1000001ULL,
			   .next = &res_mock_2[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB + 1ULL,
			  .size = 4ULL * GiB,
			  .next = &res_mock_2[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Boundary crossing 4GiB. */
struct resource res_mock_3[] = {
	[CACHEABLE_TAG] = {.base = 0xD000,
			   .size = 0xF3000,
			   .next = &res_mock_3[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 1ULL * GiB,
			  .size = 4ULL * GiB,
			  .next = &res_mock_3[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};


struct device mock_device = {.enabled = 1};

/* Fake memory device handle */
struct device *all_devices = &mock_device;
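
/*
 * memranges_add_resources() and friends gather resources by walking the global all_devices
 * list, so pointing it at a single mock device lets each setup_test_*() below pick which
 * res_mock_* table the library sees by setting mock_device.resource_list.
 */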

int setup_test_1(void **state)
{
	*state = res_mock_1;
	mock_device.resource_list = &res_mock_1[UNASSIGNED_TAG];

	return 0;
}

int setup_test_2(void **state)
{
	*state = res_mock_2;
	mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

	return 0;
}

int setup_test_3(void **state)
{
	*state = res_mock_3;
	mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

	return 0;
}

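/*
 * Helpers computing the range boundaries the memrange library is expected to report for a
 * given mock resource: the base is rounded down to MEMRANGE_ALIGN, and the end (base + size,
 * extended by however much the base moved down during alignment) is rounded up.
 */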
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
	return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
	resource_t end = res[range_entry_tag(entry)].base + res[range_entry_tag(entry)].size
			 + (res[range_entry_tag(entry)].base - range_entry_base(entry));
	return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies memranges_init(), memranges_add_resources() and memranges_teardown().
 * It covers the basic functionality of the memrange library: building a memrange structure
 * from the resources available on the platform and freeing the allocated memory.
 *
 * Example memory ranges (res_mock_1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 * +-------UNASSIGNED_TAG--------+ <-0x0
 * |                             |
 * +-----------------------------+ <-0x8000
 *
 *
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long prefetchable = IORESOURCE_PREFETCH;
	struct range_entry *ptr;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	resource_t prev_base = 0;

	memranges_init_empty(&test_memrange, NULL, 0);
	memranges_add_resources(&test_memrange, prefetchable, prefetchable, UNASSIGNED_TAG);
	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* There should be two entries, since the cacheable and reserved regions are not
	   neighbors. Besides these two, a region with an unassigned tag is defined, to
	   emulate an unmapped PCI BAR resource. Such a resource is not mapped into the host
	   physical address space and hence should not be picked up by
	   memranges_add_resources(). */

	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		/* Ranges have to be returned in increasing order */
		assert_true(prev_base <= range_entry_base(ptr));

		prev_base = range_entry_base(ptr);
		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Remove the initial memrange */
	memranges_teardown(&test_memrange);
	memranges_each_entry(ptr, &test_memrange)
		counter++;
	assert_int_equal(counter, 0);
}

/*
 * This test verifies memranges_clone(), memranges_insert() and memranges_update_tag().
 * All operations are performed on a cloned memrange. One of the most important things to
 * check is that memranges_insert() removes all ranges which are covered by the newly
 * inserted one.
 *
 * Example memory ranges (res_mock_1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag value changed during the test.
 *
 *        +--------CACHEABLE_TAG--------+ <-0xE000
 * +------|----INSERTED_TAG----------+  |            <-0xF000
 * |      |     (^READONLY_TAG^)     |  |
 * |      |                          |  |
 * |      +-----------------------------+ <-0x100000
 * +---------------------------------+                <-0x101000
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +------+---------RESERVED_TAG-----+--+ <-0x100000000
 * |      |                          |  |
 * |      +-----------------------------+ <-0x100001000
 * +-----------INSERTED_TAG----------+    <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange, clone_memrange;
	struct resource *res_mock = *state;
	const resource_t new_range_begin_offset = 1ULL << 12; /* one MEMRANGE_ALIGN page */

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	memranges_clone(&clone_memrange, &test_memrange);
	memranges_teardown(&test_memrange);

	/* Verify that the new memrange really is a clone */
	memranges_each_entry(ptr, &clone_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Insert a new range overlapping the first region; it starts one page above the
	   cacheable base and ends one page past its end. */
	memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
			 res_mock[CACHEABLE_TAG].size, INSERTED_TAG);

	/* Three ranges should be there - CACHEABLE (shrunk), INSERTED and RESERVED */
	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
		counter++;
	}
	assert_int_equal(counter, 3);
	counter = 0;

	/* Change the tag of the inserted region so it shadows the (never added) readonly
	 * range. This also verifies the API for updating tags. */
	memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
	}

	/* Check if alignment (4KiB) is properly applied, that is begin - DOWN and end - UP */
	memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
			 res_mock[RESERVED_TAG].size, INSERTED_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(
				range_entry_base(ptr),
				ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN));

			expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN)
				       + new_range_begin_offset + res_mock[RESERVED_TAG].size;
			expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	/* Free the clone */
	memranges_teardown(&clone_memrange);
}

/*
 * This test verifies memranges_fill_holes_up_to() and memranges_create_hole(). The idea of
 * the test is to fill all holes, so that we end up with a contiguous address space fully
 * covered by entries. Then holes are created on the border of two different regions.
 *
 * Example memory ranges (res_mock_1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct range_entry *hole_ptr = NULL;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	int holes_found = 0;
	resource_t last_range_end = 0;
	const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* Count the holes in the ranges */
	memranges_each_entry(ptr, &test_memrange)
	{
		if (!last_range_end) {
			last_range_end = range_entry_end(ptr);
			continue;
		}

		if (range_entry_base(ptr) != last_range_end) {
			holes_found++;
			last_range_end = range_entry_end(ptr);
		}

		if (range_entry_base(ptr) >= holes_fill_end)
			break;
	}

	/* Create range entries which cover a continuous memory range
	   (but with different tags) */
	memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == HOLE_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_UP(res_mock[CACHEABLE_TAG].base
						  + res_mock[CACHEABLE_TAG].size,
						  MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr), holes_fill_end);
			/* Store a pointer to the HOLE_TAG region for future use */
			hole_ptr = ptr;
		}
		counter++;
	}
	assert_int_equal(counter, 2 + holes_found);

	/* If the test data does not have any holes in it, terminate this test */
	if (holes_found == 0)
		return;

	assert_non_null(hole_ptr);
	counter = 0;

	/* Create a hole crossing the border of two range entries */
	const resource_t new_cacheable_end = ALIGN_DOWN(
		res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
		MEMRANGE_ALIGN);
	const resource_t new_hole_begin =
		ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB, MEMRANGE_ALIGN);
	const resource_t ranges_diff = new_hole_begin - new_cacheable_end;

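	/* Carve a hole covering the last 4 KiB of the cacheable range and the first 4 KiB of
	   the HOLE_TAG range - the hatched gap in the diagram above (res_mock_1 values). */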
429 memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);
430
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000431 memranges_each_entry(ptr, &test_memrange)
432 {
Jan Dabros9e16ca92020-07-08 22:33:52 +0200433 switch (range_entry_tag(ptr)) {
434 case CACHEABLE_TAG:
435 assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
436 assert_int_equal(range_entry_end(ptr), new_cacheable_end);
437 break;
438 case RESERVED_TAG:
439 assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000440 assert_int_equal(range_entry_end(ptr),
441 res_mock[RESERVED_TAG].base
442 + res_mock[RESERVED_TAG].size);
Jan Dabros9e16ca92020-07-08 22:33:52 +0200443 break;
444 case HOLE_TAG:
445 assert_int_equal(range_entry_base(ptr), new_hole_begin);
446 assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
447 break;
448 default:
449 break;
450 }
451 counter++;
452 }
453 assert_int_equal(counter, 3);
454
455 memranges_teardown(&test_memrange);
456}

/*
 * This test verifies the memranges_steal() function. A simple check is done by attempting
 * to steal some memory from the top of the region with CACHEABLE_TAG and some from the
 * bottom of the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock_1) for test_memrange_steal.
 * Space marked with (/) is stolen during the test.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |/////////////////////////////| <-stolen_base
 * +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
 *
 *
 *
 * +--------READONLY_TAG---------+ <-0xFF0000 <-stolen_base
 * |/////////////////////////////| <-stolen_base + 0x4000
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
	bool status = false;
	resource_t stolen;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	const resource_t stolen_range_size = 0x4000;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	struct range_entry *ptr;
	size_t count = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
	assert_true(status);
	assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
			res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, READONLY_TAG, &stolen, false);
	assert_true(status);
	assert_in_range(stolen, res_mock[READONLY_TAG].base,
			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
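			/* The block was stolen from the top of the cacheable range, so the
			   range's new end is expected to be its aligned original end minus
			   the stolen size. */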
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
							     + res_mock[CACHEABLE_TAG].size,
							     MEMRANGE_ALIGN)
						    - stolen_range_size,
						    MEMRANGE_ALIGN));
		}
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
					 + stolen_range_size);
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	/* Check if inserting ranges in previously stolen areas will merge them. */
	memranges_insert(&test_memrange,
			 res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
			 - stolen_range_size - 0x12,
			 stolen_range_size, CACHEABLE_TAG);
	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
			 READONLY_TAG);

	memranges_each_entry(ptr, &test_memrange)
	{
		const unsigned long tag = range_entry_tag(ptr);
		assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr),
				 ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
		assert_int_equal(range_entry_end(ptr),
				 ALIGN_UP(res_mock[tag].base + res_mock[tag].size,
					  MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their base and end
   addresses */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
						     size_t ranges_count, resource_t alignment)
{
	size_t count = 0;
	struct range_entry *ptr;

	memranges_each_entry(ptr, ranges)
	{
		assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
		assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

		count++;
	}
	assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for the correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	struct memranges test_memrange;
	struct range_entry range_entries[4] = {0};

	/* Test memranges_init() correctness */
	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	/* Expect all entries to be aligned to 4KiB (2^12) */
	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	/* Expect the ranges list to be empty after teardown */
	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_with_alignment() correctness with alignment of 1KiB (2^10) */
	memranges_init_with_alignment(&test_memrange, cacheable, cacheable, CACHEABLE_TAG, 10);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty() correctness */
	memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty_with_alignment() correctness with alignment of
	   8KiB (2^13) */
	memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
					    ARRAY_SIZE(range_entries), 13);
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting only ranges with the memory resource flag */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
	/* Accept only memory resources */
	return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting ranges with the memory resource flag */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
	/* Reject memory resources */
	return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject ranges. */
static void test_memrange_add_resources_filter(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges test_memrange;
	struct range_entry *ptr;
	size_t count = 0;
	size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

	/* Check if the filter accepts a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_mem_only);

	/* Check if the filter accepted the desired ranges. */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
		assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
		assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(2, count);
	count = 0;
	memranges_teardown(&test_memrange);

	/* Check if the filter rejects a range correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_non_mem);

	check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
}

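/* Run the whole suite once per mock resource set; each group name describes how that set
   places its ranges relative to the 4 GiB boundary. */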
int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_memrange_basic),
		cmocka_unit_test(test_memrange_clone_insert),
		cmocka_unit_test(test_memrange_holes),
		cmocka_unit_test(test_memrange_steal),
		cmocka_unit_test(test_memrange_init_and_teardown),
		cmocka_unit_test(test_memrange_add_resources_filter),
	};

	return cmocka_run_group_tests_name(__TEST_NAME__ "(Boundary on 4GiB)", tests,
					   setup_test_1, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Boundaries 1 byte from 4GiB)",
					     tests, setup_test_2, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Range over 4GiB boundary)", tests,
					     setup_test_3, NULL);
}
Jan Dabros9e16ca92020-07-08 22:33:52 +0200709}