/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <device/resource.h>
#include <commonlib/helpers.h>
#include <memrange.h>

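/* 4 KiB (2^12): the granularity the tests expect the memrange library to apply to
   range entry bases and ends by default. */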
#define MEMRANGE_ALIGN (POWER_OF_2(12))

enum mem_types {
	/* Avoid using 0 to verify that the UUT really sets this memory,
	   but keep the value small, as it is used as an index into the table below. */
	CACHEABLE_TAG = 0x10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	UNASSIGNED_TAG,
	END_OF_RESOURCES
};
/* The indices of the entries matter, since they must reflect the mem_types enum. */
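/* Each mock array is a singly linked resource list (chained via .next); the setup
   functions below attach a list head to mock_device.resource_list. */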
struct resource res_mock_1[] = {
	[UNASSIGNED_TAG] = {.base = 0x0,
			    .size = 0x8000,
			    .next = &res_mock_1[CACHEABLE_TAG],
			    .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH},
	[CACHEABLE_TAG] = {.base = 0xE000,
			   .size = 0xF2000,
			   .next = &res_mock_1[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB,
			  .size = 4ULL * KiB,
			  .next = &res_mock_1[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Boundaries 1 byte below and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
	[CACHEABLE_TAG] = {.base = 0x1000000,
			   .size = 4ULL * GiB - 0x1000001ULL,
			   .next = &res_mock_2[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB + 1ULL,
			  .size = 4ULL * GiB,
			  .next = &res_mock_2[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Range crossing the 4GiB boundary. */
struct resource res_mock_3[] = {
	[CACHEABLE_TAG] = {.base = 0xD000,
			   .size = 0xF3000,
			   .next = &res_mock_3[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 1ULL * GiB,
			  .size = 4ULL * GiB,
			  .next = &res_mock_3[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

struct device mock_device = {.enabled = 1};

/* Fake device list head; the memrange library collects resources by walking the
   global device list, which starts at all_devices. */
struct device *all_devices = &mock_device;

int setup_test_1(void **state)
{
	*state = res_mock_1;
	mock_device.resource_list = &res_mock_1[UNASSIGNED_TAG];

	return 0;
}

int setup_test_2(void **state)
{
	*state = res_mock_2;
	mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

	return 0;
}

int setup_test_3(void **state)
{
	*state = res_mock_3;
	mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

	return 0;
}

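/* Expected-value helpers: the tests expect the library to align each entry's base down
   and its end up to MEMRANGE_ALIGN, so compute those values from the backing mock
   resource. */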
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
	return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
	/* Pad the end by however much the base was aligned down, then align up. */
	resource_t end = res[range_entry_tag(entry)].base + res[range_entry_tag(entry)].size
			 + (res[range_entry_tag(entry)].base - range_entry_base(entry));
	return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies the memranges_init(), memranges_add_resources() and
 * memranges_teardown() functions. It covers the basic functionality of the memrange
 * library: creating a memrange structure from the resources available on the platform
 * and freeing the allocated memory afterwards.
 *
 * Example memory ranges (res_mock_1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 * +-------UNASSIGNED_TAG--------+ <-0x0
 * |                             |
 * +-----------------------------+ <-0x8000
 *
 *
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long prefetchable = IORESOURCE_PREFETCH;
	struct range_entry *ptr;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	resource_t prev_base = 0;

	memranges_init_empty(&test_memrange, NULL, 0);
	memranges_add_resources(&test_memrange, prefetchable, prefetchable, UNASSIGNED_TAG);
	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* There should be two entries, since the cacheable and reserved regions are not
	   neighbors. Besides these two, a region with an unassigned tag is defined, to
	   emulate an unmapped PCI BAR resource. This resource is not mapped into the host
	   physical address space and hence should not be picked up by
	   memranges_add_resources(). */

	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		/* Ranges have to be returned in increasing order */
		assert_true(prev_base <= range_entry_base(ptr));

		prev_base = range_entry_base(ptr);
		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Remove the initial memrange */
	memranges_teardown(&test_memrange);
	memranges_each_entry(ptr, &test_memrange) counter++;
	assert_int_equal(counter, 0);
}

/*
 * This test verifies the memranges_clone(), memranges_insert() and
 * memranges_update_tag() functions. All operations are performed on the cloned
 * memrange. One of the most important things to check is that memranges_insert()
 * removes all ranges that are covered by the newly inserted one.
 *
 * Example memory ranges (res_mock_1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag changed during the test.
 *
 *      +--------CACHEABLE_TAG--------+ <-0xE000
 * +----|--------INSERTED_TAG---------|---+ <-0xF000
 * |    |      (^READONLY_TAG^)       |   |
 * |    |                             |   |
 * |    +-----------------------------+   | <-0x100000
 * +--------------------------------------+ <-0x101000
 *
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +------+--------RESERVED_TAG---------+---+ <-0x100000000
 * |      |                             |   |
 * |      +-----------------------------+   | <-0x100001000
 * +---------------INSERTED_TAG-------------+ <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange, clone_memrange;
	struct resource *res_mock = *state;
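	/* One 4 KiB page (same value as MEMRANGE_ALIGN); the first inserted range below
	   starts this far above the CACHEABLE base. */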
	const resource_t new_range_begin_offset = 1ULL << 12;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	memranges_clone(&clone_memrange, &test_memrange);
	memranges_teardown(&test_memrange);

	/* Verify that the new memrange is really a clone */
	memranges_each_entry(ptr, &clone_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Insert a new range, which will overlap with the first region. */
	memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
			 res_mock[CACHEABLE_TAG].size, INSERTED_TAG);

	/* Three ranges should be there - CACHEABLE (shrunk), INSERTED and RESERVED */
	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
		counter++;
	}
	assert_int_equal(counter, 3);
	counter = 0;

	/* Verify the API for updating tags by changing the tag of the previously
	   inserted region to READONLY_TAG. */
	memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
	}

	/* Check if alignment (4KiB) is properly applied, that is, the begin is aligned
	   down and the end is aligned up */
	memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
			 res_mock[RESERVED_TAG].size, INSERTED_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(
				range_entry_base(ptr),
				ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN));

			expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN)
				       + new_range_begin_offset + res_mock[RESERVED_TAG].size;
			expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	/* Free the clone */
	memranges_teardown(&clone_memrange);
}

/*
 * This test verifies memranges_fill_holes_up_to() and memranges_create_hole(). The idea
 * of the test is to fill all holes, so that we end up with a contiguous address space
 * fully covered by entries. Then a hole is created on the border of two different
 * regions.
 *
 * Example memory ranges (res_mock_1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct range_entry *hole_ptr = NULL;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	int holes_found = 0;
	resource_t last_range_end = 0;
	const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* Count the holes between the ranges */
	memranges_each_entry(ptr, &test_memrange)
	{
		if (!last_range_end) {
			last_range_end = range_entry_end(ptr);
			continue;
		}

		if (range_entry_base(ptr) != last_range_end) {
			holes_found++;
			last_range_end = range_entry_end(ptr);
		}

		if (range_entry_base(ptr) >= holes_fill_end)
			break;
	}

	/* Create range entries which cover a contiguous memory range
	   (but with different tags) */
	memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

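	/* The filled hole should start at the aligned-up end of the CACHEABLE region and
	   stop exactly at the base of the RESERVED region (holes_fill_end). */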
	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == HOLE_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_UP(res_mock[CACHEABLE_TAG].base
						  + res_mock[CACHEABLE_TAG].size,
						  MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr), holes_fill_end);
			/* Store a pointer to the HOLE_TAG region for future use */
			hole_ptr = ptr;
		}
		counter++;
	}
	assert_int_equal(counter, 2 + holes_found);

	/* If the test data does not have any holes in it, then terminate this test */
	if (holes_found == 0)
		return;

	assert_non_null(hole_ptr);
	counter = 0;

	/* Create a hole crossing the border of two range entries */
	const resource_t new_cacheable_end = ALIGN_DOWN(
		res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
		MEMRANGE_ALIGN);
	const resource_t new_hole_begin =
		ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB, MEMRANGE_ALIGN);
	const resource_t ranges_diff = new_hole_begin - new_cacheable_end;

	memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);

	memranges_each_entry(ptr, &test_memrange)
	{
		switch (range_entry_tag(ptr)) {
		case CACHEABLE_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
			assert_int_equal(range_entry_end(ptr), new_cacheable_end);
			break;
		case RESERVED_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
			assert_int_equal(range_entry_end(ptr),
					 res_mock[RESERVED_TAG].base
					 + res_mock[RESERVED_TAG].size);
			break;
		case HOLE_TAG:
			assert_int_equal(range_entry_base(ptr), new_hole_begin);
			assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
			break;
		default:
			break;
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	memranges_teardown(&test_memrange);
}

/*
 * This test verifies the memranges_steal() function. A simple check is done by
 * attempting to steal some memory from the top of the region with CACHEABLE_TAG
 * and some from the bottom of the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock_1) for test_memrange_steal.
 * Space marked with (/) is stolen during the test.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |/////////////////////////////| <-stolen_base
 * +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
 *
 *
 *
 * +--------READONLY_TAG---------+ <-0xFF0000 <-stolen_base
 * |/////////////////////////////| <-stolen_base + 0x4000
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
	bool status = false;
	resource_t stolen;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	const resource_t stolen_range_size = 0x4000;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	struct range_entry *ptr;
	size_t count = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
	assert_true(status);
	assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
			res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, READONLY_TAG, &stolen, false);
	assert_true(status);
	assert_in_range(stolen, res_mock[READONLY_TAG].base,
			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

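	/* After stealing, the CACHEABLE region should end stolen_range_size below its
	   aligned end, and the READONLY region should start stolen_range_size above its
	   aligned base. */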
	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
							     + res_mock[CACHEABLE_TAG].size,
							     MEMRANGE_ALIGN)
						    - stolen_range_size,
						    MEMRANGE_ALIGN));
		}
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
					 + stolen_range_size);
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	/* Check if inserting ranges in previously stolen areas will merge them. */
	memranges_insert(&test_memrange,
			 res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
			 - stolen_range_size - 0x12,
			 stolen_range_size, CACHEABLE_TAG);
	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
			 READONLY_TAG);
	memranges_each_entry(ptr, &test_memrange)
	{
		const unsigned long tag = range_entry_tag(ptr);
		assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
		assert_int_equal(
			range_entry_base(ptr),
			ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
		assert_int_equal(
			range_entry_end(ptr),
			ALIGN_UP(res_mock[tag].base + res_mock[tag].size, MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their base and
   end addresses */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
						    size_t ranges_count, resource_t alignment)
{
	size_t count = 0;
	struct range_entry *ptr;

	memranges_each_entry(ptr, ranges)
	{
		assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
		assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

		count++;
	}
	assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for the correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	struct memranges test_memrange;
	struct range_entry range_entries[4] = {0};

	/* Test memranges_init() correctness */
	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	/* Expect all entries to be aligned to 4KiB (2^12) */
	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	/* Expect the ranges list to be empty after teardown */
	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_with_alignment() correctness with an alignment of 1KiB (2^10) */
	memranges_init_with_alignment(&test_memrange, cacheable, cacheable, CACHEABLE_TAG, 10);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty() correctness */
	memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));


	/* Test memranges_init_empty_with_alignment() correctness with an alignment of
	   8KiB (2^13) */
	memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
					    ARRAY_SIZE(range_entries), 13);
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting only resources with the memory flag set */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
	/* Accept only memory resources */
	return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting resources with the memory flag set */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
	/* Reject memory resources */
	return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject resources. */
static void test_memrange_add_resources_filter(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges test_memrange;
	struct range_entry *ptr;
	size_t count = 0;
	size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

	/* Check if the filter accepts resources correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_mem_only);

	/* Check if the filter accepted the desired ranges. */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
		assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
		assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(2, count);
	count = 0;
	memranges_teardown(&test_memrange);

	/* Check if the filter rejects resources correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_non_mem);

	check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_memrange_basic),
		cmocka_unit_test(test_memrange_clone_insert),
		cmocka_unit_test(test_memrange_holes),
		cmocka_unit_test(test_memrange_steal),
		cmocka_unit_test(test_memrange_init_and_teardown),
		cmocka_unit_test(test_memrange_add_resources_filter),
	};

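	/* Each group runs the same tests against a different mock resource set: one with
	   a boundary exactly on 4GiB, one with boundaries 1 byte away from it, and one
	   with a range crossing it. */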
	return cmocka_run_group_tests_name(__TEST_NAME__ "(Boundary on 4GiB)", tests,
					   setup_test_1, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Boundaries 1 byte from 4GiB)",
					     tests, setup_test_2, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Range over 4GiB boundary)", tests,
					     setup_test_3, NULL);
}