/* SPDX-License-Identifier: GPL-2.0-only */

#include <bootmem.h>
#include <commonlib/coreboot_tables.h>
#include <device/device.h>
#include <device/resource.h>
#include <memrange.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <tests/test.h>

/* Stubs defined to satisfy linker dependencies */
void cbmem_add_bootmem(void)
{
}

void bootmem_arch_add_ranges(void)
{
}

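/* Expected memory map entry used by the checks below; it mirrors the fields
   of a coreboot table memory range (base, size, type). */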
struct bootmem_ranges_t {
	uint64_t start;
	uint64_t size;
	uint32_t type;
};

/* Define symbols for the regions required by bootmem, and constants for the
   regions that do not need to exist in the test executable. No backing memory
   is needed for the regions; only start, end and size symbols are required.
   Only the values actually used by the tests are defined. */
#define ZERO_REGION_START ((uintptr_t)0x0)
#define ZERO_REGION_SIZE ((uintptr_t)0x10000)

TEST_REGION_UNALLOCATED(program, 0x10000000, 0x40000);
#define PROGRAM_START ((uintptr_t)_program)
#define PROGRAM_SIZE REGION_SIZE(program)

#define CACHEABLE_START ((uintptr_t)0x10000000ULL)
#define CACHEABLE_SIZE ((uintptr_t)0x100000000ULL)
#define CACHEABLE_END ((uintptr_t)(CACHEABLE_START + CACHEABLE_SIZE))

/* The stack region's end address is hardcoded because a `<const> - <symbol>`
   expression does not work in GCC */
TEST_REGION_UNALLOCATED(stack, 0x10040000, 0x1000);
#define STACK_START ((uintptr_t)_stack)
#define STACK_SIZE REGION_SIZE(stack)
#define STACK_END ((uintptr_t)(0x10040000 + 0x1000))

#define RESERVED_START ((uintptr_t)0x100000000ULL)
#define RESERVED_SIZE ((uintptr_t)0x100000)
#define RESERVED_END ((uintptr_t)(RESERVED_START + RESERVED_SIZE))

TEST_REGION_UNALLOCATED(ramstage, 0x10000000, 0x41000);
#define RAMSTAGE_START ((uintptr_t)_ramstage)
#define RAMSTAGE_SIZE REGION_SIZE(ramstage)

#define CACHEABLE_START_TO_RESERVED_START_SIZE (RESERVED_START - CACHEABLE_START)
#define RESERVED_END_TO_CACHEABLE_END_SIZE (CACHEABLE_END - RESERVED_END)
#define STACK_END_TO_RESERVED_START_SIZE (RESERVED_START - STACK_END)

/* Bootmem layout for tests
 *
 * Regions marked with asterisks (***) are not visible to the OS.
 *
 * +------------------ZERO-----------------+ <-0x0
 * |                                       |
 * +---------------------------------------+ <-0x10000
 *
 * +-------+----CACHEABLE_MEMORY---------+-+ <-0x10000000
 * |       |        ***PROGRAM***        | |
 * |       +-----------------------------+ | <-0x10040000
 * |       |         ***STACK***         | |
 * |       +-----------------------------+ | <-0x10041000
 * |                                       |
 * |                                       |
 * |                                       |
 * |       +-------RESERVED_MEMORY-------+ | <-0x100000000
 * |       |                             | |
 * |       |                             | |
 * |       |                             | |
 * |       +-----------------------------+ | <-0x100100000
 * |                                       |
 * |                                       |
 * +---------------------------------------+ <-0x110000000
 *
 * Ramstage covers the PROGRAM and STACK regions.
 */
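/* Expected OS-visible view of the layout above: the program and stack areas
   are reported as plain RAM, so only the reserved hole splits the cacheable
   range. */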
struct bootmem_ranges_t os_ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = CACHEABLE_START, .size = CACHEABLE_START_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[2] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[3] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

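/* Expected full bootmem view: the ramstage area keeps its own
   BM_MEM_RAMSTAGE tag instead of being folded into plain RAM. */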
struct bootmem_ranges_t ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = RAMSTAGE_START, .size = RAMSTAGE_SIZE,
		.type = BM_MEM_RAMSTAGE },
	[2] = { .start = STACK_END, .size = STACK_END_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[3] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[4] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

struct bootmem_ranges_t *os_ranges = os_ranges_mock;
struct bootmem_ranges_t *ranges = ranges_mock;

/* Note that the reserved resource overlaps the cacheable one */
struct resource res_mock[] = {
	{ .base = ZERO_REGION_START, .size = ZERO_REGION_SIZE, .next = &res_mock[1],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = CACHEABLE_START, .size = CACHEABLE_SIZE, .next = &res_mock[2],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = RESERVED_START, .size = RESERVED_SIZE, .next = NULL,
	  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM | IORESOURCE_ASSIGNED }
};

/* Device simulating RAM */
struct device mem_device_mock = {
	.enabled = 1,
	.resource_list = res_mock,
	.next = NULL
};

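/* bootmem builds its memory map by walking the resource lists of the global
   device list, so pointing all_devices at the mock above is all the setup
   that is needed. */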
struct device *all_devices = &mem_device_mock;

/* Simplified version of the translation for the purpose of the tests;
   only the types used in the mocks need to be handled */
static uint32_t bootmem_to_lb_tag(const enum bootmem_type tag)
{
	switch (tag) {
	case BM_MEM_RAM:
		return LB_MEM_RAM;
	case BM_MEM_RESERVED:
		return LB_MEM_RESERVED;
	default:
		return LB_MEM_RESERVED;
	}
}

static void test_bootmem_write_mem_table(void **state)
{
	/* Space for 10 lb_memory_range entries to be safe */
	const size_t lb_mem_max_size = sizeof(struct lb_memory)
		+ 10 * sizeof(struct lb_memory_range);
	const size_t expected_allocation_size =
		(sizeof(struct lb_memory)
		 + ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	const size_t required_unused_space_size = lb_mem_max_size - expected_allocation_size;
	int i;
	struct lb_memory *lb_mem;
	/* Allocate a buffer and fill it with a sentinel value; it is used to
	   verify that bootmem_write_memory_table() writes no more than the
	   expected amount of data. */
	u8 sentinel_value_buffer[required_unused_space_size];
	memset(sentinel_value_buffer, 0x77, required_unused_space_size);

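	/* Build a minimal coreboot table record. bootmem_write_memory_table()
	   appends one lb_memory_range per OS-visible region and grows
	   lb_mem->size accordingly. */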
	lb_mem = malloc(lb_mem_max_size);
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);
	/* Fill the rest of the buffer with the sentinel value */
	memset(((u8 *)lb_mem) + expected_allocation_size, 0x77, required_unused_space_size);

	bootmem_write_memory_table(lb_mem);

	/* Only the os_ranges_mock entries should be visible in the coreboot table */
	assert_int_equal(lb_mem->size, sizeof(*lb_mem) +
			 ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	assert_memory_equal(sentinel_value_buffer,
			    ((u8 *)lb_mem) + expected_allocation_size,
			    required_unused_space_size);

	for (i = 0; i < (lb_mem->size - sizeof(*lb_mem)) / sizeof(struct lb_memory_range);
	     i++) {
		assert_int_equal(lb_mem->map[i].start, os_ranges[i].start);
		assert_int_equal(lb_mem->map[i].size, os_ranges[i].size);
		assert_int_equal(lb_mem->map[i].type, bootmem_to_lb_tag(os_ranges[i].type));
	}

	free(lb_mem);
}

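/* Global counters used as cursors/entry counts by the walk callbacks below;
   each test resets them before walking. */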
int os_bootmem_walk_cnt;
int bootmem_walk_cnt;

static bool verify_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), os_ranges[os_bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), os_ranges[os_bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), os_ranges[os_bootmem_walk_cnt].type);

	os_bootmem_walk_cnt++;

	return true;
}

static bool verify_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), ranges[bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), ranges[bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), ranges[bootmem_walk_cnt].type);

	bootmem_walk_cnt++;

	return true;
}

static bool count_entries_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	os_bootmem_walk_cnt++;

	return true;
}

static bool count_entries_bootmem_walk(const struct range_entry *r, void *arg)
{
	bootmem_walk_cnt++;

	return true;
}

/* This function initializes bootmem using bootmem_write_memory_table().
   bootmem_init() cannot be called directly because it is static. */
static void init_memory_table_library(void)
{
	struct lb_memory *lb_mem;

	/* Allocate space for 10 lb_memory_range entries to be safe */
	lb_mem = malloc(sizeof(*lb_mem) + 10 * sizeof(struct lb_memory_range));
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);

	/* We need to call this only to initialize the library */
	bootmem_write_memory_table(lb_mem);

	free(lb_mem);
}

static void test_bootmem_add_range(void **state)
{
	init_memory_table_library();

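	/* Baseline: the mock layout yields 4 OS-visible entries and 5 bootmem
	   entries before anything is added. */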
	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);

	expect_assert_failure(
		bootmem_add_range(ALIGN_UP(PROGRAM_START, 4096),
				  ALIGN_DOWN(PROGRAM_SIZE / 2, 4096),
				  BM_MEM_ACPI)
	);

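	/* The mocked assert is expected to fire because BM_MEM_ACPI is an
	   OS-visible type and the OS-visible table has already been written
	   out; the range still ends up in the regular bootmem view, which is
	   why only the second count below grows. */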
	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 6);

	/* No assert failure expected, as BM_MEM_RAMSTAGE ranges are not added
	   to the OS-visible bootmem view */
	bootmem_add_range(ALIGN_UP(STACK_END + 4096, 4096),
			  ALIGN_DOWN(STACK_END_TO_RESERVED_START_SIZE / 2, 4096),
			  BM_MEM_RAMSTAGE);

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	/* Two entries are added because the new range lands in the middle of
	   an existing one and splits it */
	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 8);
}

static void test_bootmem_walk(void **state)
{
	init_memory_table_library();

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(verify_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(verify_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);
}

static void test_bootmem_region_targets_type(void **state)
{
	int ret;
	u64 subregion_start;
	u64 subregion_size;

	init_memory_table_library();

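	/* bootmem_region_targets_type() is expected to report success only
	   when the whole queried range lies within a single region of the
	   given type. */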
	/* Single whole region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure because of the wrong bootmem_type */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);

	/* Range covering one byte more than the region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE + 1, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 0);

	/* Expect success for a subregion of ramstage stretching from a point
	   in the program range to a point in the stack range. */
	subregion_start = PROGRAM_START + PROGRAM_SIZE / 4;
	subregion_size = STACK_END - STACK_SIZE / 4 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure for a range covering regions with different tags,
	   as there is no BM_MEM_CACHEABLE type spanning them */
	subregion_start = STACK_START + STACK_SIZE / 2;
	subregion_size = RESERVED_START + RESERVED_SIZE / 4 * 3 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAM);
	assert_int_equal(ret, 0);

	/* The middle of a range should not fail */
	ret = bootmem_region_targets_type(RESERVED_START + RESERVED_SIZE / 4,
					  RESERVED_SIZE / 2, BM_MEM_RESERVED);
	assert_int_equal(ret, 1);

	/* Subregion bordering the end of the range */
	ret = bootmem_region_targets_type(RESERVED_END + RESERVED_END_TO_CACHEABLE_END_SIZE / 2,
					  RESERVED_END_TO_CACHEABLE_END_SIZE / 2, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Region starting at address zero */
	ret = bootmem_region_targets_type(ZERO_REGION_START, ZERO_REGION_SIZE, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Expect failure when passing zero as the size */
	ret = bootmem_region_targets_type(ZERO_REGION_START, 0, BM_MEM_RAM);
	assert_int_equal(ret, 0);
	ret = bootmem_region_targets_type(RESERVED_START, 0, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);
}

/* Action function used to check the alignment of base and size of allocated
   ranges */
static bool verify_bootmem_allocate_buffer(const struct range_entry *r, void *arg)
{
	if (range_entry_tag(r) == BM_MEM_PAYLOAD) {
		assert_true(IS_ALIGNED(range_entry_base(r), 4096));
		assert_true(IS_ALIGNED(range_entry_size(r), 4096));
	}

	return true;
}

static void test_bootmem_allocate_buffer(void **state)
{
	void *buf;
	void *prev;

	init_memory_table_library();

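	/* bootmem_allocate_buffer() is expected to carve a BM_MEM_PAYLOAD
	   range out of OS-visible RAM below the 32-bit boundary, with base
	   and size aligned to 4 KiB. */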
	/* All allocated buffers should be below the 32-bit boundary */
	buf = bootmem_allocate_buffer(1ULL << 32);
	assert_null(buf);

	/* Try a size too big for our BM_MEM_RAM range below the 32-bit boundary */
	buf = bootmem_allocate_buffer(RESERVED_START - PROGRAM_START);
	assert_null(buf);

	/* Two working cases */
	buf = bootmem_allocate_buffer(0xE0000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xE0000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);

	prev = buf;
	buf = bootmem_allocate_buffer(0xF000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xF000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that the newly allocated buffer does not overlap the
	   previously allocated range */
	assert_not_in_range((uintptr_t)buf, (uintptr_t)prev, (uintptr_t)prev + 0xE0000000);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);

	/* Run out of memory for new allocations */
	buf = bootmem_allocate_buffer(0x1000000);
	assert_null(buf);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_bootmem_write_mem_table),
		cmocka_unit_test(test_bootmem_add_range),
		cmocka_unit_test(test_bootmem_walk),
		cmocka_unit_test(test_bootmem_allocate_buffer),
		cmocka_unit_test(test_bootmem_region_targets_type)
	};

	return cb_run_group_tests(tests, NULL, NULL);
}