Jakub Czapiga | a2b21c2 | 2020-10-08 12:37:40 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | |
| 3 | /* |
| 4 | * Include Unit Under Test source code directly instead of linking it. |
| 5 | * This will allow access to internal structures and data without having |
| 6 | * to extract them to another header file. |
| 7 | */ |
| 8 | #include "../lib/imd_cbmem.c" |
| 9 | |
| 10 | #include <tests/test.h> |
| 11 | #include <stdlib.h> |
| 12 | #include <string.h> |
| 13 | #include <commonlib/bsd/helpers.h> |
| 14 | #include <imd.h> |
| 15 | #include <cbmem.h> |
| 16 | #include <imd_private.h> |
| 17 | |
| 18 | #include <tests/lib/imd_cbmem_data.h> |
| 19 | |
| 20 | #define CBMEM_ENTRY_ID 0xA001 |
| 21 | |
| 22 | static void reset_imd(void) |
| 23 | { |
| 24 | imd.lg.limit = (uintptr_t)NULL; |
| 25 | imd.lg.r = NULL; |
| 26 | imd.sm.limit = (uintptr_t)NULL; |
| 27 | imd.sm.r = NULL; |
| 28 | |
| 29 | cbmem_initialized = 0; |
| 30 | } |
| 31 | |
/* This implementation allows imd_cbmem module tests without linking lib/cbmem_common.c.
   It is a cmocka mock: tests arm it with expect_function_call()/ignore_function_calls()
   to verify when the cbmem code triggers init hooks. The is_recovery argument is
   intentionally ignored here. */
void cbmem_run_init_hooks(int is_recovery)
{
	function_called();
}
| 38 | |
Elyes Haouas | 799c321 | 2022-11-09 14:00:44 +0100 | [diff] [blame] | 39 | uintptr_t cbmem_top_chipset(void) |
Jakub Czapiga | a2b21c2 | 2020-10-08 12:37:40 +0200 | [diff] [blame] | 40 | { |
Elyes Haouas | 799c321 | 2022-11-09 14:00:44 +0100 | [diff] [blame] | 41 | return _cbmem_top_ptr; |
Jakub Czapiga | a2b21c2 | 2020-10-08 12:37:40 +0200 | [diff] [blame] | 42 | } |
| 43 | |
| 44 | static void *get_cbmem_ptr(void) |
| 45 | { |
| 46 | void *cbmem_top_ptr = (void *)_cbmem_top_ptr; |
| 47 | if (cbmem_top_ptr) |
| 48 | return cbmem_top_ptr - CBMEM_SIZE; |
| 49 | else |
| 50 | return NULL; |
| 51 | } |
| 52 | |
| 53 | static void clear_cbmem(void) |
| 54 | { |
| 55 | void *ptr = get_cbmem_ptr(); |
| 56 | if (ptr) |
| 57 | memset(ptr, 0, CBMEM_SIZE); |
| 58 | } |
| 59 | |
/* Full reset between tests: wipe both the imd bookkeeping structures
   and the backing cbmem buffer contents. */
static void reset_and_clear_cbmem(void)
{
	reset_imd();
	clear_cbmem();
}
| 65 | |
/* Build a small, known cbmem layout: empty-initialize cbmem, then add two
   large-region and two small-region entries (IDs/sizes come from
   tests/lib/imd_cbmem_data.h). Used both as a recovery fixture and as the
   reference layout for test_general_data_structure(). */
void prepare_simple_cbmem(void)
{
	reset_and_clear_cbmem();

	/* cbmem_initialize_empty() fires the init hooks exactly once. */
	expect_function_call(cbmem_run_init_hooks);
	cbmem_initialize_empty();

	cbmem_entry_add(CBMEM_ENTRY_1_ID, CBMEM_ENTRY_1_SIZE);
	cbmem_entry_add(CBMEM_ENTRY_2_ID, CBMEM_ENTRY_2_SIZE);

	cbmem_entry_add(CBMEM_ENTRY_SM_1_ID, CBMEM_ENTRY_SM_1_SIZE);
	cbmem_entry_add(CBMEM_ENTRY_SM_2_ID, CBMEM_ENTRY_SM_2_SIZE);
}
| 79 | |
/* Verify that cbmem_top() resolves to the expected pointer for the stage
   this test binary was built for (the ENV_* macros are compile-time). */
static void test_cbmem_top(void **state)
{
	cbmem_top_init_once();

	/* In romstage, cbmem_top() must come from the chipset hook. */
	if (ENV_ROMSTAGE)
		assert_ptr_equal(cbmem_top_chipset(), cbmem_top());

	/* In later stages, it must be the cached _cbmem_top_ptr value. */
	if (ENV_POSTCAR || ENV_RAMSTAGE)
		assert_ptr_equal((void *)_cbmem_top_ptr, cbmem_top());
}
| 90 | |
/* cbmem_initialize_empty() must create a fresh cbmem containing only the
   mandatory bookkeeping entries and run the init hooks (non-recovery). */
static void test_cbmem_initialize_empty(void **state)
{
	const struct cbmem_entry *found;

	/* Expect clean call without recovery */
	expect_function_call(cbmem_run_init_hooks);
	cbmem_initialize_empty();

	/* The small-entry region is always created on empty init. */
	found = cbmem_entry_find(SMALL_REGION_ID);
	assert_non_null(found);
	/* Check that cbmem has only root, large and small entry. */
	assert_int_equal(2, ((struct imd_root *)imd.lg.r)->num_entries);
	assert_int_equal(1, ((struct imd_root *)imd.sm.r)->num_entries);
}
| 105 | |
/* cbmem_initialize_empty_id_size() must additionally allocate an entry with
   the caller-provided id/size alongside the mandatory bookkeeping entries. */
static void test_cbmem_initialize_empty_id_size(void **state)
{
	const struct cbmem_entry *entry1, *entry2;

	expect_function_call(cbmem_run_init_hooks);
	cbmem_initialize_empty_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);

	entry1 = cbmem_entry_find(SMALL_REGION_ID);
	entry2 = cbmem_entry_find(CBMEM_ENTRY_ID);

	assert_non_null(entry1);
	assert_non_null(entry2);
	assert_ptr_not_equal(entry1, entry2);
	/* Check that cbmem has root, large, small entries
	   and entry with id passed to init function. */
	assert_int_equal(3, ((struct imd_root *)imd.lg.r)->num_entries);
	assert_int_equal(1, ((struct imd_root *)imd.sm.r)->num_entries);
}
| 124 | |
/* cbmem_initialize() recovers an existing cbmem: it must fail (return 1)
   when there is nothing to recover and succeed (return 0) after a cbmem
   has been prepared and the local imd state reset. */
static void test_cbmem_initialize(void **state)
{
	int res;

	/* Expect call to fail as there is no previous cbmem to recover */
	res = cbmem_initialize();
	assert_int_equal(1, res);

	/* Create cbmem with few entries and check if initialization will recover */
	prepare_simple_cbmem();
	reset_imd();
	expect_function_call(cbmem_run_init_hooks);
	res = cbmem_initialize();
	assert_int_equal(0, res);
}
| 140 | |
/* Ramstage variant: after recovering an existing cbmem,
   cbmem_initialize_id_size() may still add the requested entry.
   Non-static (and compiled under #else in main()) so the unused variant
   does not trigger an unused-function warning. */
void test_cbmem_initialize_id_size_ramstage(void **state)
{
	int res;
	const struct cbmem_entry *entry1, *entry2;

	/* Expect call to fail as there is no previous cbmem to recover */
	res = cbmem_initialize_id_size(0, 0);
	assert_int_equal(1, res);

	reset_and_clear_cbmem();

	/* Still nothing to recover after a full reset — must fail again. */
	res = cbmem_initialize_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	assert_int_equal(1, res);

	/* Initialize empty cbmem with small region and check if next initialization
	   correctly recovers and creates its root entry with small region */
	expect_function_call(cbmem_run_init_hooks);
	cbmem_initialize_empty_id_size(0, 0);
	expect_function_call(cbmem_run_init_hooks);
	res = cbmem_initialize_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	assert_int_equal(0, res);

	entry1 = cbmem_entry_find(SMALL_REGION_ID);
	entry2 = cbmem_entry_find(CBMEM_ENTRY_ID);
	assert_non_null(entry1);
	assert_non_null(entry2);
	assert_ptr_not_equal(entry1, entry2);
	/* Check that cbmem has root, large, small entries and entry with id passed
	   to init function. */
	assert_int_equal(3, ((struct imd_root *)imd.lg.r)->num_entries);
	assert_int_equal(1, ((struct imd_root *)imd.sm.r)->num_entries);
}
| 173 | |
/* Romstage variant: recovery locks the imd, so cbmem_initialize_id_size()
   must NOT create the requested entry when it was absent from the recovered
   image, while still recovering all pre-existing entries. Non-static for
   the same #if-related reason as the ramstage variant. */
void test_cbmem_initialize_id_size_romstage(void **state)
{
	int res;
	const struct cbmem_entry *entry1, *entry2;

	/* Expect call to fail as there is no previous cbmem to recover */
	res = cbmem_initialize_id_size(0, 0);
	assert_int_equal(1, res);

	/* Initialize empty cbmem with small region and check if next initialization
	   correctly recovers and creates its root entry with small region */
	expect_function_call(cbmem_run_init_hooks);
	cbmem_initialize_empty_id_size(0, 0);
	expect_function_call(cbmem_run_init_hooks);
	res = cbmem_initialize_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	assert_int_equal(0, res);

	entry1 = cbmem_entry_find(SMALL_REGION_ID);
	assert_non_null(entry1);

	/* Romstage locks imd cbmem initialization after recovery,
	   so entry with CBMEM_ENTRY_ID id is not present if it was not recovered. */
	entry2 = cbmem_entry_find(CBMEM_ENTRY_ID);
	assert_null(entry2);

	/* Initialize cbmem with few large and small entries */
	prepare_simple_cbmem();

	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_2_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_2_ID));

	reset_imd();

	expect_function_call(cbmem_run_init_hooks);
	res = cbmem_initialize_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	assert_int_equal(0, res);

	/* Initialization function should be able to recover entries left in cbmem
	   while having imd structure clean */
	entry1 = cbmem_entry_find(SMALL_REGION_ID);
	assert_non_null(entry1);
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_2_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_2_ID));
}
| 222 | |
/* cbmem_recovery(): with is_wakeup=1 existing entries survive; with
   is_wakeup=0 cbmem is reset to its empty initial state. */
static void test_cbmem_recovery(void **state)
{
	int is_wakeup = 1;

	/* Reset imd, initialize cbmem and add entries for recovery */
	prepare_simple_cbmem();
	expect_function_call(cbmem_run_init_hooks);
	assert_int_equal(0, cbmem_recovery(is_wakeup));

	/* Check that entries have been correctly recovered */
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_2_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_1_ID));
	assert_non_null(cbmem_entry_find(CBMEM_ENTRY_SM_2_ID));

	is_wakeup = 0;
	expect_function_call(cbmem_run_init_hooks);
	assert_int_equal(0, cbmem_recovery(is_wakeup));

	/* Check that after recovery with is_wakeup equal to 0 the cbmem is empty
	   and in initial state. */
	assert_null(cbmem_entry_find(CBMEM_ENTRY_1_ID));
	assert_null(cbmem_entry_find(CBMEM_ENTRY_2_ID));
	assert_null(cbmem_entry_find(CBMEM_ENTRY_SM_1_ID));
	assert_null(cbmem_entry_find(CBMEM_ENTRY_SM_2_ID));
	/* Check that cbmem has root, large and small entry. */
	assert_int_equal(2, ((struct imd_root *)imd.lg.r)->num_entries);
	assert_int_equal(1, ((struct imd_root *)imd.sm.r)->num_entries);
}
| 252 | |
/* cbmem_entry_add(): new ids create distinct entries, zero-size adds are
   rejected, and re-adding an existing id returns the original entry. */
static void test_cbmem_entry_add(void **state)
{
	/* IDs used for testing. Don't have to be sequential.
	   Must not be equal to SMALL_REGION_ID. */
	const int id1 = 0x10;
	const int id2 = 0x11;
	const int id3 = 0x12;
	const struct cbmem_entry *entry1, *entry2;
	const struct cbmem_entry *entry_ret2, *entry_ret3;

	/* cbmem_run_init_hooks() will be called by init functions
	   but this test does not aim to check it */
	ignore_function_calls(cbmem_run_init_hooks);

	cbmem_initialize_empty_id_size(id1, CBMEM_ROOT_SIZE);

	/* Expect NULL while looking for nonexistent entries */
	assert_null(cbmem_entry_find(id2));
	assert_null(cbmem_entry_find(id3));

	entry_ret2 = cbmem_entry_add(id2, CBMEM_ROOT_SIZE);
	/* Expect error when trying to add entry with zero size */
	assert_null(cbmem_entry_add(id3, 0));

	/* Check if entries have been added correctly and are not the same */
	entry1 = cbmem_entry_find(id1);
	entry2 = cbmem_entry_find(id2);
	assert_non_null(entry1);
	assert_non_null(entry2);
	assert_ptr_not_equal(entry1, entry2);
	assert_ptr_equal(entry_ret2, entry2);

	/* Add entry again and make sure that it has been
	   found instead of creating again. */
	entry_ret3 = cbmem_entry_add(id2, CBMEM_ROOT_SIZE / 2);
	assert_ptr_equal(entry_ret2, entry_ret3);
}
| 290 | |
| 291 | static void test_cbmem_add(void **state) |
| 292 | { |
| 293 | const int id0 = 0x55; |
| 294 | const int id1 = 0x66; |
| 295 | const int id2 = 0x77; |
| 296 | const int id3 = 0x88; |
| 297 | const int entry1_size = 0x2000; |
| 298 | const int entry2_size = 0x4d1; |
| 299 | const int entry3_size = 0x30; |
| 300 | void *entry1, *entry2, *entry3, *entry4; |
| 301 | |
| 302 | ignore_function_calls(cbmem_run_init_hooks); |
| 303 | |
| 304 | cbmem_initialize_empty_id_size(id1, entry1_size); |
| 305 | entry2 = cbmem_add(id2, entry2_size); |
| 306 | entry3 = cbmem_add(id3, entry3_size); |
| 307 | entry1 = cbmem_find(id1); |
| 308 | |
| 309 | /* All pointers should be non-null and distinct. */ |
| 310 | assert_non_null(entry1); |
| 311 | assert_non_null(entry2); |
| 312 | assert_non_null(entry3); |
| 313 | assert_ptr_not_equal(entry1, entry2); |
| 314 | assert_ptr_not_equal(entry1, entry3); |
| 315 | assert_ptr_not_equal(entry2, entry3); |
| 316 | |
| 317 | /* Adding the same ID should yield the same entry pointer. */ |
| 318 | entry4 = cbmem_add(id2, entry2_size); |
| 319 | assert_ptr_equal(entry2, entry4); |
| 320 | |
| 321 | /* Expect error while trying to add range with zero size */ |
| 322 | assert_null(cbmem_add(id0, 0)); |
| 323 | } |
| 324 | |
| 325 | static void test_cbmem_entry_find(void **state) |
| 326 | { |
| 327 | const int id1 = 0xA0; |
| 328 | const int id2 = 0xDD; |
| 329 | const int id3 = 0xBD; |
| 330 | const size_t entry1_size = CBMEM_ROOT_SIZE; |
| 331 | const size_t entry2_size = CBMEM_ROOT_SIZE / 2; |
| 332 | const size_t entry3_size = 6321; |
| 333 | const struct cbmem_entry *cbm_e1, *cbm_e2, *cbm_e3; |
| 334 | const struct cbmem_entry *entry1, *entry2, *entry3; |
| 335 | |
| 336 | ignore_function_calls(cbmem_run_init_hooks); |
| 337 | |
| 338 | cbmem_initialize_empty(); |
| 339 | cbm_e1 = cbmem_entry_add(id1, entry1_size); |
| 340 | cbm_e2 = cbmem_entry_add(id2, entry2_size); |
| 341 | cbm_e3 = cbmem_entry_add(id3, entry3_size); |
| 342 | |
| 343 | /* Check pointers correctness and size for each entry */ |
| 344 | entry1 = cbmem_entry_find(id1); |
| 345 | assert_ptr_equal(cbm_e1, entry1); |
| 346 | assert_int_equal(0, (uintptr_t)cbmem_entry_start(cbm_e1) % CBMEM_SM_ALIGN); |
| 347 | assert_int_equal(entry1_size, cbmem_entry_size(entry1)); |
| 348 | |
| 349 | entry2 = cbmem_entry_find(id2); |
| 350 | assert_ptr_equal(cbm_e2, entry2); |
| 351 | assert_int_equal(0, (uintptr_t)cbmem_entry_start(cbm_e2) % CBMEM_SM_ALIGN); |
| 352 | assert_int_equal(entry2_size, cbmem_entry_size(entry2)); |
| 353 | |
| 354 | entry3 = cbmem_entry_find(id3); |
| 355 | assert_ptr_equal(cbm_e3, entry3); |
| 356 | assert_int_equal(0, (uintptr_t)cbmem_entry_start(cbm_e3) % CBMEM_SM_ALIGN); |
| 357 | assert_int_equal(entry3_size, cbmem_entry_size(entry3)); |
| 358 | } |
| 359 | |
| 360 | static void test_cbmem_find(void **state) |
| 361 | { |
| 362 | const int id1 = 0x30; |
| 363 | const int id2 = 0x22; |
| 364 | const int id3 = 0x101; |
| 365 | void *cbm_e1, *cbm_e2, *entry1, *entry2; |
| 366 | |
| 367 | ignore_function_calls(cbmem_run_init_hooks); |
| 368 | |
| 369 | cbmem_initialize_empty(); |
| 370 | cbm_e1 = cbmem_add(id1, CBMEM_ROOT_SIZE); |
| 371 | cbm_e2 = cbmem_add(id2, CBMEM_ROOT_SIZE); |
| 372 | |
| 373 | entry1 = cbmem_find(id1); |
| 374 | assert_non_null(entry1); |
| 375 | assert_ptr_equal(cbm_e1, entry1); |
| 376 | |
| 377 | entry2 = cbmem_find(id2); |
| 378 | assert_non_null(entry2); |
| 379 | assert_ptr_equal(cbm_e2, entry2); |
| 380 | |
| 381 | /* Expect error when looking for non-existent id */ |
| 382 | assert_null(cbmem_find(id3)); |
| 383 | } |
| 384 | |
| 385 | static void test_cbmem_entry_remove(void **state) |
| 386 | { |
| 387 | const int id1 = 0x2D; |
| 388 | const int id2 = 0x3D; |
| 389 | const int id3 = 0x4D; |
| 390 | const struct cbmem_entry *cbm_e1, *cbm_e2; |
| 391 | |
| 392 | ignore_function_calls(cbmem_run_init_hooks); |
| 393 | |
| 394 | cbmem_initialize_empty(); |
| 395 | cbm_e1 = cbmem_entry_add(id1, CBMEM_ROOT_SIZE); |
| 396 | cbm_e2 = cbmem_entry_add(id2, CBMEM_ROOT_SIZE); |
| 397 | |
| 398 | /* Entries can be removed only in reverse order they have been added. */ |
| 399 | assert_int_equal(-1, cbmem_entry_remove(cbm_e1)); |
| 400 | assert_int_equal(0, cbmem_entry_remove(cbm_e2)); |
| 401 | assert_int_equal(0, cbmem_entry_remove(cbm_e1)); |
| 402 | |
| 403 | /* Expect error when removing non-existent entry */ |
| 404 | assert_int_equal(-1, cbmem_entry_remove(cbmem_entry_find(id3))); |
| 405 | } |
| 406 | |
| 407 | static void test_cbmem_entry_size(void **state) |
| 408 | { |
| 409 | const int id1 = 0x4422; |
| 410 | const int id2 = 0x2137; |
| 411 | const int id3 = 0xb111; |
| 412 | const size_t size1 = CBMEM_ROOT_SIZE * 4; |
| 413 | const size_t size2 = 0x43; |
| 414 | const size_t size3 = CBMEM_ROOT_SIZE * 8 + 7; |
| 415 | |
| 416 | ignore_function_calls(cbmem_run_init_hooks); |
| 417 | |
| 418 | cbmem_initialize_empty_id_size(id1, size1); |
| 419 | assert_non_null(cbmem_entry_add(id2, size2)); |
| 420 | assert_non_null(cbmem_entry_add(id3, size3)); |
| 421 | |
| 422 | /* Entry size needs not to be aligned. |
| 423 | It has to be the same as provided while adding it. */ |
| 424 | assert_int_equal(size1, cbmem_entry_size(cbmem_entry_find(id1))); |
| 425 | assert_int_equal(size2, cbmem_entry_size(cbmem_entry_find(id2))); |
| 426 | assert_int_equal(size3, cbmem_entry_size(cbmem_entry_find(id3))); |
| 427 | } |
| 428 | |
/* cbmem_entry_start() must return the same address that cbmem_find()
   yields for the same id, for entries of various sizes. */
static void test_cbmem_entry_start(void **state)
{
	const int id1 = 0x62;
	const int id2 = 0x26;

	ignore_function_calls(cbmem_run_init_hooks);

	cbmem_initialize_empty_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	cbmem_entry_find(CBMEM_ENTRY_ID);
	cbmem_entry_add(id1, 0x40);
	cbmem_entry_add(id2, CBMEM_ROOT_SIZE * 2);

	/* Check if start address of found entry is the same
	   as the one returned by cbmem_find() function */
	assert_ptr_equal(cbmem_find(CBMEM_ENTRY_ID),
			 cbmem_entry_start(cbmem_entry_find(CBMEM_ENTRY_ID)));
	assert_ptr_equal(cbmem_find(id1), cbmem_entry_start(cbmem_entry_find(id1)));
	assert_ptr_equal(cbmem_find(id2), cbmem_entry_start(cbmem_entry_find(id2)));
}
| 448 | |
/* Reimplementation for testing purposes. This cmocka mock records the
   arguments so test_cbmem_add_bootmem() can verify the exact range and
   tag cbmem reports via expect_value(). */
void bootmem_add_range(uint64_t start, uint64_t size, const enum bootmem_type tag)
{
	check_expected(start);
	check_expected(size);
	check_expected(tag);
}
| 456 | |
/* cbmem_add_bootmem() must report the cbmem region as a BM_MEM_TABLE
   range whose end coincides with the aligned cbmem top, and must not
   alter the region itself. */
static void test_cbmem_add_bootmem(void **state)
{
	void *base_ptr = NULL;
	size_t size = 0;
	const int id1 = 0xCA;
	const int id2 = 0xEA;
	const int id3 = 0xDA;
	const size_t size1 = 1024;
	const size_t size2 = 128;
	const size_t size3 = 8192;

	ignore_function_calls(cbmem_run_init_hooks);

	cbmem_initialize_empty_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);
	cbmem_entry_add(id1, size1);
	cbmem_entry_add(id2, size2);
	cbmem_entry_add(id3, size3);

	/* The region must end exactly at the aligned-down cbmem top. */
	cbmem_get_region(&base_ptr, &size);
	assert_int_equal(ALIGN_DOWN(_cbmem_top_ptr, LIMIT_ALIGN), base_ptr + size);

	/* bootmem_add_range() mock verifies the exact reported range/tag. */
	expect_value(bootmem_add_range, start, base_ptr);
	expect_value(bootmem_add_range, size, size);
	expect_value(bootmem_add_range, tag, BM_MEM_TABLE);
	cbmem_add_bootmem();

	/* Check that adding bootmem does not change base or size of cbmem */
	cbmem_get_region(&base_ptr, &size);
	assert_int_equal(ALIGN_DOWN(_cbmem_top_ptr, LIMIT_ALIGN), base_ptr + size);
}
| 487 | |
/* cbmem_get_region(): the reported size must grow with every large
   allocation but stay constant for small ones, which are carved out of
   the pre-allocated small-entry region. */
static void test_cbmem_get_region(void **state)
{
	int i;
	void *base_ptr = NULL;
	size_t size = 0;
	size_t size_counter = 0;
	const size_t entry_size = 0x2000;
	const size_t alloc_num = 32;
	const size_t small_entry_size = 64;
	const size_t small_alloc_num = 3;

	ignore_function_calls(cbmem_run_init_hooks);

	cbmem_initialize_empty_id_size(CBMEM_ENTRY_ID, CBMEM_ROOT_SIZE);

	/* Check size and base pointer for empty initialized cbmem */
	cbmem_get_region(&base_ptr, &size);
	assert_non_null(base_ptr);
	assert_int_not_equal(0, size);
	assert_int_equal(CBMEM_ROOT_SIZE + cbmem_overhead_size(), size);
	assert_int_equal(ALIGN_DOWN(_cbmem_top_ptr, LIMIT_ALIGN), base_ptr + size);

	/* Check for multiple big allocations.
	   Loop index doubles as the entry id (ids start at 1). */
	for (i = 1; i <= alloc_num; ++i) {
		const struct cbmem_entry *e = cbmem_entry_add(i, entry_size);
		assert_non_null(e);
		size_counter += cbmem_entry_size(e);

		/* Check if size is correct after each big allocation */
		cbmem_get_region(&base_ptr, &size);
		assert_int_equal(size_counter + cbmem_overhead_size() + CBMEM_ROOT_SIZE, size);
	}

	/* Check for few small allocations. */
	for (; i <= alloc_num + small_alloc_num; ++i) {
		const struct cbmem_entry *e = cbmem_entry_add(i, small_entry_size);
		assert_non_null(e);

		/* Check if size is correct after each small allocation. It should not change
		   as small entries have their region allocated and entry size is selected
		   to fit in it couple of times */
		cbmem_get_region(&base_ptr, &size);
		assert_int_equal(size_counter + cbmem_overhead_size() + CBMEM_ROOT_SIZE, size);
	}
}
| 533 | |
/* Golden-data test: the raw bytes of a freshly prepared cbmem must match
   the hardcoded dump in tests/lib/imd_cbmem_data.h, pinning the on-disk
   binary layout. */
static void test_general_data_structure(void **state)
{
	/* Initialize cbmem with few big and small entries, then check if binary data structure
	   is the same as stored in array containing hardcoded dumped cbmem */
	prepare_simple_cbmem();
	assert_memory_equal(get_cbmem_ptr(), test_cbmem_data, CBMEM_SIZE);
}
| 541 | |
/* Per-test setup AND teardown: fully reset imd state and zero the cbmem
   buffer so tests cannot leak state into one another. */
static int setup_teardown_test_imd_cbmem(void **state)
{
	reset_and_clear_cbmem();
	return 0;
}
| 547 | |
| 548 | static int setup_group_imd_cbmem(void **state) |
| 549 | { |
| 550 | /* Allocate more data to have space for alignment */ |
| 551 | void *top_ptr = malloc(CBMEM_SIZE + DYN_CBMEM_ALIGN_SIZE); |
| 552 | |
| 553 | if (!top_ptr) |
| 554 | return -1; |
| 555 | |
| 556 | *state = top_ptr; |
| 557 | |
| 558 | _cbmem_top_ptr = ALIGN_UP((uintptr_t)top_ptr + CBMEM_SIZE, DYN_CBMEM_ALIGN_SIZE); |
| 559 | return 0; |
| 560 | } |
| 561 | |
/* Group teardown: drop the imd bookkeeping and release the backing
   buffer allocated by setup_group_imd_cbmem(). */
static int teardown_group_imd_cbmem(void **state)
{
	reset_imd();
	free(*state);
	return 0;
}
| 568 | |
int main(void)
{
	/* Every test shares the same per-test reset as both setup and
	   teardown; the group-level hooks manage the backing buffer. */
	const struct CMUnitTest tests[] = {
		cmocka_unit_test_setup_teardown(test_cbmem_top,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_initialize_empty,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_initialize_empty_id_size,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_initialize,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
/* Only one id_size variant is registered, selected by the build stage. */
#if ENV_ROMSTAGE_OR_BEFORE
		cmocka_unit_test_setup_teardown(test_cbmem_initialize_id_size_romstage,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
#else
		cmocka_unit_test_setup_teardown(test_cbmem_initialize_id_size_ramstage,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
#endif
		cmocka_unit_test_setup_teardown(test_cbmem_recovery,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_entry_add,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_add,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_entry_find,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_find,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_entry_remove,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_entry_size,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_entry_start,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_add_bootmem,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_cbmem_get_region,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
		cmocka_unit_test_setup_teardown(test_general_data_structure,
						setup_teardown_test_imd_cbmem,
						setup_teardown_test_imd_cbmem),
	};

	return cb_run_group_tests(tests, setup_group_imd_cbmem, teardown_group_imd_cbmem);
}