blob: b949d1b2975021297f3012cb9d0ba3c1dcb5e268 [file] [log] [blame]
Jakub Czapiga2b8d7212021-04-22 20:14:02 +02001/* SPDX-License-Identifier: GPL-2.0-only */
2
3#include <crc_byte.h>
4#include <spd_bin.h>
5#include <spd_cache.h>
6#include <stdlib.h>
7#include <string.h>
8#include <tests/test.h>
9#include <tests/lib/spd_cache_data.h>
10
/* Fake flash region device handed out by the fmap_locate_area_as_rdev()
   override below; chained over `flash_buffer` in setup_spd_cache(). */
struct region_device flash_rdev_rw;
/* Backing storage for the fake flash region; allocated in setup_spd_cache()
   and released in teardown_spd_cache(). */
static char *flash_buffer = NULL;
static size_t flash_buffer_size = 0;
14
15static int setup_spd_cache(void **state)
16{
17 flash_buffer_size = SC_SPD_TOTAL_LEN + SC_CRC_LEN;
18 flash_buffer = malloc(flash_buffer_size);
19
20 if (flash_buffer == NULL) {
21 flash_buffer_size = 0;
22 return -1;
23 }
24
25 rdev_chain_mem_rw(&flash_rdev_rw, flash_buffer, flash_buffer_size);
26 return 0;
27}
28
/* Per-test setup: reset the fake flash content to the erased-flash value
   (0xff) so each test starts from an "empty" SPD cache. */
static int setup_spd_cache_test(void **state)
{
	memset(flash_buffer, 0xff, flash_buffer_size);
	return 0;
}
34
35static int teardown_spd_cache(void **state)
36{
37 rdev_chain_mem_rw(&flash_rdev_rw, NULL, 0);
38 free(flash_buffer);
39 flash_buffer = NULL;
40 flash_buffer_size = 0;
41 return 0;
42}
43
44
Patrick Georgice55ca22021-06-09 18:37:42 +020045int fmap_locate_area_as_rdev(const char *name, struct region_device *area)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020046{
47 return rdev_chain(area, &flash_rdev_rw, 0, flash_buffer_size);
48}
49
/* This test verifies that load_spd_cache() correctly extracts the spd_cache
   pointer and size from the provided region_device. The memory region device
   is returned by our fmap_locate_area_as_rdev() override. */
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020053static void test_load_spd_cache(void **state)
54{
55 uint8_t *spd_cache;
56 size_t spd_cache_sz;
57
58 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
59 assert_ptr_equal(flash_buffer, spd_cache);
60 assert_int_equal(SC_SPD_TOTAL_LEN + SC_CRC_LEN, spd_cache_sz);
61}
62
63static void calc_spd_cache_crc(uint8_t *spd_cache)
64{
Jakub Czapigac08b6a72022-01-10 13:36:47 +000065 *(uint16_t *)(spd_cache + SC_CRC_OFFSET) = CRC(spd_cache, SC_SPD_TOTAL_LEN, crc16_byte);
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020066}
67
Jakub Czapigac08b6a72022-01-10 13:36:47 +000068__attribute__((unused)) static void fill_spd_cache_ddr3(uint8_t *spd_cache, size_t spd_cache_sz)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020069{
70 assert_true(spd_cache_sz >= (spd_data_ddr3_1_sz + sizeof(uint16_t)));
71
72 memcpy(spd_cache, spd_data_ddr3_1, spd_data_ddr3_1_sz);
73 memset(spd_cache + spd_data_ddr3_1_sz, 0, spd_cache_sz - spd_data_ddr3_1_sz);
74 calc_spd_cache_crc(spd_cache);
75}
76
Jakub Czapigac08b6a72022-01-10 13:36:47 +000077__attribute__((unused)) static void fill_spd_cache_ddr4(uint8_t *spd_cache, size_t spd_cache_sz)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020078{
Jakub Czapigac08b6a72022-01-10 13:36:47 +000079 assert_true(spd_cache_sz
80 >= (spd_data_ddr4_1_sz + spd_data_ddr4_2_sz + sizeof(uint16_t)));
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020081
82 memcpy(spd_cache, spd_data_ddr4_1, spd_data_ddr4_1_sz);
83 memcpy(spd_cache + spd_data_ddr4_1_sz, spd_data_ddr4_2, spd_data_ddr4_2_sz);
84 memset(spd_cache + spd_data_ddr4_1_sz + spd_data_ddr4_2_sz, 0,
Jakub Czapigac08b6a72022-01-10 13:36:47 +000085 spd_cache_sz - (spd_data_ddr4_1_sz + spd_data_ddr4_2_sz));
Jakub Czapiga2b8d7212021-04-22 20:14:02 +020086 calc_spd_cache_crc(spd_cache);
87}
88
89static void test_spd_fill_from_cache(void **state)
90{
91 struct spd_block blk;
92 uint8_t *spd_cache;
93 size_t spd_cache_sz;
94 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
95
96 /* Empty spd cache */
97 assert_int_equal(CB_ERR, spd_fill_from_cache(spd_cache, &blk));
98
99#if __TEST_SPD_CACHE_DDR == 3
100 fill_spd_cache_ddr3(spd_cache, spd_cache_sz);
101#elif __TEST_SPD_CACHE_DDR == 4
102 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
103#endif
104 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
105}
106
107
108static void test_spd_cache_is_valid(void **state)
109{
110 uint8_t *spd_cache;
111 size_t spd_cache_sz;
112 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
113
114 /* Empty, incorrect SPD */
115 assert_false(spd_cache_is_valid(spd_cache, spd_cache_sz));
116
117#if __TEST_SPD_CACHE_DDR == 3
118 fill_spd_cache_ddr3(spd_cache, spd_cache_sz);
119#elif __TEST_SPD_CACHE_DDR == 4
120 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
121#endif
122 assert_true(spd_cache_is_valid(spd_cache, spd_cache_sz));
123}
124
125
126/* Used for setting `sn` parameter value */
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000127static u32 get_spd_sn_ret_sn[SC_SPD_NUMS] = {0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200128static size_t get_spd_sn_ret_sn_idx = 0;
129/* Implementation for testing purposes. */
130enum cb_err get_spd_sn(u8 addr, u32 *sn)
131{
132 *sn = get_spd_sn_ret_sn[get_spd_sn_ret_sn_idx];
133 get_spd_sn_ret_sn_idx = (get_spd_sn_ret_sn_idx + 1) % ARRAY_SIZE(get_spd_sn_ret_sn);
134
135 return mock_type(enum cb_err);
136}
137
138static void get_sn_from_spd_cache(uint8_t *spd_cache, u32 arr[])
139{
140 for (int i = 0; i < SC_SPD_NUMS; ++i)
141 arr[i] = *(u32 *)(spd_cache + SC_SPD_OFFSET(i) + DDR4_SPD_SN_OFF);
142}
143
/* check_if_dimm_changed() is used only with DDR4, so these tests are not used for DDR3 */
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000145__attribute__((unused)) static void test_check_if_dimm_changed_not_changed(void **state)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200146{
147 uint8_t *spd_cache;
148 size_t spd_cache_sz;
Eric Lai30677012022-04-14 15:22:52 +0800149 struct spd_block blk = {.addr_map = {0x50, 0x51, 0x52, 0x53},
150 .spd_array = {0}, .len = 0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200151
152 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
153 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
154 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
155
156 get_sn_from_spd_cache(spd_cache, get_spd_sn_ret_sn);
157 get_spd_sn_ret_sn_idx = 0;
158 will_return_count(get_spd_sn, CB_SUCCESS, SC_SPD_NUMS);
159 assert_false(check_if_dimm_changed(spd_cache, &blk));
160}
161
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000162__attribute__((unused)) static void test_check_if_dimm_changed_sn_error(void **state)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200163{
164 uint8_t *spd_cache;
165 size_t spd_cache_sz;
Eric Lai30677012022-04-14 15:22:52 +0800166 struct spd_block blk = {.addr_map = {0x50, 0x51, 0x52, 0x53},
167 .spd_array = {0}, .len = 0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200168
169 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
170 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
171 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
172
173 /* Simulate error */
174 will_return_count(get_spd_sn, CB_ERR, 1);
175 assert_true(check_if_dimm_changed(spd_cache, &blk));
176}
177
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000178__attribute__((unused)) static void test_check_if_dimm_changed_sodimm_lost(void **state)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200179{
180 uint8_t *spd_cache;
181 size_t spd_cache_sz;
Eric Lai30677012022-04-14 15:22:52 +0800182 struct spd_block blk = {.addr_map = {0x50, 0x51, 0x52, 0x53},
183 .spd_array = {0}, .len = 0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200184
185 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
186 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
187 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
188 get_sn_from_spd_cache(spd_cache, get_spd_sn_ret_sn);
189 memset(spd_cache + spd_data_ddr4_1_sz, 0xff, spd_data_ddr4_2_sz);
190
191 get_spd_sn_ret_sn_idx = 0;
192 will_return_always(get_spd_sn, CB_SUCCESS);
193 assert_true(check_if_dimm_changed(spd_cache, &blk));
194}
195
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000196__attribute__((unused)) static void test_check_if_dimm_changed_new_sodimm(void **state)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200197{
198 uint8_t *spd_cache;
199 size_t spd_cache_sz;
Eric Lai30677012022-04-14 15:22:52 +0800200 struct spd_block blk = {.addr_map = {0x50, 0x51, 0x52, 0x53},
201 .spd_array = {0}, .len = 0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200202
203 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
204 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
205 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
206 get_sn_from_spd_cache(spd_cache, get_spd_sn_ret_sn);
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000207 memcpy(spd_cache + spd_data_ddr4_1_sz + spd_data_ddr4_2_sz, spd_data_ddr4_2,
208 spd_data_ddr4_2_sz);
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200209
210 get_spd_sn_ret_sn_idx = 0;
211 will_return_always(get_spd_sn, CB_SUCCESS);
212 assert_true(check_if_dimm_changed(spd_cache, &blk));
213}
214
Jakub Czapigac08b6a72022-01-10 13:36:47 +0000215__attribute__((unused)) static void test_check_if_dimm_changed_sn_changed(void **state)
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200216{
217 uint8_t *spd_cache;
218 size_t spd_cache_sz;
Eric Lai30677012022-04-14 15:22:52 +0800219 struct spd_block blk = {.addr_map = {0x50, 0x51, 0x52, 0x53},
220 .spd_array = {0}, .len = 0};
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200221
222 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
223 fill_spd_cache_ddr4(spd_cache, spd_cache_sz);
224 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
225 get_sn_from_spd_cache(spd_cache, get_spd_sn_ret_sn);
226 *(u32 *)(spd_cache + SC_SPD_OFFSET(0) + DDR4_SPD_SN_OFF) = 0x43211234;
227
228 get_spd_sn_ret_sn_idx = 0;
229 will_return_always(get_spd_sn, CB_SUCCESS);
230 assert_true(check_if_dimm_changed(spd_cache, &blk));
231}
232
Eric Lai01590222022-04-15 11:20:38 +0800233__attribute__((unused)) static void test_check_if_dimm_changed_with_nonexistent(void **state)
234{
235 uint8_t *spd_cache;
236 size_t spd_cache_sz;
237 struct spd_block blk = {.addr_map = {0x50, 0, 0, 0},
238 .spd_array = {0}, .len = 0};
239
240 assert_int_equal(CB_SUCCESS, load_spd_cache(&spd_cache, &spd_cache_sz));
241 memcpy(spd_cache, spd_data_ddr4_1, spd_data_ddr4_1_sz);
242 memset(spd_cache + spd_data_ddr4_1_sz, 0xff, spd_cache_sz - spd_data_ddr4_1_sz);
243 calc_spd_cache_crc(spd_cache);
244 assert_int_equal(CB_SUCCESS, spd_fill_from_cache(spd_cache, &blk));
245
246 get_sn_from_spd_cache(spd_cache, get_spd_sn_ret_sn);
247 get_spd_sn_ret_sn_idx = 0;
248 will_return_always(get_spd_sn, CB_SUCCESS);
249 assert_false(check_if_dimm_changed(spd_cache, &blk));
250}
251
int main(void)
{
	/* Each test gets a freshly erased (0xff) fake flash via the per-test
	   setup; allocation/teardown of the buffer happens once per group. */
	const struct CMUnitTest tests[] = {
		cmocka_unit_test_setup(test_load_spd_cache, setup_spd_cache_test),
		cmocka_unit_test_setup(test_spd_fill_from_cache, setup_spd_cache_test),
		cmocka_unit_test_setup(test_spd_cache_is_valid, setup_spd_cache_test),
		/* DIMM-change detection is DDR4-only, so these tests are
		   compiled in only for the DDR4 build of this suite. */
#if __TEST_SPD_CACHE_DDR == 4
		cmocka_unit_test_setup(test_check_if_dimm_changed_not_changed,
				       setup_spd_cache_test),
		cmocka_unit_test_setup(test_check_if_dimm_changed_sn_error,
				       setup_spd_cache_test),
		cmocka_unit_test_setup(test_check_if_dimm_changed_sodimm_lost,
				       setup_spd_cache_test),
		cmocka_unit_test_setup(test_check_if_dimm_changed_new_sodimm,
				       setup_spd_cache_test),
		cmocka_unit_test_setup(test_check_if_dimm_changed_sn_changed,
				       setup_spd_cache_test),
		cmocka_unit_test_setup(test_check_if_dimm_changed_with_nonexistent,
				       setup_spd_cache_test),
#endif
	};

	return cb_run_group_tests(tests, setup_spd_cache, teardown_spd_cache);
}
Jakub Czapiga2b8d7212021-04-22 20:14:02 +0200275}