/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/region.h>
#include <string.h>
#include <tests/test.h>

/* We'd like to test overflow conditions, but size_t in tests depends on the HOSTCC
   architecture. We use VAL() to normalize the available address space to [VAL(0x0):VAL(0xf)). */
#define VAL(v) ((size_t)(v##ULL << (sizeof(size_t) * 8 - 4)))
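/* With a 64-bit size_t, for example, VAL(2) is 0x2000000000000000 and VAL(0xf) + VAL(1) wraps
   around to 0. */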

static void test_region(void **state)
{
	/* Self-test: make sure VAL() overflow works as intended. */
	assert_true(VAL(5) + VAL(10) > VAL(10));
	assert_true(VAL(7) + VAL(10) < VAL(10));

	struct region outer = {.offset = VAL(2), .size = VAL(4)};
	assert_int_equal(region_offset(&outer), VAL(2));
	assert_int_equal(region_sz(&outer), VAL(4));
	assert_int_equal(region_end(&outer), VAL(6));

	struct region inner = {.offset = VAL(3), .size = VAL(2)};
	assert_true(region_is_subregion(&outer, &inner));

	struct region touching_bottom = {.offset = VAL(2), .size = VAL(1)};
	assert_true(region_is_subregion(&outer, &touching_bottom));

	struct region touching_top = {.offset = VAL(5), .size = VAL(1)};
	assert_true(region_is_subregion(&outer, &touching_top));

	struct region overlap_bottom = {.offset = VAL(1), .size = VAL(2)};
	assert_false(region_is_subregion(&outer, &overlap_bottom));

	struct region overlap_top = {.offset = VAL(5), .size = VAL(2)};
	assert_false(region_is_subregion(&outer, &overlap_top));

	struct region below = {.offset = 0, .size = VAL(1)};
	assert_false(region_is_subregion(&outer, &below));

	struct region above = {.offset = VAL(0xf), .size = VAL(1)};
	assert_false(region_is_subregion(&outer, &above));
}

static void *mock_mmap(const struct region_device *rdev, size_t offset, size_t size)
{
	check_expected_ptr(rdev);
	check_expected(offset);
	check_expected(size);

	return mock_ptr_type(void *);
}

static int mock_unmap(const struct region_device *rdev, void *mapping)
{
	check_expected_ptr(rdev);
	check_expected_ptr(mapping);

	return mock();
}

static ssize_t mock_readat(const struct region_device *rdev, void *buffer, size_t offset,
			   size_t size)
{
	check_expected_ptr(rdev);
	check_expected_ptr(buffer);
	check_expected(offset);
	check_expected(size);

	ssize_t ret = mock();
	if (!ret)
		return size;
	else
		return ret;
}

static ssize_t mock_writeat(const struct region_device *rdev, const void *buffer, size_t offset,
			    size_t size)
{
	check_expected_ptr(rdev);
	check_expected_ptr(buffer);
	check_expected(offset);
	check_expected(size);

	ssize_t ret = mock();
	if (!ret)
		return size;
	else
		return ret;
}

static ssize_t mock_eraseat(const struct region_device *rdev, size_t offset, size_t size)
{
	check_expected_ptr(rdev);
	check_expected(offset);
	check_expected(size);

	ssize_t ret = mock();
	if (!ret)
		return size;
	else
		return ret;
}

struct region_device_ops mock_rdev_ops = {
	.mmap = mock_mmap,
	.munmap = mock_unmap,
	.readat = mock_readat,
	.writeat = mock_writeat,
	.eraseat = mock_eraseat,
};

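/* The mock rdev starts at offset 0 and is ~(size_t)0 (SIZE_MAX) bytes large, i.e. it spans
   (almost) the whole address space. */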
112struct region_device mock_rdev = REGION_DEV_INIT(&mock_rdev_ops, 0, ~(size_t)0);
113void *mmap_result = (void *)0x12345678;
114const size_t mock_size = 256;
115u8 mock_buffer[256];
116
static void test_rdev_basics(void **state)
{
	assert_int_equal(region_device_offset(&mock_rdev), 0);
	assert_int_equal(region_device_sz(&mock_rdev), ~(size_t)0);
	assert_int_equal(region_device_end(&mock_rdev), ~(size_t)0);
}

/*
 * This function sets up defaults for the mock_rdev_ops functions so we don't have to explicitly
 * mock every parameter every time. cmocka doesn't really support this use case well and won't
 * let you override these values once they're set (they are stored as queues, not stacks, and
 * once an "infinite" element is enqueued the test can never get past it), so tests always have
 * to enqueue any custom values they may need for the rest of the test function before calling
 * this.
 */
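/* For example, test_rdev_failure() below enqueues its will_return() error values first and only
   then calls rdev_mock_defaults(). */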
static void rdev_mock_defaults(void)
{
	will_return_maybe(mock_mmap, mmap_result);
	will_return_maybe(mock_unmap, 0);
	will_return_maybe(mock_readat, 0);
	will_return_maybe(mock_writeat, 0);
	will_return_maybe(mock_eraseat, 0);

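	/* A count of -2 lets each parameter check match any number of calls, including zero. */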
	expect_value_count(mock_mmap, rdev, &mock_rdev, -2);
	expect_value_count(mock_unmap, rdev, &mock_rdev, -2);
	expect_value_count(mock_readat, rdev, &mock_rdev, -2);
	expect_value_count(mock_writeat, rdev, &mock_rdev, -2);
	expect_value_count(mock_eraseat, rdev, &mock_rdev, -2);

	expect_value_count(mock_readat, buffer, &mock_buffer, -2);
	expect_value_count(mock_writeat, buffer, &mock_buffer, -2);

	expect_value_count(mock_mmap, offset, 0, -2);
	expect_value_count(mock_readat, offset, 0, -2);
	expect_value_count(mock_writeat, offset, 0, -2);
	expect_value_count(mock_eraseat, offset, 0, -2);

	expect_value_count(mock_mmap, size, mock_size, -2);
	expect_value_count(mock_readat, size, mock_size, -2);
	expect_value_count(mock_writeat, size, mock_size, -2);
	expect_value_count(mock_eraseat, size, mock_size, -2);

	expect_value_count(mock_unmap, mapping, mmap_result, -2);
}

static void test_rdev_success(void **state)
{
	expect_value(mock_mmap, size, region_device_sz(&mock_rdev));

	rdev_mock_defaults();

	assert_ptr_equal(rdev_mmap_full(&mock_rdev), mmap_result);

	assert_ptr_equal(rdev_mmap(&mock_rdev, 0, mock_size), mmap_result);
	assert_int_equal(rdev_munmap(&mock_rdev, mmap_result), 0);
	assert_int_equal(rdev_readat(&mock_rdev, mock_buffer, 0, mock_size), mock_size);
	assert_int_equal(rdev_writeat(&mock_rdev, mock_buffer, 0, mock_size), mock_size);
	assert_int_equal(rdev_eraseat(&mock_rdev, 0, mock_size), mock_size);
}

static void test_rdev_failure(void **state)
{
	will_return(mock_mmap, NULL);
	will_return(mock_unmap, -1);
	will_return(mock_readat, -1);
	will_return(mock_writeat, -1);
	will_return(mock_eraseat, -1);

	rdev_mock_defaults();

	assert_null(rdev_mmap(&mock_rdev, 0, mock_size));
	assert_int_equal(rdev_munmap(&mock_rdev, mmap_result), -1);
	assert_int_equal(rdev_readat(&mock_rdev, mock_buffer, 0, mock_size), -1);
	assert_int_equal(rdev_writeat(&mock_rdev, mock_buffer, 0, mock_size), -1);
	assert_int_equal(rdev_eraseat(&mock_rdev, 0, mock_size), -1);
}

static void test_rdev_wrap(void **state)
{
	struct region_device child;
	const size_t offs = VAL(0xf);
	const size_t wrap_size = VAL(2);
	/* Known API limitation -- can't exactly touch the address space limit from below. */
	const size_t fit_size = VAL(1) - 1;
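	/* (With a size of VAL(1), offset + size would wrap around to 0, so that region can't
	   be expressed.) */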

	/* For the 'wrap' cases, the underlying rdev_ops aren't even called, so only add
	   expectations for the 'fit' cases. */
	expect_value(mock_mmap, offset, offs);
	expect_value(mock_readat, offset, offs);
	expect_value(mock_writeat, offset, offs);
	expect_value(mock_eraseat, offset, offs);

	expect_value(mock_mmap, size, fit_size);
	expect_value(mock_readat, size, fit_size);
	expect_value(mock_writeat, size, fit_size);
	expect_value(mock_eraseat, size, fit_size);

	rdev_mock_defaults();

	/* Accesses to regions that wrap around the end of the address space should fail. */
	assert_null(rdev_mmap(&mock_rdev, offs, wrap_size));
	assert_int_equal(rdev_readat(&mock_rdev, mock_buffer, offs, wrap_size), -1);
	assert_int_equal(rdev_writeat(&mock_rdev, mock_buffer, offs, wrap_size), -1);
	assert_int_equal(rdev_eraseat(&mock_rdev, offs, wrap_size), -1);
	assert_int_equal(rdev_chain(&child, &mock_rdev, offs, wrap_size), -1);

	/* Just barely touching the end of the address space (and the rdev) should be fine. */
	assert_ptr_equal(rdev_mmap(&mock_rdev, offs, fit_size), mmap_result);
	assert_int_equal(rdev_readat(&mock_rdev, mock_buffer, offs, fit_size), fit_size);
	assert_int_equal(rdev_writeat(&mock_rdev, mock_buffer, offs, fit_size), fit_size);
	assert_int_equal(rdev_eraseat(&mock_rdev, offs, fit_size), fit_size);
	assert_int_equal(rdev_chain(&child, &mock_rdev, offs, fit_size), 0);
}

static void test_rdev_chain(void **state)
{
	struct region_device child;
	const size_t child_offs = VAL(2);
	const size_t child_size = VAL(4);
	const size_t offs = VAL(1);
	const size_t ovrflw_size = child_size - offs + 1;

	/* The mock_size test is the only one that will go through to underlying rdev_ops. */
	expect_value(mock_mmap, offset, child_offs + offs);
	expect_value(mock_readat, offset, child_offs + offs);
	expect_value(mock_writeat, offset, child_offs + offs);
	expect_value(mock_eraseat, offset, child_offs + offs);

	rdev_mock_defaults();

	/* First a quick test for rdev_chain_full(). */
	assert_int_equal(rdev_chain_full(&child, &mock_rdev), 0);
	assert_int_equal(region_device_sz(&child), region_device_sz(&mock_rdev));
	assert_int_equal(region_device_offset(&child), region_device_offset(&mock_rdev));
	assert_int_equal(rdev_relative_offset(&mock_rdev, &child), 0);

	/* Remaining tests use rdev chained to [child_offs:child_size) subregion. */
	assert_int_equal(rdev_chain(&child, &mock_rdev, child_offs, child_size), 0);
	assert_int_equal(region_device_sz(&child), child_size);
	assert_int_equal(region_device_offset(&child), child_offs);
	assert_int_equal(region_device_end(&child), child_offs + child_size);
	assert_int_equal(rdev_relative_offset(&mock_rdev, &child), child_offs);
	assert_int_equal(rdev_relative_offset(&child, &mock_rdev), -1);

	/* offs + mock_size < child_size, so will succeed. */
	assert_ptr_equal(rdev_mmap(&child, offs, mock_size), mmap_result);
	assert_int_equal(rdev_munmap(&child, mmap_result), 0);
	assert_int_equal(rdev_readat(&child, mock_buffer, offs, mock_size), mock_size);
	assert_int_equal(rdev_writeat(&child, mock_buffer, offs, mock_size), mock_size);
	assert_int_equal(rdev_eraseat(&child, offs, mock_size), mock_size);

	/* offs + ovrflw_size > child_size, so will fail. */
	assert_null(rdev_mmap(&child, offs, ovrflw_size));
	assert_int_equal(rdev_readat(&child, mock_buffer, offs, ovrflw_size), -1);
	assert_int_equal(rdev_writeat(&child, mock_buffer, offs, ovrflw_size), -1);
	assert_int_equal(rdev_eraseat(&child, offs, ovrflw_size), -1);

	/* Using child_size as offset, the start of the area will already be out of range. */
	assert_null(rdev_mmap(&child, child_size, mock_size));
	assert_int_equal(rdev_readat(&child, mock_buffer, child_size, mock_size), -1);
	assert_int_equal(rdev_writeat(&child, mock_buffer, child_size, mock_size), -1);
	assert_int_equal(rdev_eraseat(&child, child_size, mock_size), -1);
}

static void test_rdev_double_chain(void **state)
{
	struct region_device first, second;
	const size_t first_offs = VAL(2);
	const size_t first_size = VAL(6);
	const size_t second_offs = VAL(2);
	const size_t second_size = VAL(2);
	const size_t offs = VAL(1);
	const size_t ovrflw_size = second_size - offs + 1;

	/* The mock_size test is the only one that will go through to underlying rdev_ops. */
	expect_value(mock_mmap, offset, first_offs + second_offs + offs);
	expect_value(mock_readat, offset, first_offs + second_offs + offs);
	expect_value(mock_writeat, offset, first_offs + second_offs + offs);
	expect_value(mock_eraseat, offset, first_offs + second_offs + offs);

	rdev_mock_defaults();

	/* First, chain an rdev to root over [first_offs:first_size). */
	assert_int_equal(rdev_chain(&first, &mock_rdev, first_offs, first_size), 0);

	/* Trying to chain a second rdev to first beyond its end should fail. */
	assert_int_equal(rdev_chain(&second, &first, second_offs, first_size), -1);

	/* Chain second to first at [second_offs:second_size). */
	assert_int_equal(rdev_chain(&second, &first, second_offs, second_size), 0);
	assert_int_equal(rdev_relative_offset(&first, &second), second_offs);
	assert_int_equal(rdev_relative_offset(&mock_rdev, &second), first_offs + second_offs);

	/* offs + mock_size < second_size, so will succeed. */
	assert_ptr_equal(rdev_mmap(&second, offs, mock_size), mmap_result);
	assert_int_equal(rdev_munmap(&second, mmap_result), 0);
	assert_int_equal(rdev_readat(&second, mock_buffer, offs, mock_size), mock_size);
	assert_int_equal(rdev_writeat(&second, mock_buffer, offs, mock_size), mock_size);
	assert_int_equal(rdev_eraseat(&second, offs, mock_size), mock_size);

	/* offs + ovrflw_size > second_size, so will fail. */
	assert_null(rdev_mmap(&second, offs, ovrflw_size));
	assert_int_equal(rdev_readat(&second, mock_buffer, offs, ovrflw_size), -1);
	assert_int_equal(rdev_writeat(&second, mock_buffer, offs, ovrflw_size), -1);
	assert_int_equal(rdev_eraseat(&second, offs, ovrflw_size), -1);

	/* An offset of second_size + offs is already way out of range. */
	assert_null(rdev_mmap(&second, second_size + offs, mock_size));
	assert_int_equal(rdev_readat(&second, mock_buffer, second_size + offs, mock_size), -1);
	assert_int_equal(rdev_writeat(&second, mock_buffer, second_size + offs, mock_size), -1);
	assert_int_equal(rdev_eraseat(&second, second_size + offs, mock_size), -1);
}

static void test_mem_rdev(void **state)
{
	const size_t size = 256;
	u8 backing[size];
	u8 scratch[size];
	int i;
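	/* rdev_chain_mem_rw() chains a region device directly over the backing buffer, so reads,
	   writes and erases below all operate on that memory. */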
	struct region_device mem;
	rdev_chain_mem_rw(&mem, backing, size);

	/* Test writing to and reading from full mapping. */
	memset(backing, 0xa5, size);
	u8 *mapping = rdev_mmap_full(&mem);
	assert_non_null(mapping);
	for (i = 0; i < size; i++)
		assert_int_equal(mapping[i], 0xa5);
	memset(mapping, 0x5a, size);
	for (i = 0; i < size; i++)
		assert_int_equal(backing[i], 0x5a);
	assert_int_equal(rdev_munmap(&mem, mapping), 0);

	/* Test read/write/erase of single bytes. */
	for (i = 0; i < size; i++) {
		u8 val = i + 0xaa;
		scratch[0] = val;
		assert_int_equal(rdev_writeat(&mem, &scratch, i, 1), 1);
		assert_int_equal(backing[i], val);
		assert_int_equal(scratch[0], val);
		val = i + 0x55;
		backing[i] = val;
		assert_int_equal(rdev_readat(&mem, &scratch, i, 1), 1);
		assert_int_equal(scratch[0], val);
		assert_int_equal(backing[i], val);
		assert_int_equal(rdev_eraseat(&mem, i, 1), 1);
		assert_int_equal(backing[i], 0);
	}

	/* Test read/write/erase of larger chunk. */
	size_t offs = 0x47;
	size_t chunk = 0x72;
	memset(backing, 0, size);
	memset(scratch, 0, size);
	memset(scratch + offs, 0x39, chunk);
	assert_int_equal(rdev_writeat(&mem, scratch + offs, offs, chunk), chunk);
	assert_memory_equal(backing, scratch, size);
	memset(backing, 0, size);
	assert_int_equal(rdev_readat(&mem, scratch + offs, offs, chunk), chunk);
	assert_memory_equal(backing, scratch, size);
	memset(scratch + offs + 1, 0, chunk - 1);
	assert_int_equal(rdev_eraseat(&mem, offs + 1, chunk - 1), chunk - 1);
	assert_memory_equal(backing, scratch, size);

	/* Test mapping of larger chunk. */
	memset(backing, 0, size);
	mapping = rdev_mmap(&mem, offs, chunk);
	assert_non_null(mapping);
	memset(scratch, 0x93, size);
	memcpy(mapping, scratch, chunk);
	memset(scratch, 0, size);
	memset(scratch + offs, 0x93, chunk);
	assert_memory_equal(backing, scratch, size);
	assert_int_equal(rdev_munmap(&mem, mapping), 0);
	assert_memory_equal(backing, scratch, size);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_region),
		cmocka_unit_test(test_rdev_basics),
		cmocka_unit_test(test_rdev_success),
		cmocka_unit_test(test_rdev_failure),
		cmocka_unit_test(test_rdev_wrap),
		cmocka_unit_test(test_rdev_chain),
		cmocka_unit_test(test_rdev_double_chain),
		cmocka_unit_test(test_mem_rdev),
	};

	return cb_run_group_tests(tests, NULL, NULL);
}