/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <console/console.h>
#include <region_file.h>
#include <string.h>

/*
 * A region file provides generic support for appending new data
 * within a storage region. The bookkeeping is tracked in metadata
 * blocks where an offset pointer points to the last byte of a newly
 * allocated byte sequence. Thus, by taking 2 consecutive block offsets
 * one can determine the start and size of the latest update. The data
 * does not have to be a consistent size, but each update has to be
 * small enough to fit a metadata block and one data write within the
 * region.
 *
 * The granularity of the block offsets is 16 bytes. By using 16-bit
 * block offsets a region's total size can be no larger than 1MiB.
 * However, the last 32 bytes of the 1MiB maximum region cannot be used
 * because one needs to store a block offset indicating the last byte
 * written. An unused block offset has the value 0xffff, i.e. byte
 * offset 0xffff0. The last block offset that can be written is 0xfffe,
 * i.e. byte offset 0xfffe0.
 *
 * The goal of this library is to provide a simple mechanism for
 * allocating blocks of data for updates. The metadata is written first,
 * followed by the data. That means a power event between the block
 * offset write and the data write results in blocks being allocated but
 * not entirely written. It's up to the user of the library to sanity
 * check the data stored.
 */
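
/*
 * A minimal usage sketch (illustrative only): 'backing_rdev' stands for
 * a caller-provided region_device; the public declarations come from
 * <region_file.h> included above.
 *
 *	struct region_file rf;
 *	struct region_device data;
 *	uint32_t value = 42;
 *
 *	if (region_file_init(&rf, backing_rdev) == 0 &&
 *	    region_file_update_data(&rf, &value, sizeof(value)) == 0 &&
 *	    region_file_data(&rf, &data) == 0)
 *		rdev_readat(&data, &value, 0, sizeof(value));
 */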

#define REGF_BLOCK_SHIFT 4
#define REGF_BLOCK_GRANULARITY (1 << REGF_BLOCK_SHIFT)
#define REGF_METADATA_BLOCK_SIZE REGF_BLOCK_GRANULARITY
#define REGF_UNALLOCATED_BLOCK 0xffff
#define REGF_UPDATES_PER_METADATA_BLOCK \
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))

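/*
 * region_file's 'slot' field holds either a non-negative index of the
 * most recently used metadata slot (RF_ONLY_METADATA meaning metadata
 * is allocated but no data has been written yet) or one of the negative
 * state values below.
 */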
enum {
	RF_ONLY_METADATA = 0,
	RF_EMPTY = -1,
	RF_NEED_TO_EMPTY = -2,
	RF_FATAL = -3,
};

struct metadata_block {
	uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
};

static size_t block_to_bytes(uint16_t offset)
{
	return (size_t)offset << REGF_BLOCK_SHIFT;
}

static size_t bytes_to_block(size_t bytes)
{
	return bytes >> REGF_BLOCK_SHIFT;
}

static inline int block_offset_unallocated(uint16_t offset)
{
	return offset == REGF_UNALLOCATED_BLOCK;
}

static inline size_t region_file_data_begin(const struct region_file *f)
{
	return f->data_blocks[0];
}

static inline size_t region_file_data_end(const struct region_file *f)
{
	return f->data_blocks[1];
}

static int all_block_offsets_unallocated(const struct metadata_block *mb)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mb->blocks); i++) {
		if (!block_offset_unallocated(mb->blocks[i]))
			return 0;
	}

	return 1;
}

/* Read metadata block at block i. */
static int read_mb(size_t i, struct metadata_block *mb,
			const struct region_file *f)
{
	size_t offset = block_to_bytes(i);

	if (rdev_readat(&f->metadata, mb, offset, sizeof(*mb)) < 0)
		return -1;

	return 0;
}

/* Locate metadata block with the latest update */
static int find_latest_mb(struct metadata_block *mb, size_t num_mb_blocks,
				struct region_file *f)
{
	size_t l = 0;
	size_t r = num_mb_blocks;

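	/*
	 * Binary search: 'l' always indexes a metadata block with at least
	 * one allocated slot (the caller verified block 0), while 'r' is the
	 * first block believed to be fully unallocated (or one past the last
	 * metadata block).
	 */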
	while (l + 1 < r) {
		size_t mid = (l + r) / 2;

		if (read_mb(mid, mb, f) < 0)
			return -1;
		if (all_block_offsets_unallocated(mb))
			r = mid;
		else
			l = mid;
	}

	/* Set the base block slot. */
	f->slot = l * REGF_UPDATES_PER_METADATA_BLOCK;

	/* Re-read metadata block with the latest update. */
	if (read_mb(l, mb, f) < 0)
		return -1;

	return 0;
}

static void find_latest_slot(struct metadata_block *mb, struct region_file *f)
{
	size_t i;

	for (i = REGF_UPDATES_PER_METADATA_BLOCK - 1; i > 0; i--) {
		if (!block_offset_unallocated(mb->blocks[i]))
			break;
	}

	f->slot += i;
}

static int fill_data_boundaries(struct region_file *f)
{
	struct region_device slots;
	size_t offset;
	size_t size = sizeof(f->data_blocks);

	if (f->slot == RF_ONLY_METADATA) {
		size_t start = bytes_to_block(region_device_sz(&f->metadata));
		f->data_blocks[0] = start;
		f->data_blocks[1] = start;
		return 0;
	}

	/* Sanity check the 2 slot sequence to read. If it's out of the
	 * metadata blocks' bounds then the region needs to be emptied. This
	 * is done to uniquely identify I/O vs data errors in the readat()
	 * below. */
	offset = (f->slot - 1) * sizeof(f->data_blocks[0]);
	if (rdev_chain(&slots, &f->metadata, offset, size)) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	if (rdev_readat(&slots, &f->data_blocks, 0, size) < 0) {
		printk(BIOS_ERR, "REGF failed to read data boundaries.\n");
		return -1;
	}

	/* All used blocks should increase from the previous write. */
	if (region_file_data_begin(f) >= region_file_data_end(f)) {
		printk(BIOS_ERR, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
			region_file_data_begin(f), region_file_data_end(f));
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Ensure data doesn't exceed the region. */
	if (region_file_data_end(f) >
			bytes_to_block(region_device_sz(&f->rdev))) {
		printk(BIOS_ERR, "REGF data exceeds region %zd > %zd\n",
			region_file_data_end(f),
			bytes_to_block(region_device_sz(&f->rdev)));
		f->slot = RF_NEED_TO_EMPTY;
	}

	return 0;
}

int region_file_init(struct region_file *f, const struct region_device *p)
{
	struct metadata_block mb;

	/* Total number of metadata blocks is found by reading the first
	 * block offset as the metadata is allocated first. At least one
	 * metadata block is available. */

	memset(f, 0, sizeof(*f));
	f->slot = RF_FATAL;

	/* Keep parent around for accessing data later. */
	if (rdev_chain_full(&f->rdev, p))
		return -1;

	if (rdev_readat(p, &mb, 0, sizeof(mb)) < 0) {
		printk(BIOS_ERR, "REGF fail reading first metadata block.\n");
		return -1;
	}

	/* No metadata has been allocated. Assume region is empty. */
	if (block_offset_unallocated(mb.blocks[0])) {
		f->slot = RF_EMPTY;
		return 0;
	}

	/* If the metadata size is 0 blocks, the region needs to be emptied. */
	if (mb.blocks[0] == 0) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* The region needs to be emptied as the metadata is broken. */
	if (rdev_chain(&f->metadata, p, 0, block_to_bytes(mb.blocks[0]))) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Locate the metadata block with the latest update. */
	if (find_latest_mb(&mb, mb.blocks[0], f)) {
		printk(BIOS_ERR, "REGF fail locating latest metadata block.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	find_latest_slot(&mb, f);

	/* Fill in the data blocks marking the latest update. */
	if (fill_data_boundaries(f)) {
		printk(BIOS_ERR, "REGF fail locating data boundaries.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	return 0;
}

int region_file_data(const struct region_file *f, struct region_device *rdev)
{
	size_t offset;
	size_t size;

	/* Slot indicates if any data is available. */
	if (f->slot <= RF_ONLY_METADATA)
		return -1;

	offset = block_to_bytes(region_file_data_begin(f));
	size = block_to_bytes(region_file_data_end(f)) - offset;

	return rdev_chain(rdev, &f->rdev, offset, size);
}

/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks where 'x' is number of
 * data updates and 'y' is number of metadata blocks:
 *
 * x = number of data updates
 * y = number of metadata blocks
 * T = total blocks in region
 * D = data size in blocks
 * M = metadata size in blocks
 * A = updates accounted for in each metadata block
 *
 * T = D * x + M * y
 * y = x / A
 * -----------------
 * T = D * x + M * x / A = x * (D + M / A)
 * T * A = x * (D * A + M)
 * x = T * A / (D * A + M)
 */
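/*
 * Worked example with illustrative numbers: a 64KiB region gives
 * T = 4096 blocks; with A = 8 slots per metadata block, M = 1 and
 * 64-byte data updates (D = 4), x = 4096 * 8 / (4 * 8 + 1) = 992
 * updates, requiring y = 992 / 8 = 124 metadata blocks (1984 bytes).
 */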
static int allocate_metadata(struct region_file *f, size_t data_blks)
{
	size_t t, m;
	size_t x, y;
	uint16_t tot_metadata;
	const size_t a = REGF_UPDATES_PER_METADATA_BLOCK;
	const size_t d = data_blks;

	t = bytes_to_block(ALIGN_DOWN(region_device_sz(&f->rdev),
					REGF_BLOCK_GRANULARITY));
	m = bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE,
					REGF_BLOCK_GRANULARITY));

	/* Ensure at least one data update can fit with 1 metadata block
	 * within the region. */
	if (d > t - m)
		return -1;

	/* Maximize the number of updates by aligning up to the number of
	 * updates in a metadata block. That many updates may not be
	 * achievable in practice, but it ensures enough metadata blocks are
	 * allocated. */
	x = ALIGN_UP(t * a / (d * a + m), a);

	/* One data block has to fit. */
	if (x == 0)
		x = 1;

	/* Now calculate how many metadata blocks are needed. */
	y = ALIGN_UP(x, a) / a;

	/* Need to commit the metadata allocation. */
	tot_metadata = m * y;
	if (rdev_writeat(&f->rdev, &tot_metadata, 0, sizeof(tot_metadata)) < 0)
		return -1;

	if (rdev_chain(&f->metadata, &f->rdev, 0,
			block_to_bytes(tot_metadata)))
		return -1;

	/* Initialize an empty data region to start appending from. */
	f->data_blocks[0] = tot_metadata;
	f->data_blocks[1] = tot_metadata;

	return 0;
}

static int update_can_fit(const struct region_file *f, size_t data_blks)
{
	size_t metadata_slots;
	size_t end_blk;

	metadata_slots = region_device_sz(&f->metadata) / sizeof(uint16_t);

	/* No more slots. */
	if ((size_t)f->slot + 1 >= metadata_slots)
		return 0;

	/* See where the last block lies from the current one. */
	end_blk = data_blks + region_file_data_end(f);

	/* Update would have exceeded block addressing. */
	if (end_blk >= REGF_UNALLOCATED_BLOCK)
		return 0;

	/* End block exceeds size of region. */
	if (end_blk > bytes_to_block(region_device_sz(&f->rdev)))
		return 0;

	return 1;
}

static int commit_data_allocation(struct region_file *f, size_t data_blks)
{
	size_t offset;

	f->slot++;

	offset = f->slot * sizeof(uint16_t);
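	/*
	 * The new update begins where the previous one ended, so set
	 * data_blocks[0] before deriving the new end offset from it. Only
	 * the end offset is written to the slot; the begin offset is implied
	 * by the previous slot's value.
	 */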
	f->data_blocks[0] = region_file_data_end(f);
	f->data_blocks[1] = region_file_data_begin(f) + data_blks;

	if (rdev_writeat(&f->metadata, &f->data_blocks[1], offset,
				sizeof(f->data_blocks[1])) < 0)
		return -1;

	return 0;
}
367
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700368static int commit_data(const struct region_file *f,
369 const struct update_region_file_entry *entries,
370 size_t num_entries)
Aaron Durbincd0bc982016-11-19 12:36:09 -0600371{
372 size_t offset = block_to_bytes(region_file_data_begin(f));
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700373 for (int i = 0; i < num_entries; i++) {
374 if (rdev_writeat(&f->rdev, entries[i].data, offset, entries[i].size) < 0)
375 return -1;
376 offset += entries[i].size;
377 }
Aaron Durbincd0bc982016-11-19 12:36:09 -0600378 return 0;
379}
380
381static int handle_empty(struct region_file *f, size_t data_blks)
382{
383 if (allocate_metadata(f, data_blks)) {
384 printk(BIOS_ERR, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
385 data_blks, bytes_to_block(region_device_sz(&f->rdev)));
386 return -1;
387 }
388
389 f->slot = RF_ONLY_METADATA;
390
391 return 0;
392}
393
394static int handle_need_to_empty(struct region_file *f)
395{
396 if (rdev_eraseat(&f->rdev, 0, region_device_sz(&f->rdev)) < 0) {
397 printk(BIOS_ERR, "REGF empty failed.\n");
398 return -1;
399 }
400
401 f->slot = RF_EMPTY;
402
403 return 0;
404}
405
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700406static int handle_update(struct region_file *f, size_t blocks,
407 const struct update_region_file_entry *entries,
408 size_t num_entries)
Aaron Durbincd0bc982016-11-19 12:36:09 -0600409{
410 if (!update_can_fit(f, blocks)) {
411 printk(BIOS_INFO, "REGF update can't fit. Will empty.\n");
412 f->slot = RF_NEED_TO_EMPTY;
413 return 0;
414 }
415
416 if (commit_data_allocation(f, blocks)) {
417 printk(BIOS_ERR, "REGF failed to commit data allocation.\n");
418 return -1;
419 }
420
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700421 if (commit_data(f, entries, num_entries)) {
Aaron Durbincd0bc982016-11-19 12:36:09 -0600422 printk(BIOS_ERR, "REGF failed to commit data.\n");
423 return -1;
424 }
425
426 return 0;
427}
428
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700429int region_file_update_data_arr(struct region_file *f,
430 const struct update_region_file_entry *entries,
431 size_t num_entries)
Aaron Durbincd0bc982016-11-19 12:36:09 -0600432{
433 int ret;
434 size_t blocks;
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700435 size_t size = 0;
Aaron Durbincd0bc982016-11-19 12:36:09 -0600436
Shelley Chen2d90ddd2020-09-15 00:41:14 -0700437 for (int i = 0; i < num_entries; i++)
438 size += entries[i].size;
Aaron Durbincd0bc982016-11-19 12:36:09 -0600439 blocks = bytes_to_block(ALIGN_UP(size, REGF_BLOCK_GRANULARITY));
440
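	/*
	 * Drive the slot state machine until the update is committed: an
	 * empty region first has metadata allocated, a region flagged for
	 * emptying is erased, and a fatal state aborts the update.
	 */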
	while (1) {
		int prev_slot = f->slot;

		switch (f->slot) {
		case RF_EMPTY:
			ret = handle_empty(f, blocks);
			break;
		case RF_NEED_TO_EMPTY:
			ret = handle_need_to_empty(f);
			break;
		case RF_FATAL:
			ret = -1;
			break;
		default:
			ret = handle_update(f, blocks, entries, num_entries);
			break;
		}

		/* Failing case. No more updates allowed to be attempted. */
		if (ret) {
			f->slot = RF_FATAL;
			break;
		}

		/* No more state changes and data committed. */
		if (f->slot > RF_ONLY_METADATA && prev_slot != f->slot)
			break;
	}

	return ret;
}

int region_file_update_data(struct region_file *f, const void *buf, size_t size)
{
	struct update_region_file_entry entry = {
		.size = size,
		.data = buf,
	};
	return region_file_update_data_arr(f, &entry, 1);
}