blob: 9d103f2404a868eaa20d9cedf678148caacd09ee [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2016 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
15
16#include <commonlib/helpers.h>
17#include <console/console.h>
18#include <region_file.h>
19#include <string.h>
20
/*
 * A region file provides generic support for appending new data
 * within a storage region. The bookkeeping is tracked in metadata
 * blocks where an offset pointer points to the last byte of a newly
 * allocated byte sequence. Thus, by taking 2 block offsets one can
 * determine start and size of the latest update. The data does not
 * have to be the same consistent size, but the data size has to be small
 * enough to fit a metadata block and one data write within the region.
 *
 * The granularity of the block offsets is 16 bytes. By using 16-bit
 * block offsets a region's total size can be no larger than 1MiB.
 * However, the last 32 bytes cannot be used in the 1MiB maximum region
 * because one needs to put a block offset indicating last byte written.
 * An unused block offset is the value 0xffff or 0xffff0 bytes. The last
 * block offset that can be written is 0xfffe or 0xfffe0 byte offset.
 *
 * The goal of this library is to provide a simple mechanism for
 * allocating blocks of data for updates. The metadata is written first
 * followed by the data. That means a power event between the block offset
 * write and the data write results in blocks being allocated but not
 * entirely written. It's up to the user of the library to sanity check
 * data stored.
 */
44
/* Block offsets count 16-byte granules (1 << 4). */
#define REGF_BLOCK_SHIFT 4
#define REGF_BLOCK_GRANULARITY (1 << REGF_BLOCK_SHIFT)
/* A metadata block occupies exactly one granule. */
#define REGF_METADATA_BLOCK_SIZE REGF_BLOCK_GRANULARITY
/* Sentinel value of a slot that has never been written (all ones). */
#define REGF_UNALLOCATED_BLOCK 0xffff
/* Number of 16-bit block-offset slots held in one metadata block. */
#define REGF_UPDATES_PER_METADATA_BLOCK \
	(REGF_METADATA_BLOCK_SIZE / sizeof(uint16_t))
51
/* States encoded in region_file.slot. Values > RF_ONLY_METADATA index the
 * latest used metadata slot; non-positive values describe the file state. */
enum {
	RF_ONLY_METADATA = 0,	/* Metadata allocated, but no data written yet. */
	RF_EMPTY = -1,		/* Region erased; nothing allocated. */
	RF_NEED_TO_EMPTY = -2,	/* Contents inconsistent; erase before reuse. */
	RF_FATAL = -3,		/* Unrecoverable I/O error; no further updates. */
};
58
/* One metadata block: an array of 16-bit block offsets. Each written slot
 * records the end (exclusive) of a data update in block units. */
struct metadata_block {
	uint16_t blocks[REGF_UPDATES_PER_METADATA_BLOCK];
};
62
63static size_t block_to_bytes(uint16_t offset)
64{
65 return (size_t)offset << REGF_BLOCK_SHIFT;
66}
67
68static size_t bytes_to_block(size_t bytes)
69{
70 return bytes >> REGF_BLOCK_SHIFT;
71}
72
73static inline int block_offset_unallocated(uint16_t offset)
74{
75 return offset == REGF_UNALLOCATED_BLOCK;
76}
77
/* First block (inclusive) of the latest data update. */
static inline size_t region_file_data_begin(const struct region_file *f)
{
	return f->data_blocks[0];
}
82
/* Block just past the last block (exclusive) of the latest data update. */
static inline size_t region_file_data_end(const struct region_file *f)
{
	return f->data_blocks[1];
}
87
88static int all_block_offsets_unallocated(const struct metadata_block *mb)
89{
90 size_t i;
91
92 for (i = 0; i < ARRAY_SIZE(mb->blocks); i++) {
93 if (!block_offset_unallocated(mb->blocks[i]))
94 return 0;
95 }
96
97 return 1;
98}
99
100/* Read metadata block at block i. */
101static int read_mb(size_t i, struct metadata_block *mb,
102 const struct region_file *f)
103{
104 size_t offset = block_to_bytes(i);
105
106 if (rdev_readat(&f->metadata, mb, offset, sizeof(*mb)) < 0)
107 return -1;
108
109 return 0;
110}
111
/* Locate metadata block with the latest update.
 * Binary search invariant: block l contains (or is assumed to contain) at
 * least one allocated slot, while every block at or beyond r is fully
 * unallocated. Converges on the last block holding any updates.
 * Returns 0 on success with *mb holding that block and f->slot set to its
 * first slot index; -1 on I/O error. */
static int find_latest_mb(struct metadata_block *mb, size_t num_mb_blocks,
			  struct region_file *f)
{
	size_t l = 0;
	size_t r = num_mb_blocks;

	while (l + 1 < r) {
		size_t mid = (l + r) / 2;

		if (read_mb(mid, mb, f) < 0)
			return -1;
		if (all_block_offsets_unallocated(mb))
			r = mid;	/* mid is empty: answer lies below. */
		else
			l = mid;	/* mid has updates: answer is mid or above. */
	}

	/* Set the base block slot. */
	f->slot = l * REGF_UPDATES_PER_METADATA_BLOCK;

	/* Re-read metadata block with the latest update. */
	if (read_mb(l, mb, f) < 0)
		return -1;

	return 0;
}
139
140static void find_latest_slot(struct metadata_block *mb, struct region_file *f)
141{
142 size_t i;
143
144 for (i = REGF_UPDATES_PER_METADATA_BLOCK - 1; i > 0; i--) {
145 if (!block_offset_unallocated(mb->blocks[i]))
146 break;
147 }
148
149 f->slot += i;
150}
151
/* Determine the [begin, end) data blocks of the latest update by reading
 * the slot pair (f->slot - 1, f->slot) from the metadata region. On
 * inconsistent metadata f->slot is set to RF_NEED_TO_EMPTY and 0 is
 * returned; -1 is returned only on a true I/O error. */
static int fill_data_boundaries(struct region_file *f)
{
	struct region_device slots;
	size_t offset;
	size_t size = sizeof(f->data_blocks);

	/* Slot 0 holds only the metadata size; data begins (and ends)
	 * right after the metadata. */
	if (f->slot == RF_ONLY_METADATA) {
		size_t start = bytes_to_block(region_device_sz(&f->metadata));
		f->data_blocks[0] = start;
		f->data_blocks[1] = start;
		return 0;
	}

	/* Sanity check the 2 slot sequence to read. If it's out of the
	 * metadata blocks' bounds then one needs to empty it. This is done
	 * to uniquely identify I/O vs data errors in the readat() below. */
	offset = (f->slot - 1) * sizeof(f->data_blocks[0]);
	if (rdev_chain(&slots, &f->metadata, offset, size)) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	if (rdev_readat(&slots, &f->data_blocks, 0, size) < 0) {
		printk(BIOS_ERR, "REGF failed to read data boundaries.\n");
		return -1;
	}

	/* All used blocks should be incrementing from previous write. */
	if (region_file_data_begin(f) >= region_file_data_end(f)) {
		printk(BIOS_ERR, "REGF data boundaries wrong. [%zd,%zd) Need to empty.\n",
			region_file_data_begin(f), region_file_data_end(f));
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Ensure data doesn't exceed the region. */
	if (region_file_data_end(f) >
			bytes_to_block(region_device_sz(&f->rdev))) {
		printk(BIOS_ERR, "REGF data exceeds region %zd > %zd\n",
			region_file_data_end(f),
			bytes_to_block(region_device_sz(&f->rdev)));
		f->slot = RF_NEED_TO_EMPTY;
	}

	return 0;
}
198
/* Initialize a region_file on top of the given region device. Classifies
 * the region's state (empty, needs emptying, or holding data) and, when
 * data exists, locates the latest update's boundaries. Returns 0 on
 * success — including the empty/need-to-empty states — and -1 on an
 * unrecoverable I/O error (f->slot left at RF_FATAL). */
int region_file_init(struct region_file *f, const struct region_device *p)
{
	struct metadata_block mb;

	/* Total number of metadata blocks is found by reading the first
	 * block offset as the metadata is allocated first. At least one
	 * metadata block is available. */

	memset(f, 0, sizeof(*f));
	f->slot = RF_FATAL;

	/* Keep parent around for accessing data later. */
	if (rdev_chain(&f->rdev, p, 0, region_device_sz(p)))
		return -1;

	if (rdev_readat(p, &mb, 0, sizeof(mb)) < 0) {
		printk(BIOS_ERR, "REGF fail reading first metadata block.\n");
		return -1;
	}

	/* No metadata has been allocated. Assume region is empty. */
	if (block_offset_unallocated(mb.blocks[0])) {
		f->slot = RF_EMPTY;
		return 0;
	}

	/* If metadata block is 0 in size then need to empty. */
	if (mb.blocks[0] == 0) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* The region needs to be emptied as the metadata is broken. */
	if (rdev_chain(&f->metadata, p, 0, block_to_bytes(mb.blocks[0]))) {
		f->slot = RF_NEED_TO_EMPTY;
		return 0;
	}

	/* Locate latest metadata block with latest update. */
	if (find_latest_mb(&mb, mb.blocks[0], f)) {
		printk(BIOS_ERR, "REGF fail locating latest metadata block.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	find_latest_slot(&mb, f);

	/* Fill in the data blocks marking the latest update. */
	if (fill_data_boundaries(f)) {
		printk(BIOS_ERR, "REGF fail locating data boundaries.\n");
		f->slot = RF_FATAL;
		return -1;
	}

	return 0;
}
255
256int region_file_data(const struct region_file *f, struct region_device *rdev)
257{
258
259 size_t offset;
260 size_t size;
261
262 /* Slot indicates if any data is available. */
263 if (f->slot <= RF_ONLY_METADATA)
264 return -1;
265
266 offset = block_to_bytes(region_file_data_begin(f));
267 size = block_to_bytes(region_file_data_end(f)) - offset;
268
269 return rdev_chain(rdev, &f->rdev, offset, size);
270}
271
/*
 * Allocate enough metadata blocks to maximize data updates. Do this in
 * terms of blocks. To solve the balance of metadata vs data, 2 linear
 * equations are solved in terms of blocks where 'x' is number of
 * data updates and 'y' is number of metadata blocks:
 *
 * x = number of data updates
 * y = number of metadata blocks
 * T = total blocks in region
 * D = data size in blocks
 * M = metadata size in blocks
 * A = updates accounted for in each metadata block
 *
 * T = D * x + M * y
 * y = x / A
 * -----------------
 * T = D * x + M * x / A = x * (D + M / A)
 * T * A = x * (D * A + M)
 * x = T * A / (D * A + M)
 */
static int allocate_metadata(struct region_file *f, size_t data_blks)
{
	size_t t, m;
	size_t x, y;
	uint16_t tot_metadata;
	const size_t a = REGF_UPDATES_PER_METADATA_BLOCK;
	const size_t d = data_blks;

	/* T: total usable blocks; M: blocks per metadata block (1). */
	t = bytes_to_block(ALIGN_DOWN(region_device_sz(&f->rdev),
					REGF_BLOCK_GRANULARITY));
	m = bytes_to_block(ALIGN_UP(REGF_METADATA_BLOCK_SIZE,
					REGF_BLOCK_GRANULARITY));

	/* Ensure at least one data update can fit with 1 metadata block
	 * within the region. */
	if (d > t - m)
		return -1;

	/* Maximize number of updates by aligning up to the number updates in
	 * a metadata block. May not really be able to achieve the number of
	 * updates in practice, but it ensures enough metadata blocks are
	 * allocated. */
	x = ALIGN_UP(t * a / (d * a + m), a);

	/* One data block has to fit. */
	if (x == 0)
		x = 1;

	/* Now calculate how many metadata blocks are needed. */
	y = ALIGN_UP(x, a) / a;

	/* Need to commit the metadata allocation. Slot 0 persistently
	 * records the total metadata size in blocks. */
	tot_metadata = m * y;
	if (rdev_writeat(&f->rdev, &tot_metadata, 0, sizeof(tot_metadata)) < 0)
		return -1;

	if (rdev_chain(&f->metadata, &f->rdev, 0,
			block_to_bytes(tot_metadata)))
		return -1;

	/* Initialize a 0 data block to start appending from. */
	f->data_blocks[0] = tot_metadata;
	f->data_blocks[1] = tot_metadata;

	return 0;
}
338
339static int update_can_fit(const struct region_file *f, size_t data_blks)
340{
341 size_t metadata_slots;
342 size_t end_blk;
343
344 metadata_slots = region_device_sz(&f->metadata) / sizeof(uint16_t);
345
346 /* No more slots. */
347 if ((size_t)f->slot + 1 >= metadata_slots)
348 return 0;
349
350 /* See where the last block lies from the current one. */
351 end_blk = data_blks + region_file_data_end(f);
352
353 /* Update would have exceeded block addressing. */
354 if (end_blk >= REGF_UNALLOCATED_BLOCK)
355 return 0;
356
357 /* End block exceeds size of region. */
358 if (end_blk > bytes_to_block(region_device_sz(&f->rdev)))
359 return 0;
360
361 return 1;
362}
363
/* Claim the next metadata slot and persist the end offset of the new
 * update. Only the end is written; the begin is implied by the previous
 * slot's value. Returns 0 on success, -1 on I/O error. */
static int commit_data_allocation(struct region_file *f, size_t data_blks)
{
	size_t offset;

	f->slot++;

	offset = f->slot * sizeof(uint16_t);
	/* New data begins where the previous update ended. Note that
	 * data_blocks[0] must be updated before [1] is derived from it. */
	f->data_blocks[0] = region_file_data_end(f);
	f->data_blocks[1] = region_file_data_begin(f) + data_blks;

	if (rdev_writeat(&f->metadata, &f->data_blocks[1], offset,
			sizeof(f->data_blocks[1])) < 0)
		return -1;

	return 0;
}
380
381static int commit_data(const struct region_file *f, const void *buf,
382 size_t size)
383{
384 size_t offset = block_to_bytes(region_file_data_begin(f));
385 if (rdev_writeat(&f->rdev, buf, offset, size) < 0)
386 return -1;
387 return 0;
388}
389
390static int handle_empty(struct region_file *f, size_t data_blks)
391{
392 if (allocate_metadata(f, data_blks)) {
393 printk(BIOS_ERR, "REGF metadata allocation failed: %zd data blocks %zd total blocks\n",
394 data_blks, bytes_to_block(region_device_sz(&f->rdev)));
395 return -1;
396 }
397
398 f->slot = RF_ONLY_METADATA;
399
400 return 0;
401}
402
403static int handle_need_to_empty(struct region_file *f)
404{
405 if (rdev_eraseat(&f->rdev, 0, region_device_sz(&f->rdev)) < 0) {
406 printk(BIOS_ERR, "REGF empty failed.\n");
407 return -1;
408 }
409
410 f->slot = RF_EMPTY;
411
412 return 0;
413}
414
415static int handle_update(struct region_file *f, size_t blocks, const void *buf,
416 size_t size)
417{
418 if (!update_can_fit(f, blocks)) {
419 printk(BIOS_INFO, "REGF update can't fit. Will empty.\n");
420 f->slot = RF_NEED_TO_EMPTY;
421 return 0;
422 }
423
424 if (commit_data_allocation(f, blocks)) {
425 printk(BIOS_ERR, "REGF failed to commit data allocation.\n");
426 return -1;
427 }
428
429 if (commit_data(f, buf, size)) {
430 printk(BIOS_ERR, "REGF failed to commit data.\n");
431 return -1;
432 }
433
434 return 0;
435}
436
/* Append a new data update of size bytes from buf. Drives the f->slot
 * state machine: an empty region first gets metadata allocated, an
 * inconsistent region is erased and retried, and a full region is marked
 * for emptying. Returns 0 once the data is committed, -1 when the file
 * enters (or already is in) the fatal state. */
int region_file_update_data(struct region_file *f, const void *buf, size_t size)
{
	int ret;
	size_t blocks;

	/* Size of the update rounded up to whole blocks. */
	blocks = bytes_to_block(ALIGN_UP(size, REGF_BLOCK_GRANULARITY));

	while (1) {
		int prev_slot = f->slot;

		switch (f->slot) {
		case RF_EMPTY:
			ret = handle_empty(f, blocks);
			break;
		case RF_NEED_TO_EMPTY:
			ret = handle_need_to_empty(f);
			break;
		case RF_FATAL:
			ret = -1;
			break;
		default:
			ret = handle_update(f, blocks, buf, size);
			break;
		}

		/* Failing case. No more updates allowed to be attempted. */
		if (ret) {
			f->slot = RF_FATAL;
			break;
		}

		/* No more state changes and data committed: slot advanced
		 * past the metadata-only state on this iteration. */
		if (f->slot > RF_ONLY_METADATA && prev_slot != f->slot)
			break;
	}

	return ret;
}