blob: 3b7d4292d0d6322c192a3b947326470f5b7d3daf [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Peter Stuge483b7bb2009-04-14 07:40:01 +00002
Aaron Durbin899d13d2015-05-15 23:39:23 -05003#include <assert.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -05004#include <boot_device.h>
5#include <cbfs.h>
Julius Werner1e37c9c2019-12-11 17:09:39 -08006#include <cbmem.h>
Julius Werner1cd013b2019-12-11 16:50:02 -08007#include <commonlib/bsd/cbfs_private.h>
Julius Werner98eeb962019-12-11 15:47:42 -08008#include <commonlib/bsd/compression.h>
Aaron Durbin0b418bd2020-10-08 14:56:03 -06009#include <commonlib/endian.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080010#include <console/console.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080011#include <fmap.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -050012#include <lib.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080013#include <security/tpm/tspi/crtm.h>
14#include <security/vboot/vboot_common.h>
15#include <stdlib.h>
16#include <string.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -050017#include <symbols.h>
Julius Werner09f29212015-09-29 13:51:35 -070018#include <timestamp.h>
Patrick Georgi58a150a2016-05-02 17:22:29 +080019
Julius Werner1e37c9c2019-12-11 17:09:39 -080020static cb_err_t cbfs_boot_lookup(const struct cbfs_boot_device *cbd,
21 const char *name, union cbfs_mdata *mdata, size_t *data_offset)
22{
23 cb_err_t err = CB_CBFS_CACHE_FULL;
24 if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM)
25 err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
26 name, mdata, data_offset);
27 if (err == CB_CBFS_CACHE_FULL)
28 err = cbfs_lookup(&cbd->rdev, name, mdata, data_offset, NULL);
29 return err;
30}
31
/*
 * Locate file |name| in the currently active CBFS (the vboot-selected RW
 * region when available, otherwise RO) and initialize |fh|'s metadata and
 * data region devices for it.  If VBOOT_ENABLE_CBFS_FALLBACK is enabled and
 * the file is missing from the RW region, the RO region is searched as a
 * fallback.  |type| is in/out: when it points to 0 it is filled with the
 * file's type; when nonzero, the located file must match it.  The TPM
 * measurement hook runs on the located file.  Returns 0 on success, -1 on
 * any failure.
 */
int cbfs_boot_locate(struct cbfsf *fh, const char *name, uint32_t *type)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(false);
	if (!cbd)
		return -1;

	size_t data_offset;
	cb_err_t err = cbfs_boot_lookup(cbd, name, &fh->mdata, &data_offset);

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n",
		       name);
		if (!(cbd = cbfs_get_boot_device(true)))
			return -1;
		err = cbfs_boot_lookup(cbd, name, &fh->mdata, &data_offset);
	}
	if (err)
		return -1;

	/* The metadata rdev is chained over the in-memory copy in |fh|; the
	   header's offset field is the total metadata size in big endian. */
	size_t msize = be32toh(fh->mdata.h.offset);
	if (rdev_chain(&fh->metadata, &addrspace_32bit.rdev,
		       (uintptr_t)&fh->mdata, msize) ||
	    rdev_chain(&fh->data, &cbd->rdev, data_offset,
		       be32toh(fh->mdata.h.len)))
		return -1;
	if (type) {
		if (!*type)
			*type = be32toh(fh->mdata.h.type);
		else if (*type != be32toh(fh->mdata.h.type))
			return -1;
	}

	if (tspi_measure_cbfs_hook(fh, name))
		return -1;

	return 0;
}
69
70void *cbfs_boot_map_with_leak(const char *name, uint32_t type, size_t *size)
71{
Aaron Durbin37a5d152015-09-17 16:09:30 -050072 struct cbfsf fh;
Aaron Durbin899d13d2015-05-15 23:39:23 -050073 size_t fsize;
74
75 if (cbfs_boot_locate(&fh, name, &type))
76 return NULL;
77
Aaron Durbin37a5d152015-09-17 16:09:30 -050078 fsize = region_device_sz(&fh.data);
Aaron Durbin899d13d2015-05-15 23:39:23 -050079
80 if (size != NULL)
81 *size = fsize;
82
Aaron Durbin37a5d152015-09-17 16:09:30 -050083 return rdev_mmap(&fh.data, 0, fsize);
Aaron Durbin899d13d2015-05-15 23:39:23 -050084}
85
Pratik Prajapati2a7708a2016-11-30 17:29:10 -080086int cbfs_locate_file_in_region(struct cbfsf *fh, const char *region_name,
Philipp Deppenwiese66f9a092018-11-08 10:59:40 +010087 const char *name, uint32_t *type)
Pratik Prajapati2a7708a2016-11-30 17:29:10 -080088{
89 struct region_device rdev;
Bill XIEbad08c22020-02-13 11:11:35 +080090 int ret = 0;
Pratik Prajapati2a7708a2016-11-30 17:29:10 -080091 if (fmap_locate_area_as_rdev(region_name, &rdev)) {
92 LOG("%s region not found while looking for %s\n",
Philipp Deppenwiese66f9a092018-11-08 10:59:40 +010093 region_name, name);
Pratik Prajapati2a7708a2016-11-30 17:29:10 -080094 return -1;
95 }
96
Bill XIEbad08c22020-02-13 11:11:35 +080097 ret = cbfs_locate(fh, &rdev, name, type);
98 if (!ret)
99 if (tspi_measure_cbfs_hook(fh, name))
100 return -1;
101 return ret;
Pratik Prajapati2a7708a2016-11-30 17:29:10 -0800102}
103
Aaron Durbina85febc2020-05-15 15:09:10 -0600104static inline bool fsps_env(void)
105{
106 /* FSP-S is assumed to be loaded in ramstage. */
107 if (ENV_RAMSTAGE)
108 return true;
109 return false;
110}
111
Aaron Durbinecbfa992020-05-15 17:01:58 -0600112static inline bool fspm_env(void)
113{
114 /* FSP-M is assumed to be loaded in romstage. */
115 if (ENV_ROMSTAGE)
116 return true;
117 return false;
118}
119
Aaron Durbina121f952020-05-26 15:48:10 -0600120static inline bool cbfs_lz4_enabled(void)
121{
Aaron Durbina85febc2020-05-15 15:09:10 -0600122 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
123 return true;
Aaron Durbinecbfa992020-05-15 17:01:58 -0600124 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
125 return true;
Aaron Durbina85febc2020-05-15 15:09:10 -0600126
Aaron Durbina121f952020-05-26 15:48:10 -0600127 if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
128 return false;
129
130 return true;
131}
132
133static inline bool cbfs_lzma_enabled(void)
134{
Aaron Durbina85febc2020-05-15 15:09:10 -0600135 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
136 return true;
Aaron Durbinecbfa992020-05-15 17:01:58 -0600137 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
138 return true;
Aaron Durbina121f952020-05-26 15:48:10 -0600139 /* We assume here romstage and postcar are never compressed. */
140 if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
141 return false;
142 if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
143 return false;
144 if ((ENV_ROMSTAGE || ENV_POSTCAR)
145 && !CONFIG(COMPRESS_RAMSTAGE))
146 return false;
147 return true;
148}
149
/*
 * Read |in_size| bytes at |offset| from |rdev| into |buffer| (capacity
 * |buffer_size|), decompressing according to |compression|.  Returns the
 * number of bytes written to |buffer|, or 0 on error (unknown algorithm,
 * decompressor not built into this stage, short read, or failed mmap --
 * and, for the compressed paths, whatever error the decompressor reports).
 */
size_t cbfs_load_and_decompress(const struct region_device *rdev, size_t offset,
	size_t in_size, void *buffer, size_t buffer_size, uint32_t compression)
{
	size_t out_size;
	void *map;

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		/* Straight copy; output size equals input size. */
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, offset, in_size) != in_size)
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_stage_load_and_decompress() takes care of in-place
		   lz4 decompression by setting up the rdev to be in memory. */
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		timestamp_add_now(TS_START_ULZ4F);
		out_size = ulz4fn(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZ4F);

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		/* Note: timestamp not useful for memory-mapped media (x86) */
		timestamp_add_now(TS_START_ULZMA);
		out_size = ulzman(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZMA);

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}
202
Aaron Durbin84f394e2020-05-26 16:16:42 -0600203static size_t cbfs_stage_load_and_decompress(const struct region_device *rdev,
204 size_t offset, size_t in_size, void *buffer, size_t buffer_size,
205 uint32_t compression)
206{
207 struct region_device rdev_src;
208
209 if (compression == CBFS_COMPRESS_LZ4) {
210 if (!cbfs_lz4_enabled())
211 return 0;
212 /* Load the compressed image to the end of the available memory
213 * area for in-place decompression. It is the responsibility of
214 * the caller to ensure that buffer_size is large enough
215 * (see compression.h, guaranteed by cbfstool for stages). */
216 void *compr_start = buffer + buffer_size - in_size;
217 if (rdev_readat(rdev, compr_start, offset, in_size) != in_size)
218 return 0;
219 /* Create a region device backed by memory. */
220 rdev_chain(&rdev_src, &addrspace_32bit.rdev,
221 (uintptr_t)compr_start, in_size);
222
223 return cbfs_load_and_decompress(&rdev_src, 0, in_size, buffer,
224 buffer_size, compression);
225 }
226
227 /* All other algorithms can use the generic implementation. */
228 return cbfs_load_and_decompress(rdev, offset, in_size, buffer,
229 buffer_size, compression);
230}
231
/* Convert a nibble value (0..15) to its lowercase ASCII hex digit. */
static inline int tohex4(unsigned int c)
{
	if (c > 9)
		return c - 10 + 'a';
	return c + '0';
}
236
/* Write the low byte of |val| as two lowercase hex digits to dest[0..1]. */
static void tohex8(unsigned int val, char *dest)
{
	int i;

	for (i = 1; i >= 0; i--) {
		dest[i] = tohex4(val & 0xf);
		val >>= 4;
	}
}
242
/* Write the low 16 bits of |val| as four lowercase hex digits to
   dest[0..3]. */
static void tohex16(unsigned int val, char *dest)
{
	int i;

	for (i = 3; i >= 0; i--) {
		dest[i] = tohex4(val & 0xf);
		val >>= 4;
	}
}
250
Aaron Durbin899d13d2015-05-15 23:39:23 -0500251void *cbfs_boot_map_optionrom(uint16_t vendor, uint16_t device)
Peter Stuge483b7bb2009-04-14 07:40:01 +0000252{
Hung-Te Lin6fe0cab2013-01-22 18:57:56 +0800253 char name[17] = "pciXXXX,XXXX.rom";
Peter Stuge483b7bb2009-04-14 07:40:01 +0000254
Philipp Deppenwiese66f9a092018-11-08 10:59:40 +0100255 tohex16(vendor, name + 3);
256 tohex16(device, name + 8);
Peter Stuge483b7bb2009-04-14 07:40:01 +0000257
Aaron Durbin899d13d2015-05-15 23:39:23 -0500258 return cbfs_boot_map_with_leak(name, CBFS_TYPE_OPTIONROM, NULL);
Peter Stuge483b7bb2009-04-14 07:40:01 +0000259}
260
Martin Rotha616a4b2020-01-21 09:28:40 -0700261void *cbfs_boot_map_optionrom_revision(uint16_t vendor, uint16_t device, uint8_t rev)
262{
263 char name[20] = "pciXXXX,XXXX,XX.rom";
264
265 tohex16(vendor, name + 3);
266 tohex16(device, name + 8);
267 tohex8(rev, name + 13);
268
269 return cbfs_boot_map_with_leak(name, CBFS_TYPE_OPTIONROM, NULL);
270}
271
T Michael Turney809fa7b2018-04-12 13:36:40 -0700272size_t cbfs_boot_load_file(const char *name, void *buf, size_t buf_size,
273 uint32_t type)
Julius Wernerf975e552016-08-19 15:43:06 -0700274{
275 struct cbfsf fh;
276 uint32_t compression_algo;
277 size_t decompressed_size;
Julius Wernerf975e552016-08-19 15:43:06 -0700278
279 if (cbfs_boot_locate(&fh, name, &type) < 0)
280 return 0;
281
282 if (cbfsf_decompression_info(&fh, &compression_algo,
Philipp Deppenwiese66f9a092018-11-08 10:59:40 +0100283 &decompressed_size)
284 < 0
285 || decompressed_size > buf_size)
Julius Wernerf975e552016-08-19 15:43:06 -0700286 return 0;
287
288 return cbfs_load_and_decompress(&fh.data, 0, region_device_sz(&fh.data),
289 buf, buf_size, compression_algo);
290}
291
/*
 * Load the stage program described by |pstage| from its region device:
 * read and byte-swap the cbfs_stage header, then copy/decompress the
 * payload to its link-time load address, zero the BSS tail, and record
 * the load area and entry point in |pstage|.  Execute-in-place stages on
 * memory-mapped boot media skip the copy.  Returns 0 on success, -1 on
 * read or decompression failure.
 */
int cbfs_prog_stage_load(struct prog *pstage)
{
	struct cbfs_stage stage;
	uint8_t *load;
	void *entry;
	size_t fsize;
	size_t foffset;
	const struct region_device *fh = prog_rdev(pstage);

	if (rdev_readat(fh, &stage, 0, sizeof(stage)) != sizeof(stage))
		return -1;

	/* The stage payload immediately follows the cbfs_stage header. */
	fsize = region_device_sz(fh);
	fsize -= sizeof(stage);
	foffset = 0;
	foffset += sizeof(stage);

	/* cbfs_stage fields are written in little endian despite the other
	   cbfs data types being encoded in big endian. */
	stage.compression = read_le32(&stage.compression);
	stage.entry = read_le64(&stage.entry);
	stage.load = read_le64(&stage.load);
	stage.len = read_le32(&stage.len);
	stage.memlen = read_le32(&stage.memlen);

	assert(fsize == stage.len);

	load = (void *)(uintptr_t)stage.load;
	entry = (void *)(uintptr_t)stage.entry;

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		/* If the mapped payload already sits at the load address the
		   stage executes in place; skip the copy and decompression. */
		void *mapping = rdev_mmap(fh, foffset, fsize);
		rdev_munmap(fh, mapping);
		if (mapping == load)
			goto out;
	}

	fsize = cbfs_stage_load_and_decompress(fh, foffset, fsize, load,
					       stage.memlen, stage.compression);
	if (!fsize)
		return -1;

	/* Clear area not covered by file. */
	memset(&load[fsize], 0, stage.memlen - fsize);

	prog_segment_loaded((uintptr_t)load, stage.memlen, SEG_FINAL);

out:
	prog_set_area(pstage, load, stage.memlen);
	prog_set_entry(pstage, entry, NULL);

	return 0;
}
Aaron Durbin6d720f32015-12-08 17:00:23 -0600348
/*
 * Point |cbd|'s mcache at the buffer identified by |id| (RO or RW mcache
 * CBMEM id).  Once CBMEM is online, the migrated copy stored there is
 * used.  Before that (romstage or earlier), the static cbfs_mcache region
 * is split between the RO and RW caches, with the RW share controlled by
 * CONFIG_CBFS_MCACHE_RW_PERCENTAGE.  Leaves |cbd| untouched when mcaches
 * are disabled, in SMM, or when no buffer can be found.
 */
void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		/* Boundary between the RO cache (below) and RW cache (above)
		   within the static region, aligned down as required. */
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			       CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary,
					    CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}
Julius Werner1e37c9c2019-12-11 17:09:39 -0800373
/*
 * Return the CBFS boot device to use: the vboot-selected RW device when
 * one is active, otherwise the static "COREBOOT" RO region.  |force_ro|
 * bypasses the RW lookup.  The RO device is initialized lazily on first
 * use and memoized; in the initial stage its mcache is also built here.
 * Returns NULL if the COREBOOT FMAP region cannot be located.
 */
const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if first file is from RW.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet
		   or decided to boot into recovery mode. */
		if (rw)
			return rw;
	}

	/* Memoized: a non-empty rdev means the RO device is already set up. */
	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		return NULL;

	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	/* Only the initial stage builds the RO mcache; later stages reuse
	   it (possibly via the CBMEM copy). */
	if (ENV_INITIAL_STAGE && !CONFIG(NO_CBFS_MCACHE)) {
		cb_err_t err = cbfs_mcache_build(&ro.rdev, ro.mcache,
						 ro.mcache_size, NULL);
		if (err && err != CB_CBFS_CACHE_FULL)
			die("Failed to build RO mcache");
	}

	return &ro;
}
408
409#if !CONFIG(NO_CBFS_MCACHE)
410static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
411{
412 if (!cbd)
413 return;
414
415 size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
416 void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
417 if (!cbmem_mcache) {
418 printk(BIOS_ERR, "ERROR: Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
419 cbmem_id, real_size);
420 return;
421 }
422 memcpy(cbmem_mcache, cbd->mcache, real_size);
423}
424
/* CBMEM init hook: migrate both mcaches into CBMEM so later stages can
   retrieve them via cbmem_entry_find() (see
   cbfs_boot_device_find_mcache()). */
static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
430ROMSTAGE_CBMEM_INIT_HOOK(cbfs_mcache_migrate)
431#endif