blob: 6071437d05376e6ce8a73ee9955654df32a3a541 [file] [log] [blame]
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00001/*
Peter Stuge45ae92ff2009-04-14 19:48:32 +00002 * cbfs-mkstage
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00003 *
4 * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
Patrick Georgib7b56dd82009-09-14 13:29:27 +00005 * 2009 coresystems GmbH
6 * written by Patrick Georgi <patrick.georgi@coresystems.de>
David Hendricks90ca3b62012-11-16 14:48:22 -08007 * Copyright (C) 2012 Google, Inc.
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00008 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000017 */
18
Francis Rowe3fb8b0d2014-11-21 02:38:48 +000019#include <inttypes.h>
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000020#include <stdio.h>
21#include <stdlib.h>
22#include <string.h>
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000023
Aaron Durbin54ef3062014-03-05 12:12:09 -060024#include "elfparsing.h"
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000025#include "common.h"
Patrick Georgib7b56dd82009-09-14 13:29:27 +000026#include "cbfs.h"
Aaron Durbin4be16742015-09-15 17:00:23 -050027#include "rmodule.h"
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000028
Julius Werner09f29212015-09-29 13:51:35 -070029#include <commonlib/compression.h>
30
Furquan Shaikh405304a2014-10-30 11:44:20 -070031/* Checks if program segment contains the ignored section */
32static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr *shdr)
33{
34 /* If no ignored section, return false. */
35 if (shdr == NULL)
36 return 0;
37
38 Elf64_Addr sh_start = shdr->sh_addr;
39 Elf64_Addr sh_end = shdr->sh_addr + shdr->sh_size;
40 Elf64_Addr ph_start = phdr->p_vaddr;
41 Elf64_Addr ph_end = phdr->p_vaddr + phdr->p_memsz;
42
43 /* Return true only if section occupies whole of segment. */
44 if ((sh_start == ph_start) && (sh_end == ph_end)) {
Francis Rowe3fb8b0d2014-11-21 02:38:48 +000045 DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", ph_start);
Furquan Shaikh405304a2014-10-30 11:44:20 -070046 return 1;
47 }
48
49 /* If shdr intersects phdr at all, its a conflict */
50 if (((sh_start >= ph_start) && (sh_start <= ph_end)) ||
51 ((sh_end >= ph_start) && (sh_end <= ph_end))) {
52 ERROR("Conflicting sections in segment\n");
53 exit(1);
54 }
55
56 /* Program header doesn't need to be ignored. */
57 return 0;
58}
59
60/* Find section header based on ignored section name */
61static Elf64_Shdr *find_ignored_section_header(struct parsed_elf *pelf,
62 const char *ignore_section)
63{
64 int i;
65 const char *shstrtab;
66
67 /* No section needs to be ignored */
68 if (ignore_section == NULL)
69 return NULL;
70
71 DEBUG("Section to be ignored: %s\n", ignore_section);
72
73 /* Get pointer to string table */
74 shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]);
75
76 for (i = 0; i < pelf->ehdr.e_shnum; i++) {
77 Elf64_Shdr *shdr;
78 const char *section_name;
79
80 shdr = &pelf->shdr[i];
81 section_name = &shstrtab[shdr->sh_name];
82
83 /* If section name matches ignored string, return shdr */
84 if (strcmp(section_name, ignore_section) == 0)
85 return shdr;
86 }
87
88 /* No section matches ignore string */
89 return NULL;
90}
91
/* Serialize a cbfs_stage header into outheader.
 *
 * Appends, in struct cbfs_stage field order: compression algorithm,
 * entry point, load address, file size and memory size — all encoded
 * little-endian via xdr_le.  The field order here must match the
 * struct cbfs_stage layout exactly.
 */
static void fill_cbfs_stage(struct buffer *outheader, enum comp_algo algo,
			    uint64_t entry, uint64_t loadaddr,
			    uint32_t filesize, uint32_t memsize)
{
	/* N.B. The original plan was that SELF data was B.E.
	 * but: this is all L.E.
	 * Maybe we should just change the spec.
	 */
	xdr_le.put32(outheader, algo);
	xdr_le.put64(outheader, entry);
	xdr_le.put64(outheader, loadaddr);
	xdr_le.put32(outheader, filesize);
	xdr_le.put32(outheader, memsize);
}
106
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800107/* returns size of result, or -1 if error.
108 * Note that, with the new code, this function
109 * works for all elf files, not just the restricted set.
110 */
111int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
Sol Boucher6310ccc2015-05-07 21:12:28 -0700112 enum comp_algo algo, uint32_t *location,
Furquan Shaikh405304a2014-10-30 11:44:20 -0700113 const char *ignore_section)
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800114{
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700115 struct parsed_elf pelf;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800116 Elf64_Phdr *phdr;
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700117 Elf64_Ehdr *ehdr;
Furquan Shaikh405304a2014-10-30 11:44:20 -0700118 Elf64_Shdr *shdr_ignored;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700119 Elf64_Addr virt_to_phys;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800120 char *buffer;
121 struct buffer outheader;
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700122 int ret = -1;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800123
124 int headers;
125 int i, outlen;
HC Yen14ec1992015-01-22 09:57:34 +0800126 uint64_t data_start, data_end, mem_end;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800127
128 comp_func_ptr compress = compression_function(algo);
129 if (!compress)
130 return -1;
131
132 DEBUG("start: parse_elf_to_stage(location=0x%x)\n", *location);
133
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700134 int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800135
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700136 if (parse_elf(input, &pelf, flags)) {
137 ERROR("Couldn't parse ELF\n");
138 return -1;
139 }
140
141 ehdr = &pelf.ehdr;
142 phdr = &pelf.phdr[0];
143
Furquan Shaikh405304a2014-10-30 11:44:20 -0700144 /* Find the section header corresponding to ignored-section */
145 shdr_ignored = find_ignored_section_header(&pelf, ignore_section);
146
147 if (ignore_section && (shdr_ignored == NULL))
148 WARN("Ignore section not found\n");
149
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700150 headers = ehdr->e_phnum;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800151
Furquan Shaikh405304a2014-10-30 11:44:20 -0700152 /* Ignore the program header containing ignored section */
153 for (i = 0; i < headers; i++) {
154 if (is_phdr_ignored(&phdr[i], shdr_ignored))
155 phdr[i].p_type = PT_NULL;
156 }
157
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800158 data_start = ~0;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000159 data_end = 0;
160 mem_end = 0;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700161 virt_to_phys = 0;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000162
163 for (i = 0; i < headers; i++) {
HC Yen14ec1992015-01-22 09:57:34 +0800164 uint64_t start, mend, rend;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000165
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800166 if (phdr[i].p_type != PT_LOAD)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000167 continue;
168
169 /* Empty segments are never interesting */
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800170 if (phdr[i].p_memsz == 0)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000171 continue;
172
173 /* BSS */
174
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800175 start = phdr[i].p_paddr;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000176
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800177 mend = start + phdr[i].p_memsz;
178 rend = start + phdr[i].p_filesz;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000179
180 if (start < data_start)
181 data_start = start;
182
183 if (rend > data_end)
184 data_end = rend;
185
186 if (mend > mem_end)
187 mem_end = mend;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700188
189 if (virt_to_phys == 0)
190 virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000191 }
192
Patrick Georgi9341acd2009-12-23 12:52:56 +0000193 if (data_start < *location) {
194 data_start = *location;
195 }
196
Patrick Georgia6c337d2010-02-03 17:56:37 +0000197 if (data_end <= data_start) {
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800198 ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
199 "the ELF file is correct and resides in ROM space.\n",
200 (unsigned long)data_end, (unsigned long)data_start);
Patrick Georgia6c337d2010-02-03 17:56:37 +0000201 exit(1);
202 }
203
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000204 /* allocate an intermediate buffer for the data */
205 buffer = calloc(data_end - data_start, 1);
206
207 if (buffer == NULL) {
Hung-Te Lin4d87d4e2013-01-28 14:39:43 +0800208 ERROR("Unable to allocate memory: %m\n");
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700209 goto err;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000210 }
211
212 /* Copy the file data into the buffer */
213
214 for (i = 0; i < headers; i++) {
HC Yen14ec1992015-01-22 09:57:34 +0800215 uint64_t l_start, l_offset = 0;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000216
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800217 if (phdr[i].p_type != PT_LOAD)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000218 continue;
219
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800220 if (phdr[i].p_memsz == 0)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000221 continue;
222
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800223 l_start = phdr[i].p_paddr;
Patrick Georgi9341acd2009-12-23 12:52:56 +0000224 if (l_start < *location) {
225 l_offset = *location - l_start;
226 l_start = *location;
227 }
228
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800229 /* A legal ELF file can have a program header with
230 * non-zero length but zero-length file size and a
231 * non-zero offset which, added together, are > than
232 * input->size (i.e. the total file size). So we need
233 * to not even test in the case that p_filesz is zero.
234 */
235 if (! phdr[i].p_filesz)
236 continue;
237 if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
238 ERROR("Underflow copying out the segment."
Paul Menzel470c37c2014-03-16 00:15:57 +0100239 "File has %zu bytes left, segment end is %zu\n",
240 input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
Daniele Forsi8e898472014-07-27 12:01:40 +0200241 free(buffer);
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700242 goto err;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800243 }
Patrick Georgi9341acd2009-12-23 12:52:56 +0000244 memcpy(buffer + (l_start - data_start),
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800245 &input->data[phdr[i].p_offset + l_offset],
246 phdr[i].p_filesz - l_offset);
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000247 }
248
249 /* Now make the output buffer */
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800250 if (buffer_create(output, sizeof(struct cbfs_stage) + data_end - data_start,
Hung-Te Linc13e4bf2013-01-29 15:22:11 +0800251 input->name) != 0) {
Hung-Te Lin4d87d4e2013-01-28 14:39:43 +0800252 ERROR("Unable to allocate memory: %m\n");
Paul Menzel2c8f81b2013-04-11 10:45:11 +0200253 free(buffer);
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700254 goto err;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000255 }
Hung-Te Linc13e4bf2013-01-29 15:22:11 +0800256 memset(output->data, 0, output->size);
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000257
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800258 /* Compress the data, at which point we'll know information
259 * to fill out the header. This seems backward but it works because
260 * - the output header is a known size (not always true in many xdr's)
261 * - we do need to know the compressed output size first
Gabe Black845aa142014-02-21 01:01:06 -0800262 * If compression fails or makes the data bigger, we'll warn about it
263 * and use the original data.
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800264 */
Gabe Blackdbd006b2014-02-20 23:38:49 -0800265 if (compress(buffer, data_end - data_start,
266 (output->data + sizeof(struct cbfs_stage)),
Sol Boucher0e539312015-03-05 15:38:03 -0800267 &outlen) < 0 || (unsigned)outlen > data_end - data_start) {
Gabe Black845aa142014-02-21 01:01:06 -0800268 WARN("Compression failed or would make the data bigger "
269 "- disabled.\n");
270 memcpy(output->data + sizeof(struct cbfs_stage),
271 buffer, data_end - data_start);
Julius Werner09f29212015-09-29 13:51:35 -0700272 outlen = data_end - data_start;
Gabe Black845aa142014-02-21 01:01:06 -0800273 algo = CBFS_COMPRESS_NONE;
Gabe Blackdbd006b2014-02-20 23:38:49 -0800274 }
Julius Werner09f29212015-09-29 13:51:35 -0700275
276 /* Check for enough BSS scratch space to decompress LZ4 in-place. */
277 if (algo == CBFS_COMPRESS_LZ4) {
278 size_t result;
279 size_t memlen = mem_end - data_start;
280 size_t compressed_size = outlen;
281 char *compare_buffer = malloc(memlen);
282 char *start = compare_buffer + memlen - compressed_size;
283
284 if (compare_buffer == NULL) {
285 ERROR("Can't allocate memory!\n");
286 free(buffer);
287 goto err;
288 }
289
290 memcpy(start, output->data + sizeof(struct cbfs_stage),
291 compressed_size);
292 result = ulz4fn(start, compressed_size, compare_buffer, memlen);
293
294 if (result == 0) {
295 ERROR("Not enough scratch space to decompress LZ4 in-place -- increase BSS size or disable compression!\n");
296 free(compare_buffer);
297 free(buffer);
298 goto err;
299 }
300 if (result != data_end - data_start ||
301 memcmp(compare_buffer, buffer, data_end - data_start)) {
302 ERROR("LZ4 compression BUG! Report to mailing list.\n");
303 free(compare_buffer);
304 free(buffer);
305 goto err;
306 }
307 free(compare_buffer);
308 }
309
Stefan Reinauer63217582012-10-29 16:52:36 -0700310 free(buffer);
311
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800312 /* Set up for output marshaling. */
313 outheader.data = output->data;
314 outheader.size = 0;
Aaron Durbin4be16742015-09-15 17:00:23 -0500315
Martin Rotha5648112017-06-03 20:05:42 -0600316 /* coreboot expects entry point to be physical address. Thus, adjust the
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700317 * entry point accordingly.
318 */
Aaron Durbin4be16742015-09-15 17:00:23 -0500319 fill_cbfs_stage(&outheader, algo, ehdr->e_entry + virt_to_phys,
320 data_start, outlen, mem_end - data_start);
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800321
Patrick Georgib7b56dd82009-09-14 13:29:27 +0000322 if (*location)
323 *location -= sizeof(struct cbfs_stage);
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800324 output->size = sizeof(struct cbfs_stage) + outlen;
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700325 ret = 0;
326
327err:
328 parsed_elf_destroy(&pelf);
329 return ret;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000330}
Aaron Durbin4be16742015-09-15 17:00:23 -0500331
/* State threaded through the relocation filter while building an
 * execute-in-place (XIP) stage. */
struct xip_context {
	struct rmod_context rmodctx;
	/* Index of the ignored section in the section header table;
	 * 0 when there is no ignored section. */
	size_t ignored_section_idx;
	/* Header of the section whose relocations are dropped, or NULL. */
	Elf64_Shdr *ignored_section;
};
337
338static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r)
339{
340 size_t symbol_index;
341 int reloc_type;
342 struct parsed_elf *pelf;
343 Elf64_Sym *sym;
344 struct xip_context *xipctx;
345
346 xipctx = f->context;
347 pelf = &xipctx->rmodctx.pelf;
348
349 /* Allow everything through if there isn't an ignored section. */
350 if (xipctx->ignored_section == NULL)
351 return 1;
352
353 reloc_type = ELF64_R_TYPE(r->r_info);
354 symbol_index = ELF64_R_SYM(r->r_info);
355 sym = &pelf->syms[symbol_index];
356
357 /* Nothing to filter. Relocation is not being applied to the
358 * ignored section. */
359 if (sym->st_shndx != xipctx->ignored_section_idx)
360 return 1;
361
362 /* If there is any relocation to the ignored section that isn't
363 * absolute fail as current assumptions are that all relocations
364 * are absolute. */
Patrick Rudolph21046a32018-11-26 15:37:51 +0100365 if ((reloc_type != R_386_32) &&
366 (reloc_type != R_AMD64_64) &&
367 (reloc_type != R_AMD64_32)) {
Aaron Durbin4be16742015-09-15 17:00:23 -0500368 ERROR("Invalid reloc to ignored section: %x\n", reloc_type);
369 return -1;
370 }
371
372 /* Relocation referencing ignored section. Don't emit it. */
373 return 0;
374}
375
/* Convert a parsed ELF into an uncompressed execute-in-place CBFS
 * stage in *output, rewriting every collected relocation so the single
 * loadable segment runs at *location.
 *
 * On success *location is backed up by sizeof(struct cbfs_stage) so
 * the stage data itself lands at the originally requested address.
 * Relocations into ignore_section are dropped (see rmod_filter).
 *
 * Returns 0 on success, -1 on error.
 * NOTE(review): assumes the ELF has exactly one loadable segment
 * (pelf->phdr is used without iterating) — presumably guaranteed by
 * rmodule_init; confirm against rmodule.c. */
int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output,
			   uint32_t *location, const char *ignore_section)
{
	struct xip_context xipctx;
	struct rmod_context *rmodctx;
	struct reloc_filter filter;
	struct parsed_elf *pelf;
	size_t output_sz;
	uint32_t adjustment;
	struct buffer binput;
	struct buffer boutput;
	Elf64_Xword i;
	int ret = -1;

	xipctx.ignored_section_idx = 0;
	rmodctx = &xipctx.rmodctx;
	pelf = &rmodctx->pelf;

	if (rmodule_init(rmodctx, input))
		return -1;

	/* Only support x86 / x86_64 XIP currently. */
	if ((rmodctx->pelf.ehdr.e_machine != EM_386) &&
	    (rmodctx->pelf.ehdr.e_machine != EM_X86_64)) {
		ERROR("Only support XIP stages for x86/x86_64\n");
		goto out;
	}

	xipctx.ignored_section =
		find_ignored_section_header(pelf, ignore_section);

	/* Record the ignored section's index so rmod_filter can compare
	 * it against each relocation's symbol section. */
	if (xipctx.ignored_section != NULL)
		xipctx.ignored_section_idx =
			xipctx.ignored_section - pelf->shdr;

	filter.filter = rmod_filter;
	filter.context = &xipctx;

	if (rmodule_collect_relocations(rmodctx, &filter))
		goto out;

	/* Stage header followed by the segment's file data. */
	output_sz = sizeof(struct cbfs_stage) + pelf->phdr->p_filesz;
	if (buffer_create(output, output_sz, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto out;
	}
	buffer_clone(&boutput, output);
	memset(buffer_get(&boutput), 0, output_sz);
	buffer_set_size(&boutput, 0);

	/* Single loadable segment. The entire segment moves to its final
	 * location based on the virtual address of the loadable segment. */
	adjustment = *location - pelf->phdr->p_vaddr;
	DEBUG("Relocation adjustment: %08x\n", adjustment);

	fill_cbfs_stage(&boutput, CBFS_COMPRESS_NONE,
			(uint32_t)pelf->ehdr.e_entry + adjustment,
			(uint32_t)pelf->phdr->p_vaddr + adjustment,
			pelf->phdr->p_filesz, pelf->phdr->p_memsz);
	/* Need an adjustable buffer. */
	buffer_clone(&binput, input);
	buffer_seek(&binput, pelf->phdr->p_offset);
	bputs(&boutput, buffer_get(&binput), pelf->phdr->p_filesz);

	/* Re-point boutput at the copied segment data (just past the
	 * stage header) for the relocation fixup pass below. */
	buffer_clone(&boutput, output);
	buffer_seek(&boutput, sizeof(struct cbfs_stage));

	/* Make adjustments to all the relocations within the program. */
	for (i = 0; i < rmodctx->nrelocs; i++) {
		size_t reloc_offset;
		uint32_t val;
		struct buffer in, out;

		/* The relocations represent in-program addresses of the
		 * linked program. Obtain the offset into the program to do
		 * the adjustment. */
		reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr;

		buffer_clone(&out, &boutput);
		buffer_seek(&out, reloc_offset);
		buffer_clone(&in, &out);
		/* Appease around xdr semantics: xdr decrements buffer
		 * size when get()ing and appends to size when put()ing. */
		buffer_set_size(&out, 0);

		/* Read the linked address and rewrite it shifted to the
		 * XIP location. */
		val = xdr_le.get32(&in);
		DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val,
		      val + adjustment);
		xdr_le.put32(&out, val + adjustment);
	}

	/* Need to back up the location to include cbfs stage metadata. */
	*location -= sizeof(struct cbfs_stage);
	ret = 0;

out:
	rmodule_cleanup(rmodctx);
	return ret;
}