Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 1 | /* |
Peter Stuge | 45ae92ff | 2009-04-14 19:48:32 +0000 | [diff] [blame] | 2 | * cbfs-mkstage |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 3 | * |
| 4 | * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net> |
Patrick Georgi | b7b56dd8 | 2009-09-14 13:29:27 +0000 | [diff] [blame] | 5 | * 2009 coresystems GmbH |
| 6 | * written by Patrick Georgi <patrick.georgi@coresystems.de> |
David Hendricks | 90ca3b6 | 2012-11-16 14:48:22 -0800 | [diff] [blame] | 7 | * Copyright (C) 2012 Google, Inc. |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License as published by |
| 11 | * the Free Software Foundation; version 2 of the License. |
| 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, |
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 16 | * GNU General Public License for more details. |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 17 | */ |
| 18 | |
Francis Rowe | 3fb8b0d | 2014-11-21 02:38:48 +0000 | [diff] [blame] | 19 | #include <inttypes.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 20 | #include <stdio.h> |
| 21 | #include <stdlib.h> |
| 22 | #include <string.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 23 | |
Aaron Durbin | 54ef306 | 2014-03-05 12:12:09 -0600 | [diff] [blame] | 24 | #include "elfparsing.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 25 | #include "common.h" |
Patrick Georgi | b7b56dd8 | 2009-09-14 13:29:27 +0000 | [diff] [blame] | 26 | #include "cbfs.h" |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 27 | #include "rmodule.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 28 | |
Julius Werner | 09f2921 | 2015-09-29 13:51:35 -0700 | [diff] [blame] | 29 | #include <commonlib/compression.h> |
| 30 | |
/* Checks if a program segment corresponds to the ignored section.
 *
 * Returns 1 when the section occupies the segment exactly (the whole
 * segment can then be dropped), 0 when the segment does not touch the
 * section at all. A partial overlap is unrecoverable — the segment can
 * be neither kept nor dropped wholesale — so it is reported and the
 * tool exits.
 */
static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr *shdr)
{
	/* If no ignored section, return false. */
	if (shdr == NULL)
		return 0;

	Elf64_Addr sh_start = shdr->sh_addr;
	Elf64_Addr sh_end = shdr->sh_addr + shdr->sh_size;
	Elf64_Addr ph_start = phdr->p_vaddr;
	Elf64_Addr ph_end = phdr->p_vaddr + phdr->p_memsz;

	/* Return true only if section occupies whole of segment. */
	if ((sh_start == ph_start) && (sh_end == ph_end)) {
		DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", ph_start);
		return 1;
	}

	/* Both ranges are half-open ([start, end)), so they genuinely
	 * intersect only when each one begins before the other ends.
	 * The previous closed-interval test (<=/>=) also flagged merely
	 * adjacent ranges (sh_end == ph_start, or sh_start == ph_end)
	 * as conflicts and aborted on perfectly valid layouts.
	 */
	if ((sh_start < ph_end) && (sh_end > ph_start)) {
		ERROR("Conflicting sections in segment\n");
		exit(1);
	}

	/* Program header doesn't need to be ignored. */
	return 0;
}
| 59 | |
| 60 | /* Find section header based on ignored section name */ |
| 61 | static Elf64_Shdr *find_ignored_section_header(struct parsed_elf *pelf, |
| 62 | const char *ignore_section) |
| 63 | { |
| 64 | int i; |
| 65 | const char *shstrtab; |
| 66 | |
| 67 | /* No section needs to be ignored */ |
| 68 | if (ignore_section == NULL) |
| 69 | return NULL; |
| 70 | |
| 71 | DEBUG("Section to be ignored: %s\n", ignore_section); |
| 72 | |
| 73 | /* Get pointer to string table */ |
| 74 | shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]); |
| 75 | |
| 76 | for (i = 0; i < pelf->ehdr.e_shnum; i++) { |
| 77 | Elf64_Shdr *shdr; |
| 78 | const char *section_name; |
| 79 | |
| 80 | shdr = &pelf->shdr[i]; |
| 81 | section_name = &shstrtab[shdr->sh_name]; |
| 82 | |
| 83 | /* If section name matches ignored string, return shdr */ |
| 84 | if (strcmp(section_name, ignore_section) == 0) |
| 85 | return shdr; |
| 86 | } |
| 87 | |
| 88 | /* No section matches ignore string */ |
| 89 | return NULL; |
| 90 | } |
| 91 | |
/* Serialize a cbfs_stage header into outheader, little-endian.
 * Field order matches struct cbfs_stage: compression algorithm,
 * entry point, load address, file size, memory size. The xdr_le
 * put calls append to the buffer, so the call order below IS the
 * on-disk field order — do not reorder.
 */
static void fill_cbfs_stage(struct buffer *outheader, enum comp_algo algo,
			    uint64_t entry, uint64_t loadaddr,
			    uint32_t filesize, uint32_t memsize)
{
	/* N.B. The original plan was that SELF data was B.E.
	 * but: this is all L.E.
	 * Maybe we should just change the spec.
	 */
	xdr_le.put32(outheader, algo);
	xdr_le.put64(outheader, entry);
	xdr_le.put64(outheader, loadaddr);
	xdr_le.put32(outheader, filesize);
	xdr_le.put32(outheader, memsize);
}
| 106 | |
/* returns size of result, or -1 if error.
 * Note that, with the new code, this function
 * works for all elf files, not just the restricted set.
 *
 * Converts an ELF image into a CBFS stage: collapses the PT_LOAD
 * segments into one contiguous image, optionally compresses it, and
 * prepends a cbfs_stage header. On success, output owns a freshly
 * allocated buffer and *location (if non-zero) is moved back by the
 * header size so the payload lands at the requested address.
 */
int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
		       enum comp_algo algo, uint32_t *location,
		       const char *ignore_section)
{
	struct parsed_elf pelf;
	Elf64_Phdr *phdr;
	Elf64_Ehdr *ehdr;
	Elf64_Shdr *shdr_ignored;
	Elf64_Addr virt_to_phys;
	char *buffer;
	struct buffer outheader;
	int ret = -1;

	int headers;
	int i, outlen;
	uint64_t data_start, data_end, mem_end;

	comp_func_ptr compress = compression_function(algo);
	if (!compress)
		return -1;

	DEBUG("start: parse_elf_to_stage(location=0x%x)\n", *location);

	/* Need program headers, section headers and string tables to
	 * locate segments and the optional ignored section. */
	int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;

	if (parse_elf(input, &pelf, flags)) {
		ERROR("Couldn't parse ELF\n");
		return -1;
	}

	ehdr = &pelf.ehdr;
	phdr = &pelf.phdr[0];

	/* Find the section header corresponding to ignored-section */
	shdr_ignored = find_ignored_section_header(&pelf, ignore_section);

	if (ignore_section && (shdr_ignored == NULL))
		WARN("Ignore section not found\n");

	headers = ehdr->e_phnum;

	/* Ignore the program header containing ignored section */
	for (i = 0; i < headers; i++) {
		if (is_phdr_ignored(&phdr[i], shdr_ignored))
			phdr[i].p_type = PT_NULL;
	}

	/* First pass over the segments: compute the physical-address
	 * extent of the file-backed data ([data_start, data_end)) and
	 * of the in-memory image including BSS (mem_end). */
	data_start = ~0;
	data_end = 0;
	mem_end = 0;
	virt_to_phys = 0;

	for (i = 0; i < headers; i++) {
		uint64_t start, mend, rend;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		/* Empty segments are never interesting */
		if (phdr[i].p_memsz == 0)
			continue;

		/* BSS */

		start = phdr[i].p_paddr;

		mend = start + phdr[i].p_memsz;
		rend = start + phdr[i].p_filesz;

		if (start < data_start)
			data_start = start;

		if (rend > data_end)
			data_end = rend;

		if (mend > mem_end)
			mem_end = mend;

		/* Record the virt->phys delta of the first loadable
		 * segment; used later to convert e_entry (virtual) into
		 * the physical entry point coreboot expects. Assumes all
		 * segments share one delta -- only the first is kept. */
		if (virt_to_phys == 0)
			virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
	}

	/* Clamp the image start to the requested location; anything
	 * below it is dropped (see the matching l_offset logic below). */
	if (data_start < *location) {
		data_start = *location;
	}

	if (data_end <= data_start) {
		ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
		      "the ELF file is correct and resides in ROM space.\n",
		      (unsigned long)data_end, (unsigned long)data_start);
		exit(1);
	}

	/* allocate an intermediate buffer for the data */
	buffer = calloc(data_end - data_start, 1);

	if (buffer == NULL) {
		ERROR("Unable to allocate memory: %m\n");
		goto err;
	}

	/* Copy the file data into the buffer */

	for (i = 0; i < headers; i++) {
		uint64_t l_start, l_offset = 0;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (phdr[i].p_memsz == 0)
			continue;

		l_start = phdr[i].p_paddr;
		/* Segment starts below the clamp point: skip the part
		 * preceding *location and copy only the remainder. */
		if (l_start < *location) {
			l_offset = *location - l_start;
			l_start = *location;
		}

		/* A legal ELF file can have a program header with
		 * non-zero length but zero-length file size and a
		 * non-zero offset which, added together, are > than
		 * input->size (i.e. the total file size). So we need
		 * to not even test in the case that p_filesz is zero.
		 */
		if (! phdr[i].p_filesz)
			continue;
		if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
			ERROR("Underflow copying out the segment."
			      "File has %zu bytes left, segment end is %zu\n",
			      input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
			free(buffer);
			goto err;
		}
		/* Place the segment at its offset within the collapsed
		 * image; gaps between segments stay zero from calloc. */
		memcpy(buffer + (l_start - data_start),
		       &input->data[phdr[i].p_offset + l_offset],
		       phdr[i].p_filesz - l_offset);
	}

	/* Now make the output buffer */
	if (buffer_create(output, sizeof(struct cbfs_stage) + data_end - data_start,
			  input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		free(buffer);
		goto err;
	}
	memset(output->data, 0, output->size);

	/* Compress the data, at which point we'll know information
	 * to fill out the header. This seems backward but it works because
	 * - the output header is a known size (not always true in many xdr's)
	 * - we do need to know the compressed output size first
	 * If compression fails or makes the data bigger, we'll warn about it
	 * and use the original data.
	 */
	if (compress(buffer, data_end - data_start,
		     (output->data + sizeof(struct cbfs_stage)),
		     &outlen) < 0 || (unsigned)outlen > data_end - data_start) {
		WARN("Compression failed or would make the data bigger "
		     "- disabled.\n");
		memcpy(output->data + sizeof(struct cbfs_stage),
		       buffer, data_end - data_start);
		outlen = data_end - data_start;
		algo = CBFS_COMPRESS_NONE;
	}

	/* Check for enough BSS scratch space to decompress LZ4 in-place. */
	if (algo == CBFS_COMPRESS_LZ4) {
		size_t result;
		size_t memlen = mem_end - data_start;
		size_t compressed_size = outlen;
		char *compare_buffer = malloc(memlen);
		/* Simulate the runtime layout: compressed data parked at
		 * the tail of the memory image, decompressed forward. */
		char *start = compare_buffer + memlen - compressed_size;

		if (compare_buffer == NULL) {
			ERROR("Can't allocate memory!\n");
			free(buffer);
			goto err;
		}

		memcpy(start, output->data + sizeof(struct cbfs_stage),
		       compressed_size);
		result = ulz4fn(start, compressed_size, compare_buffer, memlen);

		if (result == 0) {
			ERROR("Not enough scratch space to decompress LZ4 in-place -- increase BSS size or disable compression!\n");
			free(compare_buffer);
			free(buffer);
			goto err;
		}
		/* Round-trip must reproduce the input exactly, otherwise
		 * the compressor itself is broken. */
		if (result != data_end - data_start ||
		    memcmp(compare_buffer, buffer, data_end - data_start)) {
			ERROR("LZ4 compression BUG! Report to mailing list.\n");
			free(compare_buffer);
			free(buffer);
			goto err;
		}
		free(compare_buffer);
	}

	free(buffer);

	/* Set up for output marshaling. */
	outheader.data = output->data;
	outheader.size = 0;

	/* Coreboot expects entry point to be physical address. Thus, adjust the
	 * entry point accordingly.
	 */
	fill_cbfs_stage(&outheader, algo, ehdr->e_entry + virt_to_phys,
			data_start, outlen, mem_end - data_start);

	if (*location)
		*location -= sizeof(struct cbfs_stage);
	output->size = sizeof(struct cbfs_stage) + outlen;
	ret = 0;

err:
	parsed_elf_destroy(&pelf);
	return ret;
}
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 331 | |
/* State threaded through the relocation filter while converting an
 * ELF stage into an execute-in-place (XIP) stage. */
struct xip_context {
	struct rmod_context rmodctx;	/* rmodule parse/relocation state */
	size_t ignored_section_idx;	/* section index of ignored section; 0 when none */
	Elf64_Shdr *ignored_section;	/* ignored section header, or NULL */
};
| 337 | |
| 338 | static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r) |
| 339 | { |
| 340 | size_t symbol_index; |
| 341 | int reloc_type; |
| 342 | struct parsed_elf *pelf; |
| 343 | Elf64_Sym *sym; |
| 344 | struct xip_context *xipctx; |
| 345 | |
| 346 | xipctx = f->context; |
| 347 | pelf = &xipctx->rmodctx.pelf; |
| 348 | |
| 349 | /* Allow everything through if there isn't an ignored section. */ |
| 350 | if (xipctx->ignored_section == NULL) |
| 351 | return 1; |
| 352 | |
| 353 | reloc_type = ELF64_R_TYPE(r->r_info); |
| 354 | symbol_index = ELF64_R_SYM(r->r_info); |
| 355 | sym = &pelf->syms[symbol_index]; |
| 356 | |
| 357 | /* Nothing to filter. Relocation is not being applied to the |
| 358 | * ignored section. */ |
| 359 | if (sym->st_shndx != xipctx->ignored_section_idx) |
| 360 | return 1; |
| 361 | |
| 362 | /* If there is any relocation to the ignored section that isn't |
| 363 | * absolute fail as current assumptions are that all relocations |
| 364 | * are absolute. */ |
| 365 | if (reloc_type != R_386_32) { |
| 366 | ERROR("Invalid reloc to ignored section: %x\n", reloc_type); |
| 367 | return -1; |
| 368 | } |
| 369 | |
| 370 | /* Relocation referencing ignored section. Don't emit it. */ |
| 371 | return 0; |
| 372 | } |
| 373 | |
/* Convert an ELF stage into an execute-in-place (XIP) CBFS stage.
 *
 * The single loadable segment is emitted uncompressed at *location
 * (the in-ROM address), and every collected absolute relocation in the
 * program text is patched by the delta between the link address and
 * *location. Returns 0 on success, -1 on failure. On success *location
 * is moved back by the cbfs_stage header size.
 *
 * NOTE(review): only x86 (EM_386) is supported, and the code assumes
 * exactly one loadable segment (pelf->phdr[0]) -- segments beyond the
 * first are not examined here.
 */
int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output,
			   uint32_t *location, const char *ignore_section)
{
	struct xip_context xipctx;
	struct rmod_context *rmodctx;
	struct reloc_filter filter;
	struct parsed_elf *pelf;
	size_t output_sz;
	uint32_t adjustment;
	struct buffer binput;
	struct buffer boutput;
	Elf64_Xword i;
	int ret = -1;

	xipctx.ignored_section_idx = 0;
	rmodctx = &xipctx.rmodctx;
	pelf = &rmodctx->pelf;

	if (rmodule_init(rmodctx, input))
		return -1;

	/* Only support x86 XIP currently. */
	if (rmodctx->pelf.ehdr.e_machine != EM_386) {
		ERROR("Only support XIP stages for x86\n");
		goto out;
	}

	xipctx.ignored_section =
		find_ignored_section_header(pelf, ignore_section);

	/* Convert the header pointer into a section index so the filter
	 * can compare against symbol st_shndx values. */
	if (xipctx.ignored_section != NULL)
		xipctx.ignored_section_idx =
			xipctx.ignored_section - pelf->shdr;

	filter.filter = rmod_filter;
	filter.context = &xipctx;

	/* Gather the absolute relocations that need patching; relocations
	 * into the ignored section are filtered out by rmod_filter. */
	if (rmodule_collect_relocations(rmodctx, &filter))
		goto out;

	output_sz = sizeof(struct cbfs_stage) + pelf->phdr->p_filesz;
	if (buffer_create(output, output_sz, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto out;
	}
	buffer_clone(&boutput, output);
	memset(buffer_get(&boutput), 0, output_sz);
	buffer_set_size(&boutput, 0);

	/* Single loadable segment. The entire segment moves to final
	 * location from based on virtual address of loadable segment. */
	adjustment = *location - pelf->phdr->p_vaddr;
	DEBUG("Relocation adjustment: %08x\n", adjustment);

	/* XIP stages are stored uncompressed; entry point and load
	 * address are shifted by the same adjustment as the relocations. */
	fill_cbfs_stage(&boutput, CBFS_COMPRESS_NONE,
			(uint32_t)pelf->ehdr.e_entry + adjustment,
			(uint32_t)pelf->phdr->p_vaddr + adjustment,
			pelf->phdr->p_filesz, pelf->phdr->p_memsz);
	/* Need an adjustable buffer. */
	buffer_clone(&binput, input);
	buffer_seek(&binput, pelf->phdr->p_offset);
	bputs(&boutput, buffer_get(&binput), pelf->phdr->p_filesz);

	/* Re-clone output and skip past the header so relocation offsets
	 * below are relative to the start of the program data. */
	buffer_clone(&boutput, output);
	buffer_seek(&boutput, sizeof(struct cbfs_stage));

	/* Make adjustments to all the relocations within the program. */
	for (i = 0; i < rmodctx->nrelocs; i++) {
		size_t reloc_offset;
		uint32_t val;
		struct buffer in, out;

		/* The relocations represent in-program addresses of the
		 * linked program. Obtain the offset into the program to do
		 * the adjustment. */
		reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr;

		buffer_clone(&out, &boutput);
		buffer_seek(&out, reloc_offset);
		buffer_clone(&in, &out);
		/* Appease around xdr semantics: xdr decrements buffer
		 * size when get()ing and appends to size when put()ing. */
		buffer_set_size(&out, 0);

		/* Read the linked address, write it back shifted to the
		 * XIP location. */
		val = xdr_le.get32(&in);
		DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val,
		      val + adjustment);
		xdr_le.put32(&out, val + adjustment);
	}

	/* Need to back up the location to include cbfs stage metadata. */
	*location -= sizeof(struct cbfs_stage);
	ret = 0;

out:
	rmodule_cleanup(rmodctx);
	return ret;
}