Patrick Georgi | 7333a11 | 2020-05-08 20:48:04 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 2 | |
Francis Rowe | 3fb8b0d | 2014-11-21 02:38:48 +0000 | [diff] [blame] | 3 | #include <inttypes.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 4 | #include <stdio.h> |
| 5 | #include <stdlib.h> |
| 6 | #include <string.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 7 | |
Aaron Durbin | 54ef306 | 2014-03-05 12:12:09 -0600 | [diff] [blame] | 8 | #include "elfparsing.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 9 | #include "common.h" |
Patrick Georgi | b7b56dd8 | 2009-09-14 13:29:27 +0000 | [diff] [blame] | 10 | #include "cbfs.h" |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 11 | #include "rmodule.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 12 | |
Julius Werner | 98eeb96 | 2019-12-11 15:47:42 -0800 | [diff] [blame] | 13 | #include <commonlib/bsd/compression.h> |
Julius Werner | 09f2921 | 2015-09-29 13:51:35 -0700 | [diff] [blame] | 14 | |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 15 | /* Checks if program segment contains the ignored section */ |
| 16 | static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr *shdr) |
| 17 | { |
| 18 | /* If no ignored section, return false. */ |
| 19 | if (shdr == NULL) |
| 20 | return 0; |
| 21 | |
| 22 | Elf64_Addr sh_start = shdr->sh_addr; |
| 23 | Elf64_Addr sh_end = shdr->sh_addr + shdr->sh_size; |
| 24 | Elf64_Addr ph_start = phdr->p_vaddr; |
| 25 | Elf64_Addr ph_end = phdr->p_vaddr + phdr->p_memsz; |
| 26 | |
| 27 | /* Return true only if section occupies whole of segment. */ |
| 28 | if ((sh_start == ph_start) && (sh_end == ph_end)) { |
Francis Rowe | 3fb8b0d | 2014-11-21 02:38:48 +0000 | [diff] [blame] | 29 | DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", ph_start); |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 30 | return 1; |
| 31 | } |
| 32 | |
| 33 | /* If shdr intersects phdr at all, its a conflict */ |
| 34 | if (((sh_start >= ph_start) && (sh_start <= ph_end)) || |
| 35 | ((sh_end >= ph_start) && (sh_end <= ph_end))) { |
| 36 | ERROR("Conflicting sections in segment\n"); |
| 37 | exit(1); |
| 38 | } |
| 39 | |
| 40 | /* Program header doesn't need to be ignored. */ |
| 41 | return 0; |
| 42 | } |
| 43 | |
| 44 | /* Find section header based on ignored section name */ |
| 45 | static Elf64_Shdr *find_ignored_section_header(struct parsed_elf *pelf, |
| 46 | const char *ignore_section) |
| 47 | { |
| 48 | int i; |
| 49 | const char *shstrtab; |
| 50 | |
| 51 | /* No section needs to be ignored */ |
| 52 | if (ignore_section == NULL) |
| 53 | return NULL; |
| 54 | |
| 55 | DEBUG("Section to be ignored: %s\n", ignore_section); |
| 56 | |
| 57 | /* Get pointer to string table */ |
| 58 | shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]); |
| 59 | |
| 60 | for (i = 0; i < pelf->ehdr.e_shnum; i++) { |
| 61 | Elf64_Shdr *shdr; |
| 62 | const char *section_name; |
| 63 | |
| 64 | shdr = &pelf->shdr[i]; |
| 65 | section_name = &shstrtab[shdr->sh_name]; |
| 66 | |
| 67 | /* If section name matches ignored string, return shdr */ |
| 68 | if (strcmp(section_name, ignore_section) == 0) |
| 69 | return shdr; |
| 70 | } |
| 71 | |
| 72 | /* No section matches ignore string */ |
| 73 | return NULL; |
| 74 | } |
| 75 | |
/* Serialize a cbfs_stage header into outheader.
 *
 * algo:     compression algorithm applied to the stage data
 * entry:    entry point address
 * loadaddr: address the stage data is loaded/decompressed to
 * memsize:  runtime memory footprint; filesize: stored data size
 *
 * The put order and widths below define the on-media layout of
 * struct cbfs_stage — do not reorder.
 */
static void fill_cbfs_stage(struct buffer *outheader, enum comp_algo algo,
			    uint64_t entry, uint64_t loadaddr,
			    uint32_t filesize, uint32_t memsize)
{
	/* N.B. The original plan was that SELF data was B.E.
	 * but: this is all L.E.
	 * Maybe we should just change the spec.
	 */
	xdr_le.put32(outheader, algo);
	xdr_le.put64(outheader, entry);
	xdr_le.put64(outheader, loadaddr);
	xdr_le.put32(outheader, filesize);
	xdr_le.put32(outheader, memsize);
}
| 90 | |
/* Convert an ELF image into a CBFS stage in 'output'.
 *
 * returns size of result, or -1 if error.
 * Note that, with the new code, this function
 * works for all elf files, not just the restricted set.
 *
 * input:          raw ELF file contents
 * output:         receives cbfs_stage header + (possibly compressed) data
 * algo:           requested compression algorithm for the stage payload
 * location:       in: fixed placement address (0 = none); out: moved back
 *                 by sizeof(struct cbfs_stage) when non-zero
 * ignore_section: name of a section whose containing segment is dropped
 *                 from the stage (may be NULL)
 */
int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
		       enum comp_algo algo, uint32_t *location,
		       const char *ignore_section)
{
	struct parsed_elf pelf;
	Elf64_Phdr *phdr;
	Elf64_Ehdr *ehdr;
	Elf64_Shdr *shdr_ignored;
	/* Physical-minus-virtual delta of the first loadable segment,
	 * used to translate the entry point to a physical address. */
	Elf64_Addr virt_to_phys;
	char *buffer;
	struct buffer outheader;
	int ret = -1;

	int headers;
	int i, outlen;
	uint64_t data_start, data_end, mem_end;

	comp_func_ptr compress = compression_function(algo);
	if (!compress)
		return -1;

	DEBUG("start: parse_elf_to_stage(location=0x%x)\n", *location);

	/* Only program headers, section headers and string tables are
	 * needed here; symbols/relocations are not parsed. */
	int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;

	if (parse_elf(input, &pelf, flags)) {
		ERROR("Couldn't parse ELF\n");
		return -1;
	}

	ehdr = &pelf.ehdr;
	phdr = &pelf.phdr[0];

	/* Find the section header corresponding to ignored-section */
	shdr_ignored = find_ignored_section_header(&pelf, ignore_section);

	if (ignore_section && (shdr_ignored == NULL))
		WARN("Ignore section not found\n");

	headers = ehdr->e_phnum;

	/* Ignore the program header containing ignored section */
	for (i = 0; i < headers; i++) {
		if (is_phdr_ignored(&phdr[i], shdr_ignored))
			phdr[i].p_type = PT_NULL;
	}

	/* Scan the loadable segments to find the program's file-backed
	 * data extent [data_start, data_end) and its full memory extent
	 * (mem_end includes BSS). */
	data_start = ~0;
	data_end = 0;
	mem_end = 0;
	virt_to_phys = 0;

	for (i = 0; i < headers; i++) {
		uint64_t start, mend, rend;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		/* Empty segments are never interesting */
		if (phdr[i].p_memsz == 0)
			continue;

		/* BSS */

		start = phdr[i].p_paddr;

		mend = start + phdr[i].p_memsz;
		rend = start + phdr[i].p_filesz;

		if (start < data_start)
			data_start = start;

		if (rend > data_end)
			data_end = rend;

		if (mend > mem_end)
			mem_end = mend;

		/* First loadable segment's delta wins. */
		if (virt_to_phys == 0)
			virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
	}

	/* A fixed placement address overrides a lower linked start. */
	if (data_start < *location) {
		data_start = *location;
	}

	if (data_end <= data_start) {
		ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
		      "the ELF file is correct and resides in ROM space.\n",
		      (unsigned long)data_end, (unsigned long)data_start);
		exit(1);
	}

	/* allocate an intermediate buffer for the data */
	buffer = calloc(data_end - data_start, 1);

	if (buffer == NULL) {
		ERROR("Unable to allocate memory: %m\n");
		goto err;
	}

	/* Copy the file data into the buffer */

	for (i = 0; i < headers; i++) {
		uint64_t l_start, l_offset = 0;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (phdr[i].p_memsz == 0)
			continue;

		l_start = phdr[i].p_paddr;
		/* Clip the part of the segment below the placement
		 * address; only the tail from *location onward is kept. */
		if (l_start < *location) {
			l_offset = *location - l_start;
			l_start = *location;
		}

		/* A legal ELF file can have a program header with
		 * non-zero length but zero-length file size and a
		 * non-zero offset which, added together, are > than
		 * input->size (i.e. the total file size). So we need
		 * to not even test in the case that p_filesz is zero.
		 */
		if (! phdr[i].p_filesz)
			continue;
		if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
			ERROR("Underflow copying out the segment."
			      "File has %zu bytes left, segment end is %zu\n",
			      input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
			free(buffer);
			goto err;
		}
		memcpy(buffer + (l_start - data_start),
		       &input->data[phdr[i].p_offset + l_offset],
		       phdr[i].p_filesz - l_offset);
	}

	/* Now make the output buffer */
	if (buffer_create(output, sizeof(struct cbfs_stage) + data_end - data_start,
			  input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		free(buffer);
		goto err;
	}
	memset(output->data, 0, output->size);

	/* Compress the data, at which point we'll know information
	 * to fill out the header. This seems backward but it works because
	 * - the output header is a known size (not always true in many xdr's)
	 * - we do need to know the compressed output size first
	 * If compression fails or makes the data bigger, we'll warn about it
	 * and use the original data.
	 */
	if (compress(buffer, data_end - data_start,
		     (output->data + sizeof(struct cbfs_stage)),
		     &outlen) < 0 || (unsigned)outlen > data_end - data_start) {
		WARN("Compression failed or would make the data bigger "
		     "- disabled.\n");
		memcpy(output->data + sizeof(struct cbfs_stage),
		       buffer, data_end - data_start);
		outlen = data_end - data_start;
		algo = CBFS_COMPRESS_NONE;
	}

	/* Check for enough BSS scratch space to decompress LZ4 in-place. */
	if (algo == CBFS_COMPRESS_LZ4) {
		size_t result;
		size_t memlen = mem_end - data_start;
		size_t compressed_size = outlen;
		char *compare_buffer = malloc(memlen);
		/* Simulate in-place decompression: compressed data parked
		 * at the very end of the runtime memory footprint. */
		char *start = compare_buffer + memlen - compressed_size;

		if (compare_buffer == NULL) {
			ERROR("Can't allocate memory!\n");
			free(buffer);
			goto err;
		}

		memcpy(start, output->data + sizeof(struct cbfs_stage),
		       compressed_size);
		result = ulz4fn(start, compressed_size, compare_buffer, memlen);

		if (result == 0) {
			ERROR("Not enough scratch space to decompress LZ4 in-place -- increase BSS size or disable compression!\n");
			free(compare_buffer);
			free(buffer);
			goto err;
		}
		/* The round trip must reproduce the original bytes exactly. */
		if (result != data_end - data_start ||
		    memcmp(compare_buffer, buffer, data_end - data_start)) {
			ERROR("LZ4 compression BUG! Report to mailing list.\n");
			free(compare_buffer);
			free(buffer);
			goto err;
		}
		free(compare_buffer);
	}

	free(buffer);

	/* Set up for output marshaling. */
	outheader.data = output->data;
	outheader.size = 0;

	/* coreboot expects entry point to be physical address. Thus, adjust the
	 * entry point accordingly.
	 */
	fill_cbfs_stage(&outheader, algo, ehdr->e_entry + virt_to_phys,
			data_start, outlen, mem_end - data_start);

	if (*location)
		*location -= sizeof(struct cbfs_stage);
	output->size = sizeof(struct cbfs_stage) + outlen;
	ret = 0;

err:
	parsed_elf_destroy(&pelf);
	return ret;
}
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 315 | |
/* State shared with the relocation filter while building an XIP stage. */
struct xip_context {
	struct rmod_context rmodctx;	/* rmodule parse/relocation state */
	size_t ignored_section_idx;	/* shdr index of ignored section; 0 if none */
	Elf64_Shdr *ignored_section;	/* header of ignored section, or NULL */
};
| 321 | |
| 322 | static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r) |
| 323 | { |
| 324 | size_t symbol_index; |
| 325 | int reloc_type; |
| 326 | struct parsed_elf *pelf; |
| 327 | Elf64_Sym *sym; |
| 328 | struct xip_context *xipctx; |
| 329 | |
| 330 | xipctx = f->context; |
| 331 | pelf = &xipctx->rmodctx.pelf; |
| 332 | |
| 333 | /* Allow everything through if there isn't an ignored section. */ |
| 334 | if (xipctx->ignored_section == NULL) |
| 335 | return 1; |
| 336 | |
| 337 | reloc_type = ELF64_R_TYPE(r->r_info); |
| 338 | symbol_index = ELF64_R_SYM(r->r_info); |
| 339 | sym = &pelf->syms[symbol_index]; |
| 340 | |
| 341 | /* Nothing to filter. Relocation is not being applied to the |
| 342 | * ignored section. */ |
| 343 | if (sym->st_shndx != xipctx->ignored_section_idx) |
| 344 | return 1; |
| 345 | |
| 346 | /* If there is any relocation to the ignored section that isn't |
| 347 | * absolute fail as current assumptions are that all relocations |
| 348 | * are absolute. */ |
Patrick Rudolph | 21046a3 | 2018-11-26 15:37:51 +0100 | [diff] [blame] | 349 | if ((reloc_type != R_386_32) && |
| 350 | (reloc_type != R_AMD64_64) && |
| 351 | (reloc_type != R_AMD64_32)) { |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 352 | ERROR("Invalid reloc to ignored section: %x\n", reloc_type); |
| 353 | return -1; |
| 354 | } |
| 355 | |
| 356 | /* Relocation referencing ignored section. Don't emit it. */ |
| 357 | return 0; |
| 358 | } |
| 359 | |
/* Build an execute-in-place (XIP) stage: instead of storing relocatable
 * data, apply all collected relocations for the final address *location
 * and store the single loadable segment uncompressed.
 *
 * Returns 0 on success, -1 on error. On success *location is moved back
 * by sizeof(struct cbfs_stage) to account for the metadata header.
 */
int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output,
			   uint32_t *location, const char *ignore_section)
{
	struct xip_context xipctx;
	struct rmod_context *rmodctx;
	struct reloc_filter filter;
	struct parsed_elf *pelf;
	size_t output_sz;
	uint32_t adjustment;
	struct buffer binput;
	struct buffer boutput;
	Elf64_Xword i;
	int ret = -1;

	xipctx.ignored_section_idx = 0;
	rmodctx = &xipctx.rmodctx;
	pelf = &rmodctx->pelf;

	if (rmodule_init(rmodctx, input))
		return -1;

	/* Only support x86 / x86_64 XIP currently. */
	if ((rmodctx->pelf.ehdr.e_machine != EM_386) &&
	    (rmodctx->pelf.ehdr.e_machine != EM_X86_64)) {
		ERROR("Only support XIP stages for x86/x86_64\n");
		goto out;
	}

	xipctx.ignored_section =
		find_ignored_section_header(pelf, ignore_section);

	/* Pointer arithmetic on the shdr array yields the section index
	 * the filter compares symbol st_shndx values against. */
	if (xipctx.ignored_section != NULL)
		xipctx.ignored_section_idx =
			xipctx.ignored_section - pelf->shdr;

	filter.filter = rmod_filter;
	filter.context = &xipctx;

	/* Gather relocations, dropping those into the ignored section. */
	if (rmodule_collect_relocations(rmodctx, &filter))
		goto out;

	output_sz = sizeof(struct cbfs_stage) + pelf->phdr->p_filesz;
	if (buffer_create(output, output_sz, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto out;
	}
	buffer_clone(&boutput, output);
	memset(buffer_get(&boutput), 0, output_sz);
	buffer_set_size(&boutput, 0);

	/* Single loadable segment. The entire segment moves to final
	 * location from based on virtual address of loadable segment. */
	adjustment = *location - pelf->phdr->p_vaddr;
	DEBUG("Relocation adjustment: %08x\n", adjustment);

	/* Stored uncompressed; entry and load address are pre-shifted to
	 * the XIP location. */
	fill_cbfs_stage(&boutput, CBFS_COMPRESS_NONE,
			(uint32_t)pelf->ehdr.e_entry + adjustment,
			(uint32_t)pelf->phdr->p_vaddr + adjustment,
			pelf->phdr->p_filesz, pelf->phdr->p_memsz);
	/* Need an adjustable buffer. */
	buffer_clone(&binput, input);
	buffer_seek(&binput, pelf->phdr->p_offset);
	bputs(&boutput, buffer_get(&binput), pelf->phdr->p_filesz);

	/* Re-point boutput at the start of the copied program data. */
	buffer_clone(&boutput, output);
	buffer_seek(&boutput, sizeof(struct cbfs_stage));

	/* Make adjustments to all the relocations within the program. */
	for (i = 0; i < rmodctx->nrelocs; i++) {
		size_t reloc_offset;
		uint32_t val;
		struct buffer in, out;

		/* The relocations represent in-program addresses of the
		 * linked program. Obtain the offset into the program to do
		 * the adjustment. */
		reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr;

		buffer_clone(&out, &boutput);
		buffer_seek(&out, reloc_offset);
		buffer_clone(&in, &out);
		/* Appease around xdr semantics: xdr decrements buffer
		 * size when get()ing and appends to size when put()ing. */
		buffer_set_size(&out, 0);

		/* Read the linked 32-bit value and rewrite it shifted to
		 * the XIP location. */
		val = xdr_le.get32(&in);
		DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val,
		      val + adjustment);
		xdr_le.put32(&out, val + adjustment);
	}

	/* Need to back up the location to include cbfs stage metadata. */
	*location -= sizeof(struct cbfs_stage);
	ret = 0;

out:
	rmodule_cleanup(rmodctx);
	return ret;
}