Patrick Georgi | 7333a11 | 2020-05-08 20:48:04 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 2 | |
Francis Rowe | 3fb8b0d | 2014-11-21 02:38:48 +0000 | [diff] [blame] | 3 | #include <inttypes.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 4 | #include <stdio.h> |
| 5 | #include <stdlib.h> |
| 6 | #include <string.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 7 | |
Aaron Durbin | 54ef306 | 2014-03-05 12:12:09 -0600 | [diff] [blame] | 8 | #include "elfparsing.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 9 | #include "common.h" |
Patrick Georgi | b7b56dd8 | 2009-09-14 13:29:27 +0000 | [diff] [blame] | 10 | #include "cbfs.h" |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 11 | #include "rmodule.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 12 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 13 | /* Checks if program segment contains the ignored sections */ |
| 14 | static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr **shdrs) |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 15 | { |
| 16 | /* If no ignored section, return false. */ |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 17 | if (shdrs == NULL) |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 18 | return 0; |
| 19 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 20 | while (*shdrs) { |
| 21 | Elf64_Addr sh_start = (*shdrs)->sh_addr; |
| 22 | Elf64_Addr sh_end = (*shdrs)->sh_addr + (*shdrs)->sh_size; |
| 23 | Elf64_Addr ph_start = phdr->p_vaddr; |
| 24 | Elf64_Addr ph_end = phdr->p_vaddr + phdr->p_memsz; |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 25 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 26 | /* Return true only if section occupies whole of segment. */ |
| 27 | if ((sh_start == ph_start) && (sh_end == ph_end)) { |
| 28 | DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", ph_start); |
| 29 | return 1; |
| 30 | } |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 31 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 32 | /* If shdr intersects phdr at all, its a conflict */ |
| 33 | if (((sh_start >= ph_start) && (sh_start <= ph_end)) || |
| 34 | ((sh_end >= ph_start) && (sh_end <= ph_end))) { |
| 35 | ERROR("Conflicting sections in segment\n"); |
| 36 | exit(1); |
| 37 | } |
| 38 | shdrs++; |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 39 | } |
| 40 | |
| 41 | /* Program header doesn't need to be ignored. */ |
| 42 | return 0; |
| 43 | } |
| 44 | |
/* Returns true if section_name exactly matches one of the comma-separated
 * names in ignore_sections. */
static bool is_ignored_sections(const char *section_name,
				const char *ignore_sections)
{
	const char *cur, *comma;
	size_t name_len = strlen(section_name);

	for (cur = ignore_sections; (comma = strchr(cur, ',')); cur = comma + 1) {
		/* Match a whole token only: same length and same bytes.
		 * A bare strncmp() would wrongly accept ".bss2" when
		 * ".bss" is in the ignore list. */
		if ((size_t)(comma - cur) == name_len &&
		    !strncmp(cur, section_name, name_len))
			return true;
	}
	/* Last (or only) token has no trailing comma. */
	return !strcmp(cur, section_name);
}
| 56 | |
| 57 | /* Find section headers based on ignored section names. |
| 58 | * Returns a NULL-terminated list of section headers. |
| 59 | */ |
| 60 | static Elf64_Shdr **find_ignored_sections_header(struct parsed_elf *pelf, |
| 61 | const char *ignore_sections) |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 62 | { |
| 63 | int i; |
| 64 | const char *shstrtab; |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 65 | Elf64_Shdr **headers = NULL; |
| 66 | size_t size = 1; |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 67 | |
| 68 | /* No section needs to be ignored */ |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 69 | if (ignore_sections == NULL) |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 70 | return NULL; |
| 71 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 72 | DEBUG("Sections to be ignored: %s\n", ignore_sections); |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 73 | |
| 74 | /* Get pointer to string table */ |
| 75 | shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]); |
| 76 | |
| 77 | for (i = 0; i < pelf->ehdr.e_shnum; i++) { |
| 78 | Elf64_Shdr *shdr; |
| 79 | const char *section_name; |
| 80 | |
| 81 | shdr = &pelf->shdr[i]; |
| 82 | section_name = &shstrtab[shdr->sh_name]; |
| 83 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 84 | /* If section name matches ignored string, add to list */ |
| 85 | if (is_ignored_sections(section_name, ignore_sections)) { |
| 86 | headers = realloc(headers, sizeof(*headers) * ++size); |
| 87 | if (!headers) { |
| 88 | ERROR("Memory allocation failed\n"); |
| 89 | exit(1); |
| 90 | } |
| 91 | headers[size - 2] = shdr; |
| 92 | } |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 93 | } |
| 94 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 95 | if (headers) |
| 96 | headers[size - 1] = NULL; |
| 97 | return headers; |
Furquan Shaikh | 405304a | 2014-10-30 11:44:20 -0700 | [diff] [blame] | 98 | } |
| 99 | |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 100 | static int fill_cbfs_stageheader(struct cbfs_file_attr_stageheader *stageheader, |
| 101 | uint64_t entry, uint64_t loadaddr, |
| 102 | uint32_t memsize) |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 103 | { |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 104 | if (entry - loadaddr >= memsize) { |
| 105 | ERROR("stage entry point out of bounds!\n"); |
| 106 | return -1; |
| 107 | } |
| 108 | |
Alex James | 02001a38 | 2021-12-19 16:41:59 -0600 | [diff] [blame] | 109 | stageheader->loadaddr = htobe64(loadaddr); |
| 110 | stageheader->memlen = htobe32(memsize); |
| 111 | stageheader->entry_offset = htobe32(entry - loadaddr); |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 112 | |
| 113 | return 0; |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 114 | } |
| 115 | |
/* returns size of result, or -1 if error.
 * Note that, with the new code, this function
 * works for all elf files, not just the restricted set.
 *
 * Converts the ELF in 'input' into a flat CBFS stage: the file-backed bytes
 * of all loadable segments are copied into 'output' at their physical-
 * address offsets, and 'stageheader' is filled with load address, memory
 * length and entry offset. Segments fully covered by a section named in
 * 'ignore_section' (comma-separated list, may be NULL) are dropped.
 */
int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
		       const char *ignore_section,
		       struct cbfs_file_attr_stageheader *stageheader)
{
	struct parsed_elf pelf;
	Elf64_Phdr *phdr;
	Elf64_Ehdr *ehdr;
	Elf64_Shdr **shdrs_ignored;
	Elf64_Addr virt_to_phys;
	int ret = -1;

	int headers;
	int i;
	uint64_t data_start, data_end, mem_end;

	int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;

	if (parse_elf(input, &pelf, flags)) {
		ERROR("Couldn't parse ELF\n");
		return -1;
	}

	ehdr = &pelf.ehdr;
	phdr = &pelf.phdr[0];

	/* Find the section headers corresponding to ignored-sections */
	shdrs_ignored = find_ignored_sections_header(&pelf, ignore_section);

	if (ignore_section && (shdrs_ignored == NULL))
		WARN("Ignore section(s) not found\n");

	headers = ehdr->e_phnum;

	/* Ignore the program header containing ignored section: marking it
	 * PT_NULL makes every later PT_LOAD check skip it. */
	for (i = 0; i < headers; i++) {
		if (is_phdr_ignored(&phdr[i], shdrs_ignored))
			phdr[i].p_type = PT_NULL;
	}

	/* Scan loadable segments for the image extents: data_start/data_end
	 * bound the file-backed bytes, mem_end also covers BSS. */
	data_start = ~0;
	data_end = 0;
	mem_end = 0;
	virt_to_phys = 0;

	for (i = 0; i < headers; i++) {
		uint64_t start, mend, rend;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		/* Empty segments are never interesting */
		if (phdr[i].p_memsz == 0)
			continue;

		/* BSS */

		start = phdr[i].p_paddr;

		mend = start + phdr[i].p_memsz;
		rend = start + phdr[i].p_filesz;

		if (start < data_start)
			data_start = start;

		if (rend > data_end)
			data_end = rend;

		if (mend > mem_end)
			mem_end = mend;

		/* Record the virtual-to-physical offset of the first loadable
		 * segment; used below to convert e_entry (a virtual address)
		 * into a physical one. */
		if (virt_to_phys == 0)
			virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
	}

	if (data_end <= data_start) {
		ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
		      "the ELF file is correct and resides in ROM space.\n",
		      (unsigned long)data_end, (unsigned long)data_start);
		exit(1);
	}

	/* The output holds only the file-backed span; BSS is expressed via
	 * the memlen field of the stage header. */
	if (buffer_create(output, data_end - data_start, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto err;
	}
	memset(output->data, 0, output->size);

	/* Copy the file data into the output buffer */

	for (i = 0; i < headers; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (phdr[i].p_memsz == 0)
			continue;

		/* A legal ELF file can have a program header with
		 * non-zero length but zero-length file size and a
		 * non-zero offset which, added together, are > than
		 * input->size (i.e. the total file size). So we need
		 * to not even test in the case that p_filesz is zero.
		 */
		if (!phdr[i].p_filesz)
			continue;
		if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
			ERROR("Underflow copying out the segment."
			      "File has %zu bytes left, segment end is %zu\n",
			      input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
			goto err;
		}
		/* Place segment bytes at their physical offset in the image. */
		memcpy(&output->data[phdr[i].p_paddr - data_start],
		       &input->data[phdr[i].p_offset],
		       phdr[i].p_filesz);
	}

	/* coreboot expects entry point to be physical address. Thus, adjust the
	   entry point accordingly. */
	ret = fill_cbfs_stageheader(stageheader, ehdr->e_entry + virt_to_phys,
				    data_start, mem_end - data_start);
err:
	parsed_elf_destroy(&pelf);
	return ret;
}
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 243 | |
/* Context threaded through the XIP relocation filter. */
struct xip_context {
	struct rmod_context rmodctx;	/* rmodule state for the parsed ELF */
	Elf64_Shdr **ignored_sections;	/* NULL-terminated list; NULL if none */
};
| 248 | |
| 249 | static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r) |
| 250 | { |
| 251 | size_t symbol_index; |
| 252 | int reloc_type; |
| 253 | struct parsed_elf *pelf; |
| 254 | Elf64_Sym *sym; |
| 255 | struct xip_context *xipctx; |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 256 | Elf64_Shdr **sections; |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 257 | |
| 258 | xipctx = f->context; |
| 259 | pelf = &xipctx->rmodctx.pelf; |
| 260 | |
| 261 | /* Allow everything through if there isn't an ignored section. */ |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 262 | if (xipctx->ignored_sections == NULL) |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 263 | return 1; |
| 264 | |
| 265 | reloc_type = ELF64_R_TYPE(r->r_info); |
| 266 | symbol_index = ELF64_R_SYM(r->r_info); |
| 267 | sym = &pelf->syms[symbol_index]; |
| 268 | |
| 269 | /* Nothing to filter. Relocation is not being applied to the |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 270 | * ignored sections. */ |
| 271 | for (sections = xipctx->ignored_sections; *sections; sections++) |
| 272 | if (sym->st_shndx == *sections - pelf->shdr) |
| 273 | break; |
| 274 | if (!*sections) |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 275 | return 1; |
| 276 | |
| 277 | /* If there is any relocation to the ignored section that isn't |
| 278 | * absolute fail as current assumptions are that all relocations |
| 279 | * are absolute. */ |
Patrick Rudolph | 21046a3 | 2018-11-26 15:37:51 +0100 | [diff] [blame] | 280 | if ((reloc_type != R_386_32) && |
| 281 | (reloc_type != R_AMD64_64) && |
| 282 | (reloc_type != R_AMD64_32)) { |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 283 | ERROR("Invalid reloc to ignored section: %x\n", reloc_type); |
| 284 | return -1; |
| 285 | } |
| 286 | |
Jeremy Compostella | c9cae53 | 2023-08-30 10:25:33 -0700 | [diff] [blame] | 287 | /* Relocation referencing ignored sections. Don't emit it. */ |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 288 | return 0; |
| 289 | } |
| 290 | |
Jeremy Compostella | 79f2e1f | 2023-08-30 15:35:46 -0700 | [diff] [blame] | 291 | /* Returns a NULL-terminated list of loadable segments. Returns NULL if no |
| 292 | * loadable segments were found or if two consecutive segments are not |
| 293 | * consecutive in their physical address space. |
| 294 | */ |
| 295 | static Elf64_Phdr **find_loadable_segments(struct parsed_elf *pelf) |
| 296 | { |
| 297 | Elf64_Phdr **phdrs = NULL; |
| 298 | Elf64_Phdr *prev = NULL, *cur; |
| 299 | size_t size = 1, i; |
| 300 | |
| 301 | for (i = 0; i < pelf->ehdr.e_phnum; i++, prev = cur) { |
| 302 | cur = &pelf->phdr[i]; |
| 303 | |
| 304 | if (cur->p_type != PT_LOAD || cur->p_memsz == 0) |
| 305 | continue; |
| 306 | |
| 307 | phdrs = realloc(phdrs, sizeof(*phdrs) * ++size); |
| 308 | if (!phdrs) { |
| 309 | ERROR("Memory allocation failed\n"); |
| 310 | return NULL; |
| 311 | } |
| 312 | phdrs[size - 2] = cur; |
| 313 | |
| 314 | if (!prev) |
| 315 | continue; |
| 316 | |
| 317 | if (prev->p_paddr + prev->p_memsz != cur->p_paddr || |
| 318 | prev->p_filesz != prev->p_memsz) { |
| 319 | ERROR("Loadable segments physical addresses should " |
| 320 | "be consecutive\n"); |
| 321 | free(phdrs); |
| 322 | return NULL; |
| 323 | } |
| 324 | } |
| 325 | |
| 326 | if (phdrs) |
| 327 | phdrs[size - 1] = NULL; |
| 328 | return phdrs; |
| 329 | } |
| 330 | |
/* Convert the ELF in 'input' into an execute-in-place (XIP) stage linked to
 * run at 'location': loadable segment contents are concatenated into
 * 'output', every collected absolute relocation is patched by the distance
 * between link address and 'location', and 'stageheader' is filled in.
 * Returns 0 on success, -1 on error. */
int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output,
			   uint32_t location, const char *ignore_sections,
			   struct cbfs_file_attr_stageheader *stageheader)
{
	struct xip_context xipctx;
	struct rmod_context *rmodctx;
	struct reloc_filter filter;
	struct parsed_elf *pelf;
	uint32_t adjustment, memsz = 0;
	struct buffer binput;
	struct buffer boutput;
	Elf64_Phdr **toload, **phdr;
	Elf64_Xword i;
	int ret = -1;
	size_t filesz = 0;

	rmodctx = &xipctx.rmodctx;
	pelf = &rmodctx->pelf;

	if (rmodule_init(rmodctx, input))
		return -1;

	/* Only support x86 / x86_64 XIP currently. */
	if ((rmodctx->pelf.ehdr.e_machine != EM_386) &&
	    (rmodctx->pelf.ehdr.e_machine != EM_X86_64)) {
		ERROR("Only support XIP stages for x86/x86_64\n");
		goto out;
	}

	xipctx.ignored_sections =
		find_ignored_sections_header(pelf, ignore_sections);

	/* rmod_filter drops relocations targeting the ignored sections. */
	filter.filter = rmod_filter;
	filter.context = &xipctx;

	if (rmodule_collect_relocations(rmodctx, &filter))
		goto out;

	/* NOTE(review): 'toload' is not freed on any path — confirm the
	 * leak is acceptable for this short-lived tool process. */
	toload = find_loadable_segments(pelf);
	if (!toload)
		goto out;

	/* Output holds only the file-backed bytes of the loadable segments. */
	for (phdr = toload; *phdr; phdr++)
		filesz += (*phdr)->p_filesz;
	if (buffer_create(output, filesz, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto out;
	}
	buffer_clone(&boutput, output);
	memset(buffer_get(&boutput), 0, filesz);
	/* Reset size so bputs() below appends from the start. */
	buffer_set_size(&boutput, 0);

	/* Relocation delta from the link-time virtual address of the first
	 * loadable segment to the requested XIP location. */
	adjustment = location - pelf->phdr->p_vaddr;
	DEBUG("Relocation adjustment: %08x\n", adjustment);

	for (phdr = toload; *phdr; phdr++)
		memsz += (*phdr)->p_memsz;
	/* NOTE(review): return value is ignored here, unlike in
	 * parse_elf_to_stage() — confirm an out-of-bounds entry point
	 * should not fail this path. */
	fill_cbfs_stageheader(stageheader,
			      (uint32_t)pelf->ehdr.e_entry + adjustment,
			      (uint32_t)pelf->phdr->p_vaddr + adjustment,
			      memsz);
	/* Concatenate each segment's file contents into the output. */
	for (phdr = toload; *phdr; phdr++) {
		/* Need an adjustable buffer. */
		buffer_clone(&binput, input);
		buffer_seek(&binput, (*phdr)->p_offset);
		bputs(&boutput, buffer_get(&binput), (*phdr)->p_filesz);
	}

	buffer_clone(&boutput, output);

	/* Make adjustments to all the relocations within the program. */
	for (i = 0; i < rmodctx->nrelocs; i++) {
		size_t reloc_offset;
		uint32_t val;
		struct buffer in, out;

		/* The relocations represent in-program addresses of the
		 * linked program. Obtain the offset into the program to do
		 * the adjustment. */
		reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr;

		buffer_clone(&out, &boutput);
		buffer_seek(&out, reloc_offset);
		buffer_clone(&in, &out);
		/* Appease around xdr semantics: xdr decrements buffer
		 * size when get()ing and appends to size when put()ing. */
		buffer_set_size(&out, 0);

		val = xdr_le.get32(&in);
		DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val,
		      val + adjustment);
		xdr_le.put32(&out, val + adjustment);
	}

	ret = 0;

out:
	rmodule_cleanup(rmodctx);
	return ret;
}