Patrick Georgi | 7333a11 | 2020-05-08 20:48:04 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 2 | |
Francis Rowe | 3fb8b0d | 2014-11-21 02:38:48 +0000 | [diff] [blame] | 3 | #include <inttypes.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 4 | #include <stdio.h> |
| 5 | #include <stdlib.h> |
| 6 | #include <string.h> |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 7 | |
Aaron Durbin | 54ef306 | 2014-03-05 12:12:09 -0600 | [diff] [blame] | 8 | #include "elfparsing.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 9 | #include "common.h" |
Patrick Georgi | b7b56dd8 | 2009-09-14 13:29:27 +0000 | [diff] [blame] | 10 | #include "cbfs.h" |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 11 | #include "rmodule.h" |
Ronald G. Minnich | 5d01ec0 | 2009-03-31 11:57:36 +0000 | [diff] [blame] | 12 | |
/* Checks if program segment contains the ignored section */
static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr *shdr)
{
	/* No ignored section means no segment can match it. */
	if (!shdr)
		return 0;

	Elf64_Addr sec_start = shdr->sh_addr;
	Elf64_Addr sec_end = shdr->sh_addr + shdr->sh_size;
	Elf64_Addr seg_start = phdr->p_vaddr;
	Elf64_Addr seg_end = phdr->p_vaddr + phdr->p_memsz;

	/* The segment is ignored only when the section covers it exactly. */
	if (sec_start == seg_start && sec_end == seg_end) {
		DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", seg_start);
		return 1;
	}

	/* Any other overlap between section and segment is a fatal
	 * layout conflict. */
	if ((sec_start >= seg_start && sec_start <= seg_end) ||
	    (sec_end >= seg_start && sec_end <= seg_end)) {
		ERROR("Conflicting sections in segment\n");
		exit(1);
	}

	/* Disjoint: keep this program header. */
	return 0;
}
| 41 | |
| 42 | /* Find section header based on ignored section name */ |
| 43 | static Elf64_Shdr *find_ignored_section_header(struct parsed_elf *pelf, |
| 44 | const char *ignore_section) |
| 45 | { |
| 46 | int i; |
| 47 | const char *shstrtab; |
| 48 | |
| 49 | /* No section needs to be ignored */ |
| 50 | if (ignore_section == NULL) |
| 51 | return NULL; |
| 52 | |
| 53 | DEBUG("Section to be ignored: %s\n", ignore_section); |
| 54 | |
| 55 | /* Get pointer to string table */ |
| 56 | shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]); |
| 57 | |
| 58 | for (i = 0; i < pelf->ehdr.e_shnum; i++) { |
| 59 | Elf64_Shdr *shdr; |
| 60 | const char *section_name; |
| 61 | |
| 62 | shdr = &pelf->shdr[i]; |
| 63 | section_name = &shstrtab[shdr->sh_name]; |
| 64 | |
| 65 | /* If section name matches ignored string, return shdr */ |
| 66 | if (strcmp(section_name, ignore_section) == 0) |
| 67 | return shdr; |
| 68 | } |
| 69 | |
| 70 | /* No section matches ignore string */ |
| 71 | return NULL; |
| 72 | } |
| 73 | |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 74 | static int fill_cbfs_stageheader(struct cbfs_file_attr_stageheader *stageheader, |
| 75 | uint64_t entry, uint64_t loadaddr, |
| 76 | uint32_t memsize) |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 77 | { |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 78 | if (entry - loadaddr >= memsize) { |
| 79 | ERROR("stage entry point out of bounds!\n"); |
| 80 | return -1; |
| 81 | } |
| 82 | |
Alex James | 02001a38 | 2021-12-19 16:41:59 -0600 | [diff] [blame] | 83 | stageheader->loadaddr = htobe64(loadaddr); |
| 84 | stageheader->memlen = htobe32(memsize); |
| 85 | stageheader->entry_offset = htobe32(entry - loadaddr); |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 86 | |
| 87 | return 0; |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 88 | } |
| 89 | |
/* Convert a parsed ELF in |input| into a flat stage image in |output| and
 * fill in |stageheader|. Returns 0 on success or -1 on error (the historic
 * "returns size of result" contract no longer holds); some fatal layout
 * problems exit(1) directly instead.
 * Note that, with the new code, this function
 * works for all elf files, not just the restricted set.
 */
int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
		       const char *ignore_section,
		       struct cbfs_file_attr_stageheader *stageheader)
{
	struct parsed_elf pelf;
	Elf64_Phdr *phdr;
	Elf64_Ehdr *ehdr;
	Elf64_Shdr *shdr_ignored;
	/* Offset added to virtual addresses to get physical ones; taken
	 * from the first loadable segment with a nonzero difference. */
	Elf64_Addr virt_to_phys;
	int ret = -1;

	int headers;
	int i;
	/* Physical-address extents across all PT_LOAD segments:
	 * data_* covers file-backed bytes, mem_end includes BSS. */
	uint64_t data_start, data_end, mem_end;

	int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;

	if (parse_elf(input, &pelf, flags)) {
		ERROR("Couldn't parse ELF\n");
		return -1;
	}

	ehdr = &pelf.ehdr;
	phdr = &pelf.phdr[0];

	/* Find the section header corresponding to ignored-section */
	shdr_ignored = find_ignored_section_header(&pelf, ignore_section);

	if (ignore_section && (shdr_ignored == NULL))
		WARN("Ignore section not found\n");

	headers = ehdr->e_phnum;

	/* Ignore the program header containing ignored section */
	for (i = 0; i < headers; i++) {
		if (is_phdr_ignored(&phdr[i], shdr_ignored))
			phdr[i].p_type = PT_NULL;
	}

	/* First pass: compute physical-address bounds of the stage. */
	data_start = ~0;
	data_end = 0;
	mem_end = 0;
	virt_to_phys = 0;

	for (i = 0; i < headers; i++) {
		uint64_t start, mend, rend;

		if (phdr[i].p_type != PT_LOAD)
			continue;

		/* Empty segments are never interesting */
		if (phdr[i].p_memsz == 0)
			continue;

		/* BSS */

		start = phdr[i].p_paddr;

		/* mend includes BSS (p_memsz); rend only file bytes. */
		mend = start + phdr[i].p_memsz;
		rend = start + phdr[i].p_filesz;

		if (start < data_start)
			data_start = start;

		if (rend > data_end)
			data_end = rend;

		if (mend > mem_end)
			mem_end = mend;

		/* Capture virt->phys delta from the first segment that
		 * has one (zero deltas are skipped, which is harmless). */
		if (virt_to_phys == 0)
			virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
	}

	if (data_end <= data_start) {
		ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
		      "the ELF file is correct and resides in ROM space.\n",
		      (unsigned long)data_end, (unsigned long)data_start);
		exit(1);
	}

	/* The output covers only file-backed bytes; BSS is expressed by
	 * memlen in the stage header, not stored in the image. */
	if (buffer_create(output, data_end - data_start, input->name) != 0) {
		ERROR("Unable to allocate memory: %m\n");
		goto err;
	}
	memset(output->data, 0, output->size);

	/* Copy the file data into the output buffer */

	for (i = 0; i < headers; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		if (phdr[i].p_memsz == 0)
			continue;

		/* A legal ELF file can have a program header with
		 * non-zero length but zero-length file size and a
		 * non-zero offset which, added together, are > than
		 * input->size (i.e. the total file size). So we need
		 * to not even test in the case that p_filesz is zero.
		 */
		if (!phdr[i].p_filesz)
			continue;
		if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
			ERROR("Underflow copying out the segment."
			      "File has %zu bytes left, segment end is %zu\n",
			      input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
			goto err;
		}
		memcpy(&output->data[phdr[i].p_paddr - data_start],
		       &input->data[phdr[i].p_offset],
		       phdr[i].p_filesz);
	}

	/* coreboot expects entry point to be physical address. Thus, adjust the
	   entry point accordingly. */
	ret = fill_cbfs_stageheader(stageheader, ehdr->e_entry + virt_to_phys,
				    data_start, mem_end - data_start);
err:
	parsed_elf_destroy(&pelf);
	return ret;
}
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 217 | |
/* State threaded through the relocation filter while building an
 * execute-in-place (XIP) stage. */
struct xip_context {
	struct rmod_context rmodctx;	/* rmodule parse/relocation state */
	size_t ignored_section_idx;	/* shdr index of ignored section; 0 if none */
	Elf64_Shdr *ignored_section;	/* NULL when no section is ignored */
};
| 223 | |
| 224 | static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r) |
| 225 | { |
| 226 | size_t symbol_index; |
| 227 | int reloc_type; |
| 228 | struct parsed_elf *pelf; |
| 229 | Elf64_Sym *sym; |
| 230 | struct xip_context *xipctx; |
| 231 | |
| 232 | xipctx = f->context; |
| 233 | pelf = &xipctx->rmodctx.pelf; |
| 234 | |
| 235 | /* Allow everything through if there isn't an ignored section. */ |
| 236 | if (xipctx->ignored_section == NULL) |
| 237 | return 1; |
| 238 | |
| 239 | reloc_type = ELF64_R_TYPE(r->r_info); |
| 240 | symbol_index = ELF64_R_SYM(r->r_info); |
| 241 | sym = &pelf->syms[symbol_index]; |
| 242 | |
| 243 | /* Nothing to filter. Relocation is not being applied to the |
| 244 | * ignored section. */ |
| 245 | if (sym->st_shndx != xipctx->ignored_section_idx) |
| 246 | return 1; |
| 247 | |
| 248 | /* If there is any relocation to the ignored section that isn't |
| 249 | * absolute fail as current assumptions are that all relocations |
| 250 | * are absolute. */ |
Patrick Rudolph | 21046a3 | 2018-11-26 15:37:51 +0100 | [diff] [blame] | 251 | if ((reloc_type != R_386_32) && |
| 252 | (reloc_type != R_AMD64_64) && |
| 253 | (reloc_type != R_AMD64_32)) { |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 254 | ERROR("Invalid reloc to ignored section: %x\n", reloc_type); |
| 255 | return -1; |
| 256 | } |
| 257 | |
| 258 | /* Relocation referencing ignored section. Don't emit it. */ |
| 259 | return 0; |
| 260 | } |
| 261 | |
| 262 | int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output, |
Arthur Heymans | 5bb7dc4 | 2021-06-22 15:21:46 +0200 | [diff] [blame] | 263 | uint32_t location, const char *ignore_section, |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 264 | struct cbfs_file_attr_stageheader *stageheader) |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 265 | { |
| 266 | struct xip_context xipctx; |
| 267 | struct rmod_context *rmodctx; |
| 268 | struct reloc_filter filter; |
| 269 | struct parsed_elf *pelf; |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 270 | uint32_t adjustment; |
| 271 | struct buffer binput; |
| 272 | struct buffer boutput; |
| 273 | Elf64_Xword i; |
| 274 | int ret = -1; |
| 275 | |
| 276 | xipctx.ignored_section_idx = 0; |
| 277 | rmodctx = &xipctx.rmodctx; |
| 278 | pelf = &rmodctx->pelf; |
| 279 | |
| 280 | if (rmodule_init(rmodctx, input)) |
| 281 | return -1; |
| 282 | |
Patrick Rudolph | 21046a3 | 2018-11-26 15:37:51 +0100 | [diff] [blame] | 283 | /* Only support x86 / x86_64 XIP currently. */ |
| 284 | if ((rmodctx->pelf.ehdr.e_machine != EM_386) && |
| 285 | (rmodctx->pelf.ehdr.e_machine != EM_X86_64)) { |
| 286 | ERROR("Only support XIP stages for x86/x86_64\n"); |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 287 | goto out; |
| 288 | } |
| 289 | |
| 290 | xipctx.ignored_section = |
| 291 | find_ignored_section_header(pelf, ignore_section); |
| 292 | |
| 293 | if (xipctx.ignored_section != NULL) |
| 294 | xipctx.ignored_section_idx = |
| 295 | xipctx.ignored_section - pelf->shdr; |
| 296 | |
| 297 | filter.filter = rmod_filter; |
| 298 | filter.context = &xipctx; |
| 299 | |
| 300 | if (rmodule_collect_relocations(rmodctx, &filter)) |
| 301 | goto out; |
| 302 | |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 303 | if (buffer_create(output, pelf->phdr->p_filesz, input->name) != 0) { |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 304 | ERROR("Unable to allocate memory: %m\n"); |
| 305 | goto out; |
| 306 | } |
| 307 | buffer_clone(&boutput, output); |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 308 | memset(buffer_get(&boutput), 0, pelf->phdr->p_filesz); |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 309 | buffer_set_size(&boutput, 0); |
| 310 | |
| 311 | /* Single loadable segment. The entire segment moves to final |
| 312 | * location from based on virtual address of loadable segment. */ |
Arthur Heymans | 5bb7dc4 | 2021-06-22 15:21:46 +0200 | [diff] [blame] | 313 | adjustment = location - pelf->phdr->p_vaddr; |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 314 | DEBUG("Relocation adjustment: %08x\n", adjustment); |
| 315 | |
Julius Werner | 81dc20e | 2020-10-15 17:37:57 -0700 | [diff] [blame] | 316 | fill_cbfs_stageheader(stageheader, |
| 317 | (uint32_t)pelf->ehdr.e_entry + adjustment, |
| 318 | (uint32_t)pelf->phdr->p_vaddr + adjustment, |
| 319 | pelf->phdr->p_memsz); |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 320 | /* Need an adjustable buffer. */ |
| 321 | buffer_clone(&binput, input); |
| 322 | buffer_seek(&binput, pelf->phdr->p_offset); |
| 323 | bputs(&boutput, buffer_get(&binput), pelf->phdr->p_filesz); |
| 324 | |
| 325 | buffer_clone(&boutput, output); |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 326 | |
| 327 | /* Make adjustments to all the relocations within the program. */ |
| 328 | for (i = 0; i < rmodctx->nrelocs; i++) { |
| 329 | size_t reloc_offset; |
| 330 | uint32_t val; |
| 331 | struct buffer in, out; |
| 332 | |
| 333 | /* The relocations represent in-program addresses of the |
| 334 | * linked program. Obtain the offset into the program to do |
| 335 | * the adjustment. */ |
| 336 | reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr; |
| 337 | |
| 338 | buffer_clone(&out, &boutput); |
| 339 | buffer_seek(&out, reloc_offset); |
| 340 | buffer_clone(&in, &out); |
| 341 | /* Appease around xdr semantics: xdr decrements buffer |
| 342 | * size when get()ing and appends to size when put()ing. */ |
| 343 | buffer_set_size(&out, 0); |
| 344 | |
| 345 | val = xdr_le.get32(&in); |
| 346 | DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val, |
| 347 | val + adjustment); |
| 348 | xdr_le.put32(&out, val + adjustment); |
| 349 | } |
| 350 | |
Aaron Durbin | 4be1674 | 2015-09-15 17:00:23 -0500 | [diff] [blame] | 351 | ret = 0; |
| 352 | |
| 353 | out: |
| 354 | rmodule_cleanup(rmodctx); |
| 355 | return ret; |
| 356 | } |