blob: 8129f0b4d613d57c67049e91cf78afddd252c669 [file] [log] [blame]
Patrick Georgi7333a112020-05-08 20:48:04 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00002
Francis Rowe3fb8b0d2014-11-21 02:38:48 +00003#include <inttypes.h>
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00004#include <stdio.h>
5#include <stdlib.h>
6#include <string.h>
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00007
Aaron Durbin54ef3062014-03-05 12:12:09 -06008#include "elfparsing.h"
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +00009#include "common.h"
Patrick Georgib7b56dd82009-09-14 13:29:27 +000010#include "cbfs.h"
Aaron Durbin4be16742015-09-15 17:00:23 -050011#include "rmodule.h"
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +000012
/* Decide whether a program segment is fully covered by one of the ignored
 * sections and should therefore be dropped from the output image.
 * shdrs is a NULL-terminated list of section headers (or NULL for "none").
 * Exits the program if a section only partially overlaps the segment, since
 * partially-ignored segments are not supported.
 */
static int is_phdr_ignored(Elf64_Phdr *phdr, Elf64_Shdr **shdrs)
{
	/* No ignored sections at all: keep the segment. */
	if (shdrs == NULL)
		return 0;

	const Elf64_Addr ph_start = phdr->p_vaddr;
	const Elf64_Addr ph_end = phdr->p_vaddr + phdr->p_memsz;

	for (; *shdrs != NULL; shdrs++) {
		const Elf64_Addr sh_start = (*shdrs)->sh_addr;
		const Elf64_Addr sh_end = (*shdrs)->sh_addr + (*shdrs)->sh_size;

		/* Ignore the segment only when the section covers it exactly. */
		if (sh_start == ph_start && sh_end == ph_end) {
			DEBUG("Ignoring program segment at 0x%" PRIx64 "\n", ph_start);
			return 1;
		}

		/* Any other overlap between section and segment is fatal. */
		if ((sh_start >= ph_start && sh_start <= ph_end) ||
		    (sh_end >= ph_start && sh_end <= ph_end)) {
			ERROR("Conflicting sections in segment\n");
			exit(1);
		}
	}

	/* Segment touches none of the ignored sections. */
	return 0;
}
44
/* Return true if section_name appears in the comma-separated list
 * ignore_sections.
 *
 * Fix: tokens before a comma were compared with
 * strncmp(cur, section_name, comma - cur), which matches any section whose
 * name merely *starts* with the token (e.g. token ".bss" matched section
 * ".bssx"), while the final token was compared exactly with strcmp. Require
 * the token length to equal the section-name length so every token is an
 * exact match.
 */
static bool is_ignored_sections(const char *section_name,
				const char *ignore_sections)
{
	const char *cur, *comma;
	const size_t name_len = strlen(section_name);

	for (cur = ignore_sections; (comma = strchr(cur, ',')); cur = comma + 1) {
		/* Token must match the whole section name, not a prefix. */
		if ((size_t)(comma - cur) == name_len &&
		    !strncmp(cur, section_name, name_len))
			return true;
	}
	/* Last (or only) token: exact match. */
	return !strcmp(cur, section_name);
}
56
57/* Find section headers based on ignored section names.
58 * Returns a NULL-terminated list of section headers.
59 */
60static Elf64_Shdr **find_ignored_sections_header(struct parsed_elf *pelf,
61 const char *ignore_sections)
Furquan Shaikh405304a2014-10-30 11:44:20 -070062{
63 int i;
64 const char *shstrtab;
Jeremy Compostellac9cae532023-08-30 10:25:33 -070065 Elf64_Shdr **headers = NULL;
66 size_t size = 1;
Furquan Shaikh405304a2014-10-30 11:44:20 -070067
68 /* No section needs to be ignored */
Jeremy Compostellac9cae532023-08-30 10:25:33 -070069 if (ignore_sections == NULL)
Furquan Shaikh405304a2014-10-30 11:44:20 -070070 return NULL;
71
Jeremy Compostellac9cae532023-08-30 10:25:33 -070072 DEBUG("Sections to be ignored: %s\n", ignore_sections);
Furquan Shaikh405304a2014-10-30 11:44:20 -070073
74 /* Get pointer to string table */
75 shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]);
76
77 for (i = 0; i < pelf->ehdr.e_shnum; i++) {
78 Elf64_Shdr *shdr;
79 const char *section_name;
80
81 shdr = &pelf->shdr[i];
82 section_name = &shstrtab[shdr->sh_name];
83
Jeremy Compostellac9cae532023-08-30 10:25:33 -070084 /* If section name matches ignored string, add to list */
85 if (is_ignored_sections(section_name, ignore_sections)) {
86 headers = realloc(headers, sizeof(*headers) * ++size);
87 if (!headers) {
88 ERROR("Memory allocation failed\n");
89 exit(1);
90 }
91 headers[size - 2] = shdr;
92 }
Furquan Shaikh405304a2014-10-30 11:44:20 -070093 }
94
Jeremy Compostellac9cae532023-08-30 10:25:33 -070095 if (headers)
96 headers[size - 1] = NULL;
97 return headers;
Furquan Shaikh405304a2014-10-30 11:44:20 -070098}
99
Julius Werner81dc20e2020-10-15 17:37:57 -0700100static int fill_cbfs_stageheader(struct cbfs_file_attr_stageheader *stageheader,
101 uint64_t entry, uint64_t loadaddr,
102 uint32_t memsize)
Aaron Durbin4be16742015-09-15 17:00:23 -0500103{
Julius Werner81dc20e2020-10-15 17:37:57 -0700104 if (entry - loadaddr >= memsize) {
105 ERROR("stage entry point out of bounds!\n");
106 return -1;
107 }
108
Alex James02001a382021-12-19 16:41:59 -0600109 stageheader->loadaddr = htobe64(loadaddr);
110 stageheader->memlen = htobe32(memsize);
111 stageheader->entry_offset = htobe32(entry - loadaddr);
Julius Werner81dc20e2020-10-15 17:37:57 -0700112
113 return 0;
Aaron Durbin4be16742015-09-15 17:00:23 -0500114}
115
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800116/* returns size of result, or -1 if error.
117 * Note that, with the new code, this function
118 * works for all elf files, not just the restricted set.
119 */
120int parse_elf_to_stage(const struct buffer *input, struct buffer *output,
Julius Werner81dc20e2020-10-15 17:37:57 -0700121 const char *ignore_section,
122 struct cbfs_file_attr_stageheader *stageheader)
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800123{
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700124 struct parsed_elf pelf;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800125 Elf64_Phdr *phdr;
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700126 Elf64_Ehdr *ehdr;
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700127 Elf64_Shdr **shdrs_ignored;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700128 Elf64_Addr virt_to_phys;
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700129 int ret = -1;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800130
131 int headers;
Julius Werner81dc20e2020-10-15 17:37:57 -0700132 int i;
HC Yen14ec1992015-01-22 09:57:34 +0800133 uint64_t data_start, data_end, mem_end;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800134
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700135 int flags = ELF_PARSE_PHDR | ELF_PARSE_SHDR | ELF_PARSE_STRTAB;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800136
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700137 if (parse_elf(input, &pelf, flags)) {
138 ERROR("Couldn't parse ELF\n");
139 return -1;
140 }
141
142 ehdr = &pelf.ehdr;
143 phdr = &pelf.phdr[0];
144
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700145 /* Find the section headers corresponding to ignored-sections */
146 shdrs_ignored = find_ignored_sections_header(&pelf, ignore_section);
Furquan Shaikh405304a2014-10-30 11:44:20 -0700147
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700148 if (ignore_section && (shdrs_ignored == NULL))
149 WARN("Ignore section(s) not found\n");
Furquan Shaikh405304a2014-10-30 11:44:20 -0700150
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700151 headers = ehdr->e_phnum;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800152
Furquan Shaikh405304a2014-10-30 11:44:20 -0700153 /* Ignore the program header containing ignored section */
154 for (i = 0; i < headers; i++) {
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700155 if (is_phdr_ignored(&phdr[i], shdrs_ignored))
Furquan Shaikh405304a2014-10-30 11:44:20 -0700156 phdr[i].p_type = PT_NULL;
157 }
158
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800159 data_start = ~0;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000160 data_end = 0;
161 mem_end = 0;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700162 virt_to_phys = 0;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000163
164 for (i = 0; i < headers; i++) {
HC Yen14ec1992015-01-22 09:57:34 +0800165 uint64_t start, mend, rend;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000166
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800167 if (phdr[i].p_type != PT_LOAD)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000168 continue;
169
170 /* Empty segments are never interesting */
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800171 if (phdr[i].p_memsz == 0)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000172 continue;
173
174 /* BSS */
175
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800176 start = phdr[i].p_paddr;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000177
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800178 mend = start + phdr[i].p_memsz;
179 rend = start + phdr[i].p_filesz;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000180
181 if (start < data_start)
182 data_start = start;
183
184 if (rend > data_end)
185 data_end = rend;
186
187 if (mend > mem_end)
188 mem_end = mend;
Furquan Shaikhf7a5b562015-05-29 12:46:18 -0700189
190 if (virt_to_phys == 0)
191 virt_to_phys = phdr[i].p_paddr - phdr[i].p_vaddr;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000192 }
193
Patrick Georgia6c337d2010-02-03 17:56:37 +0000194 if (data_end <= data_start) {
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800195 ERROR("data ends (%08lx) before it starts (%08lx). Make sure "
196 "the ELF file is correct and resides in ROM space.\n",
197 (unsigned long)data_end, (unsigned long)data_start);
Patrick Georgia6c337d2010-02-03 17:56:37 +0000198 exit(1);
199 }
200
Julius Werner81dc20e2020-10-15 17:37:57 -0700201 if (buffer_create(output, data_end - data_start, input->name) != 0) {
Hung-Te Lin4d87d4e2013-01-28 14:39:43 +0800202 ERROR("Unable to allocate memory: %m\n");
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700203 goto err;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000204 }
Julius Werner81dc20e2020-10-15 17:37:57 -0700205 memset(output->data, 0, output->size);
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000206
Julius Werner81dc20e2020-10-15 17:37:57 -0700207 /* Copy the file data into the output buffer */
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000208
209 for (i = 0; i < headers; i++) {
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800210 if (phdr[i].p_type != PT_LOAD)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000211 continue;
212
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800213 if (phdr[i].p_memsz == 0)
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000214 continue;
215
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800216 /* A legal ELF file can have a program header with
217 * non-zero length but zero-length file size and a
218 * non-zero offset which, added together, are > than
219 * input->size (i.e. the total file size). So we need
220 * to not even test in the case that p_filesz is zero.
221 */
Julius Wernerff61a392021-01-12 15:21:03 -0800222 if (!phdr[i].p_filesz)
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800223 continue;
224 if (input->size < (phdr[i].p_offset + phdr[i].p_filesz)){
225 ERROR("Underflow copying out the segment."
Paul Menzel470c37c2014-03-16 00:15:57 +0100226 "File has %zu bytes left, segment end is %zu\n",
227 input->size, (size_t)(phdr[i].p_offset + phdr[i].p_filesz));
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700228 goto err;
Ronald G. Minnichaa2f7392013-12-03 11:13:35 -0800229 }
Julius Werner81dc20e2020-10-15 17:37:57 -0700230 memcpy(&output->data[phdr[i].p_paddr - data_start],
Julius Wernerff61a392021-01-12 15:21:03 -0800231 &input->data[phdr[i].p_offset],
232 phdr[i].p_filesz);
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000233 }
234
Martin Rotha5648112017-06-03 20:05:42 -0600235 /* coreboot expects entry point to be physical address. Thus, adjust the
Julius Werner81dc20e2020-10-15 17:37:57 -0700236 entry point accordingly. */
237 ret = fill_cbfs_stageheader(stageheader, ehdr->e_entry + virt_to_phys,
238 data_start, mem_end - data_start);
Furquan Shaikhcc6f84c2014-10-30 11:28:27 -0700239err:
240 parsed_elf_destroy(&pelf);
241 return ret;
Ronald G. Minnich5d01ec02009-03-31 11:57:36 +0000242}
Aaron Durbin4be16742015-09-15 17:00:23 -0500243
/* State shared with the relocation filter while converting an XIP
 * (execute-in-place) stage; passed via struct reloc_filter's context. */
struct xip_context {
	struct rmod_context rmodctx;	/* parsed rmodule/ELF state */
	Elf64_Shdr **ignored_sections;	/* NULL-terminated list of section
					   headers to skip relocations for;
					   may be NULL */
};
248
249static int rmod_filter(struct reloc_filter *f, const Elf64_Rela *r)
250{
251 size_t symbol_index;
252 int reloc_type;
253 struct parsed_elf *pelf;
254 Elf64_Sym *sym;
255 struct xip_context *xipctx;
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700256 Elf64_Shdr **sections;
Aaron Durbin4be16742015-09-15 17:00:23 -0500257
258 xipctx = f->context;
259 pelf = &xipctx->rmodctx.pelf;
260
261 /* Allow everything through if there isn't an ignored section. */
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700262 if (xipctx->ignored_sections == NULL)
Aaron Durbin4be16742015-09-15 17:00:23 -0500263 return 1;
264
265 reloc_type = ELF64_R_TYPE(r->r_info);
266 symbol_index = ELF64_R_SYM(r->r_info);
267 sym = &pelf->syms[symbol_index];
268
269 /* Nothing to filter. Relocation is not being applied to the
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700270 * ignored sections. */
271 for (sections = xipctx->ignored_sections; *sections; sections++)
272 if (sym->st_shndx == *sections - pelf->shdr)
273 break;
274 if (!*sections)
Aaron Durbin4be16742015-09-15 17:00:23 -0500275 return 1;
276
277 /* If there is any relocation to the ignored section that isn't
278 * absolute fail as current assumptions are that all relocations
279 * are absolute. */
Patrick Rudolph21046a32018-11-26 15:37:51 +0100280 if ((reloc_type != R_386_32) &&
281 (reloc_type != R_AMD64_64) &&
282 (reloc_type != R_AMD64_32)) {
Aaron Durbin4be16742015-09-15 17:00:23 -0500283 ERROR("Invalid reloc to ignored section: %x\n", reloc_type);
284 return -1;
285 }
286
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700287 /* Relocation referencing ignored sections. Don't emit it. */
Aaron Durbin4be16742015-09-15 17:00:23 -0500288 return 0;
289}
290
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700291/* Returns a NULL-terminated list of loadable segments. Returns NULL if no
292 * loadable segments were found or if two consecutive segments are not
293 * consecutive in their physical address space.
294 */
295static Elf64_Phdr **find_loadable_segments(struct parsed_elf *pelf)
296{
297 Elf64_Phdr **phdrs = NULL;
298 Elf64_Phdr *prev = NULL, *cur;
299 size_t size = 1, i;
300
301 for (i = 0; i < pelf->ehdr.e_phnum; i++, prev = cur) {
302 cur = &pelf->phdr[i];
303
304 if (cur->p_type != PT_LOAD || cur->p_memsz == 0)
305 continue;
306
307 phdrs = realloc(phdrs, sizeof(*phdrs) * ++size);
308 if (!phdrs) {
309 ERROR("Memory allocation failed\n");
310 return NULL;
311 }
312 phdrs[size - 2] = cur;
313
314 if (!prev)
315 continue;
316
317 if (prev->p_paddr + prev->p_memsz != cur->p_paddr ||
318 prev->p_filesz != prev->p_memsz) {
319 ERROR("Loadable segments physical addresses should "
320 "be consecutive\n");
321 free(phdrs);
322 return NULL;
323 }
324 }
325
326 if (phdrs)
327 phdrs[size - 1] = NULL;
328 return phdrs;
329}
330
Aaron Durbin4be16742015-09-15 17:00:23 -0500331int parse_elf_to_xip_stage(const struct buffer *input, struct buffer *output,
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700332 uint32_t location, const char *ignore_sections,
Julius Werner81dc20e2020-10-15 17:37:57 -0700333 struct cbfs_file_attr_stageheader *stageheader)
Aaron Durbin4be16742015-09-15 17:00:23 -0500334{
335 struct xip_context xipctx;
336 struct rmod_context *rmodctx;
337 struct reloc_filter filter;
338 struct parsed_elf *pelf;
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700339 uint32_t adjustment, memsz = 0;
Aaron Durbin4be16742015-09-15 17:00:23 -0500340 struct buffer binput;
341 struct buffer boutput;
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700342 Elf64_Phdr **toload, **phdr;
Aaron Durbin4be16742015-09-15 17:00:23 -0500343 Elf64_Xword i;
344 int ret = -1;
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700345 size_t filesz = 0;
Aaron Durbin4be16742015-09-15 17:00:23 -0500346
Aaron Durbin4be16742015-09-15 17:00:23 -0500347 rmodctx = &xipctx.rmodctx;
348 pelf = &rmodctx->pelf;
349
350 if (rmodule_init(rmodctx, input))
351 return -1;
352
Patrick Rudolph21046a32018-11-26 15:37:51 +0100353 /* Only support x86 / x86_64 XIP currently. */
354 if ((rmodctx->pelf.ehdr.e_machine != EM_386) &&
355 (rmodctx->pelf.ehdr.e_machine != EM_X86_64)) {
356 ERROR("Only support XIP stages for x86/x86_64\n");
Aaron Durbin4be16742015-09-15 17:00:23 -0500357 goto out;
358 }
359
Jeremy Compostellac9cae532023-08-30 10:25:33 -0700360 xipctx.ignored_sections =
361 find_ignored_sections_header(pelf, ignore_sections);
Aaron Durbin4be16742015-09-15 17:00:23 -0500362
363 filter.filter = rmod_filter;
364 filter.context = &xipctx;
365
366 if (rmodule_collect_relocations(rmodctx, &filter))
367 goto out;
368
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700369 toload = find_loadable_segments(pelf);
370 if (!toload)
371 goto out;
372
373 for (phdr = toload; *phdr; phdr++)
374 filesz += (*phdr)->p_filesz;
375 if (buffer_create(output, filesz, input->name) != 0) {
Aaron Durbin4be16742015-09-15 17:00:23 -0500376 ERROR("Unable to allocate memory: %m\n");
377 goto out;
378 }
379 buffer_clone(&boutput, output);
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700380 memset(buffer_get(&boutput), 0, filesz);
Aaron Durbin4be16742015-09-15 17:00:23 -0500381 buffer_set_size(&boutput, 0);
382
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700383 /* The program segment moves to final location from based on virtual
384 * address of loadable segment. */
Arthur Heymans5bb7dc42021-06-22 15:21:46 +0200385 adjustment = location - pelf->phdr->p_vaddr;
Aaron Durbin4be16742015-09-15 17:00:23 -0500386 DEBUG("Relocation adjustment: %08x\n", adjustment);
387
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700388 for (phdr = toload; *phdr; phdr++)
389 memsz += (*phdr)->p_memsz;
Julius Werner81dc20e2020-10-15 17:37:57 -0700390 fill_cbfs_stageheader(stageheader,
391 (uint32_t)pelf->ehdr.e_entry + adjustment,
392 (uint32_t)pelf->phdr->p_vaddr + adjustment,
Jeremy Compostella79f2e1f2023-08-30 15:35:46 -0700393 memsz);
394 for (phdr = toload; *phdr; phdr++) {
395 /* Need an adjustable buffer. */
396 buffer_clone(&binput, input);
397 buffer_seek(&binput, (*phdr)->p_offset);
398 bputs(&boutput, buffer_get(&binput), (*phdr)->p_filesz);
399 }
Aaron Durbin4be16742015-09-15 17:00:23 -0500400
401 buffer_clone(&boutput, output);
Aaron Durbin4be16742015-09-15 17:00:23 -0500402
403 /* Make adjustments to all the relocations within the program. */
404 for (i = 0; i < rmodctx->nrelocs; i++) {
405 size_t reloc_offset;
406 uint32_t val;
407 struct buffer in, out;
408
409 /* The relocations represent in-program addresses of the
410 * linked program. Obtain the offset into the program to do
411 * the adjustment. */
412 reloc_offset = rmodctx->emitted_relocs[i] - pelf->phdr->p_vaddr;
413
414 buffer_clone(&out, &boutput);
415 buffer_seek(&out, reloc_offset);
416 buffer_clone(&in, &out);
417 /* Appease around xdr semantics: xdr decrements buffer
418 * size when get()ing and appends to size when put()ing. */
419 buffer_set_size(&out, 0);
420
421 val = xdr_le.get32(&in);
422 DEBUG("reloc %zx %08x -> %08x\n", reloc_offset, val,
423 val + adjustment);
424 xdr_le.put32(&out, val + adjustment);
425 }
426
Aaron Durbin4be16742015-09-15 17:00:23 -0500427 ret = 0;
428
429out:
430 rmodule_cleanup(rmodctx);
431 return ret;
432}