/* SPDX-License-Identifier: GPL-2.0-only */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "elfparsing.h"
#include "rmodule.h"
#include <commonlib/rmodule-defs.h>

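/*
 * This tool converts an ELF executable into an rmodule: an image consisting
 * of an rmodule header, the contents of the program's single loadable
 * segment and a sorted list of link addresses that need fixing up when the
 * module is relocated at runtime.  rmodule_stage_to_elf() performs the
 * reverse conversion.
 */
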
/*
 * Architecture specific support operations.
 */
static int valid_reloc_386(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/* Only these 2 relocations are expected to be found. */
	return (type == R_386_32 || type == R_386_PC32);
}

static int should_emit_386(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/* R_386_32 relocations are absolute. Must emit these. */
	return (type == R_386_32);
}

static int valid_reloc_amd64(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/*
	 * Relocation R_AMD64_32S is not allowed. It can only be safely used in protected mode,
	 * and when the address pointed to is below 2 GiB in long mode.
	 * Using it in assembly operations will break compilation with error:
	 * E: Invalid reloc type: 11
	 */

	/* Only these 5 relocations are expected to be found. */
	return (type == R_AMD64_64 ||
		type == R_AMD64_PC64 ||
		type == R_AMD64_32 ||
		type == R_AMD64_PC32 ||
		/*
		 * binutils 2.31 introduced R_AMD64_PLT32 for non-local
		 * functions. As we don't care about procedure linkage
		 * table entries, handle it as R_X86_64_PC32.
		 */
		type == R_AMD64_PLT32);
}

static int should_emit_amd64(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/* Only emit absolute relocations */
	return (type == R_AMD64_64 ||
		type == R_AMD64_32);
}

static int valid_reloc_arm(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/* Only these 6 relocations are expected to be found. */
	return (type == R_ARM_ABS32 || type == R_ARM_THM_PC22 ||
		type == R_ARM_THM_JUMP24 || type == R_ARM_V4BX ||
		type == R_ARM_CALL || type == R_ARM_JUMP24);
}

static int should_emit_arm(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	/* R_ARM_ABS32 relocations are absolute. Must emit these. */
	return (type == R_ARM_ABS32);
}

static int valid_reloc_aarch64(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	return (type == R_AARCH64_ADR_PREL_PG_HI21 ||
		type == R_AARCH64_ADD_ABS_LO12_NC ||
		type == R_AARCH64_LDST8_ABS_LO12_NC ||
		type == R_AARCH64_CONDBR19 ||
		type == R_AARCH64_JUMP26 ||
		type == R_AARCH64_LDST32_ABS_LO12_NC ||
		type == R_AARCH64_LDST64_ABS_LO12_NC ||
		type == R_AARCH64_CALL26 ||
		type == R_AARCH64_ABS64 ||
		type == R_AARCH64_LD_PREL_LO19 ||
		type == R_AARCH64_ADR_PREL_LO21);
}

static int should_emit_aarch64(Elf64_Rela *rel)
{
	int type;

	type = ELF64_R_TYPE(rel->r_info);

	return (type == R_AARCH64_ABS64);
}

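/*
 * Per-architecture hooks.  rmodule_init() selects the entry whose .arch
 * matches the ELF header's e_machine field.  valid_type() rejects relocation
 * types the tool does not know how to handle, while should_emit() picks the
 * absolute relocations that must be recorded in the rmodule so they can be
 * fixed up at load time.
 */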
static const struct arch_ops reloc_ops[] = {
	{
		.arch = EM_386,
		.valid_type = valid_reloc_386,
		.should_emit = should_emit_386,
	},
	{
		.arch = EM_X86_64,
		.valid_type = valid_reloc_amd64,
		.should_emit = should_emit_amd64,
	},
	{
		.arch = EM_ARM,
		.valid_type = valid_reloc_arm,
		.should_emit = should_emit_arm,
	},
	{
		.arch = EM_AARCH64,
		.valid_type = valid_reloc_aarch64,
		.should_emit = should_emit_aarch64,
	},
};

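/*
 * Helpers that decide whether a relocation can be skipped entirely:
 * relocations against absolute symbols (SHN_ABS), weak undefined externs and
 * undefined symbols are not tied to the module's load address, so
 * for_each_reloc() omits them.
 */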
static int relocation_for_absolute_symbol(struct rmod_context *ctx, Elf64_Rela *r)
{
	Elf64_Sym *s = &ctx->pelf.syms[ELF64_R_SYM(r->r_info)];

	if (s->st_shndx == SHN_ABS) {
		DEBUG("Omitting relocation for absolute symbol: %s\n",
		      &ctx->strtab[s->st_name]);
		return 1;
	}

	return 0;
}

static int relocation_for_weak_extern_symbols(struct rmod_context *ctx, Elf64_Rela *r)
{
	Elf64_Sym *s = &ctx->pelf.syms[ELF64_R_SYM(r->r_info)];

	if (ELF64_ST_BIND(s->st_info) == STB_WEAK && ELF64_ST_TYPE(s->st_info) == STT_NOTYPE) {
		DEBUG("Omitting relocation for undefined extern: %s\n",
		      &ctx->strtab[s->st_name]);
		return 1;
	}

	return 0;
}

static int relocation_for_undefined_symbol(struct rmod_context *ctx, Elf64_Rela *r)
{
	Elf64_Sym *s = &ctx->pelf.syms[ELF64_R_SYM(r->r_info)];

	if (s->st_shndx == SHN_UNDEF) {
		DEBUG("Omitting relocation for undefined symbol: %s\n",
		      &ctx->strtab[s->st_name]);
		return 1;
	}

	return 0;
}

/*
 * Relocation processing loops.
 */

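/*
 * Walk every relocation of every section left by filter_relocation_sections().
 * Invalid relocation types abort the conversion.  The loop runs twice: a
 * counting pass (do_emit == 0) that only advances ctx->nrelocs and an
 * emitting pass (do_emit == 1) that also records each r_offset in
 * ctx->emitted_relocs.  An optional reloc_filter can veto individual
 * relocations before the architecture's should_emit() check.
 */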
static int for_each_reloc(struct rmod_context *ctx, struct reloc_filter *f,
			  int do_emit)
{
	Elf64_Half i;
	struct parsed_elf *pelf = &ctx->pelf;

	for (i = 0; i < pelf->ehdr.e_shnum; i++) {
		Elf64_Shdr *shdr;
		Elf64_Rela *relocs;
		Elf64_Xword nrelocs;
		Elf64_Xword j;

		relocs = pelf->relocs[i];

		/* No relocations in this section. */
		if (relocs == NULL)
			continue;

		shdr = &pelf->shdr[i];
		nrelocs = shdr->sh_size / shdr->sh_entsize;

		for (j = 0; j < nrelocs; j++) {
			int filter_emit = 1;
			Elf64_Rela *r = &relocs[j];

			if (!ctx->ops->valid_type(r)) {
				ERROR("Invalid reloc type: %u\n",
				      (unsigned int)ELF64_R_TYPE(r->r_info));
				if ((ctx->ops->arch == EM_X86_64) &&
				    (ELF64_R_TYPE(r->r_info) == R_AMD64_32S))
					ERROR("Illegal use of 32-bit sign-extended addressing at offset 0x%x\n",
					      (unsigned int)r->r_offset);
				return -1;
			}

			if (relocation_for_absolute_symbol(ctx, r))
				continue;

			if (relocation_for_weak_extern_symbols(ctx, r))
				continue;

			if (relocation_for_undefined_symbol(ctx, r))
				continue;

			/* Allow the provided filter to have precedence. */
			if (f != NULL) {
				filter_emit = f->filter(f, r);

				if (filter_emit < 0)
					return filter_emit;
			}

			if (filter_emit && ctx->ops->should_emit(r)) {
				int n = ctx->nrelocs;
				if (do_emit)
					ctx->emitted_relocs[n] = r->r_offset;
				ctx->nrelocs++;
			}
		}
	}

	return 0;
}

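/*
 * Locate the loadable program.  The first PT_LOAD program header is recorded
 * in ctx->phdr and the total number of loadable segments in ctx->nsegments;
 * write_elf() later insists on exactly one such segment.
 */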
static int find_program_segment(struct rmod_context *ctx)
{
	int i;
	int nsegments;
	struct parsed_elf *pelf;
	Elf64_Phdr *phdr = NULL;

	pelf = &ctx->pelf;

	/* Keep track of the first loadable segment and count them all. */
	nsegments = 0;
	for (i = 0; i < pelf->ehdr.e_phnum; i++) {
		if (pelf->phdr[i].p_type != PT_LOAD)
			continue;
		if (!phdr)
			phdr = &pelf->phdr[i];
		nsegments++;
	}

	if (nsegments == 0) {
		ERROR("No loadable segment found.\n");
		return -1;
	}

	INFO("Segment at 0x%0llx, file size 0x%0llx, mem size 0x%0llx.\n",
	     (long long)phdr->p_vaddr, (long long)phdr->p_filesz,
	     (long long)phdr->p_memsz);

	ctx->phdr = phdr;
	ctx->nsegments = nsegments;

	return 0;
}

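/*
 * Null out relocation arrays that do not need processing so that
 * for_each_reloc() only ever sees relocations applying to loadable program
 * content.
 */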
static int
filter_relocation_sections(struct rmod_context *ctx)
{
	int i, j;
	const char *shstrtab;
	struct parsed_elf *pelf;
	const Elf64_Phdr *phdr;

	pelf = &ctx->pelf;
	shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]);

	/*
	 * Find all relocation sections that contain relocation entries
	 * for sections that fall within the bounds of the segments. For
	 * easier processing, the pointers to the relocation arrays for
	 * sections that don't fall within the loadable program are NULL'd
	 * out.
	 */
	for (i = 0; i < pelf->ehdr.e_shnum; i++) {
		Elf64_Shdr *shdr;
		Elf64_Word sh_info;
		const char *section_name;

		shdr = &pelf->shdr[i];

		/* Ignore non-relocation sections. */
		if (shdr->sh_type != SHT_RELA && shdr->sh_type != SHT_REL)
			continue;

		/* Obtain the section to which the relocations apply. */
		sh_info = shdr->sh_info;
		shdr = &pelf->shdr[sh_info];

		section_name = &shstrtab[shdr->sh_name];
		DEBUG("Relocation section found for '%s' section.\n",
		      section_name);

		/* Do not process relocations for debug sections. */
		if (strstr(section_name, ".debug") != NULL) {
			pelf->relocs[i] = NULL;
			continue;
		}

		/*
		 * If the relocations apply to a non-program section, ignore
		 * them in future processing.
		 */
		if (shdr->sh_type != SHT_PROGBITS) {
			pelf->relocs[i] = NULL;
			continue;
		}

		for (j = 0; j < pelf->ehdr.e_phnum; j++) {
			phdr = &pelf->phdr[j];
			if (phdr->p_type == PT_LOAD &&
			    shdr->sh_addr >= phdr->p_vaddr &&
			    ((shdr->sh_addr + shdr->sh_size) <=
			     (phdr->p_vaddr + phdr->p_memsz)))
				break;
		}
		if (j == pelf->ehdr.e_phnum) {
			ERROR("Relocations being applied to section %d not "
			      "within segments region.\n", sh_info);
			pelf->relocs[i] = NULL;
			return -1;
		}
	}

	return 0;
}

static int vaddr_cmp(const void *a, const void *b)
{
	const Elf64_Addr *pa = a;
	const Elf64_Addr *pb = b;

	if (*pa < *pb)
		return -1;
	if (*pa > *pb)
		return 1;
	return 0;
}

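/*
 * Public entry point for relocation collection: count the relocations first
 * so emitted_relocs can be allocated in one go, collect them in a second
 * pass and sort the result by address.
 */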
int rmodule_collect_relocations(struct rmod_context *ctx,
				struct reloc_filter *f)
{
	Elf64_Xword nrelocs;

	/*
	 * The relocs array in the pelf should only contain relocations that
	 * apply to the program. Count the number of relocations. Then collect
	 * them into the allocated buffer.
	 */
	if (for_each_reloc(ctx, f, 0))
		return -1;

	nrelocs = ctx->nrelocs;
	INFO("%" PRIu64 " relocations to be emitted.\n", nrelocs);
	if (!nrelocs)
		return 0;

	/* Reset the counter for indexing into the array. */
	ctx->nrelocs = 0;
	ctx->emitted_relocs = calloc(nrelocs, sizeof(Elf64_Addr));
	/* Write out the relocations into the emitted_relocs array. */
	if (for_each_reloc(ctx, f, 1))
		return -1;

	if (ctx->nrelocs != nrelocs) {
		ERROR("Mismatch between counted and emitted relocations: %zu vs %zu.\n",
		      (size_t)nrelocs, (size_t)ctx->nrelocs);
		return -1;
	}

	/* Sort the relocations by their address. */
	qsort(ctx->emitted_relocs, nrelocs, sizeof(Elf64_Addr), vaddr_cmp);

	return 0;
}

static int
populate_sym(struct rmod_context *ctx, const char *sym_name, Elf64_Addr *addr,
	     int nsyms, int optional)
{
	int i;
	Elf64_Sym *syms;

	syms = ctx->pelf.syms;

	for (i = 0; i < nsyms; i++) {
		if (syms[i].st_name == 0)
			continue;
		if (strcmp(sym_name, &ctx->strtab[syms[i].st_name]))
			continue;
		DEBUG("%s -> 0x%llx\n", sym_name, (long long)syms[i].st_value);
		*addr = syms[i].st_value;
		return 0;
	}

	if (optional) {
		DEBUG("optional symbol '%s' not found.\n", sym_name);
		*addr = 0;
		return 0;
	}

	ERROR("symbol '%s' not found.\n", sym_name);
	return -1;
}

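/*
 * Resolve the symbols that describe the module's layout:
 * _rmodule_params/_ermodule_params (optional) bound the parameter area and
 * _bss/_ebss (required) bound the BSS.  Their values are later written into
 * the rmodule header by write_elf().
 */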
static int populate_rmodule_info(struct rmod_context *ctx)
{
	int i;
	struct parsed_elf *pelf;
	Elf64_Ehdr *ehdr;
	int nsyms;

	pelf = &ctx->pelf;
	ehdr = &pelf->ehdr;

	/* Determine number of symbols. */
	nsyms = 0;
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (pelf->shdr[i].sh_type != SHT_SYMTAB)
			continue;

		nsyms = pelf->shdr[i].sh_size / pelf->shdr[i].sh_entsize;
		break;
	}

	if (populate_sym(ctx, "_rmodule_params", &ctx->parameters_begin, nsyms, 1))
		return -1;

	if (populate_sym(ctx, "_ermodule_params", &ctx->parameters_end, nsyms, 1))
		return -1;

	if (populate_sym(ctx, "_bss", &ctx->bss_begin, nsyms, 0))
		return -1;

	if (populate_sym(ctx, "_ebss", &ctx->bss_end, nsyms, 0))
		return -1;

	return 0;
}

static int
add_section(struct elf_writer *ew, struct buffer *data, const char *name,
	    Elf64_Addr addr, Elf64_Word size)
{
	Elf64_Shdr shdr;
	int ret;

	memset(&shdr, 0, sizeof(shdr));
	if (data != NULL) {
		shdr.sh_type = SHT_PROGBITS;
		shdr.sh_flags = SHF_ALLOC | SHF_WRITE | SHF_EXECINSTR;
	} else {
		shdr.sh_type = SHT_NOBITS;
		shdr.sh_flags = SHF_ALLOC;
	}
	shdr.sh_addr = addr;
	shdr.sh_offset = addr;
	shdr.sh_size = size;

	ret = elf_writer_add_section(ew, &shdr, data, name);

	if (ret)
		ERROR("Could not add '%s' section.\n", name);

	return ret;
}

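/*
 * Serialize the rmodule into a new ELF image whose sections mirror the final
 * binary layout: .header, .program and .relocs, plus a NOBITS .empty section
 * when the in-memory footprint exceeds the file contents.  All header fields
 * are emitted through the endian-aware xdr ops so the output matches the
 * input ELF's byte order.
 *
 * As a worked example (illustrative numbers only): for a 32-bit module with
 * a 0x1000-byte .program and 4 emitted relocations,
 *   payload_begin_offset     = sizeof(struct rmodule_header)
 *   payload_end_offset       = payload_begin_offset + 0x1000
 *   relocations_begin_offset = payload_end_offset
 *   relocations_end_offset   = relocations_begin_offset + 4 * sizeof(Elf32_Addr)
 */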
static int
write_elf(const struct rmod_context *ctx, const struct buffer *in,
	  struct buffer *out)
{
	int ret;
	int bit64;
	size_t loc;
	size_t rmod_data_size;
	struct elf_writer *ew;
	struct buffer rmod_data;
	struct buffer rmod_header;
	struct buffer program;
	struct buffer relocs;
	Elf64_Xword total_size;
	Elf64_Addr addr;
	Elf64_Ehdr ehdr;

	if (ctx->nsegments != 1) {
		ERROR("Multiple loadable segments are not supported.\n");
		return -1;
	}

	bit64 = ctx->pelf.ehdr.e_ident[EI_CLASS] == ELFCLASS64;

	/*
	 * 3 sections will be added to the ELF file.
	 * +------------------+
	 * |  rmodule header  |
	 * +------------------+
	 * |     program      |
	 * +------------------+
	 * |   relocations    |
	 * +------------------+
	 */

	/* Create buffer for header and relocations. */
	rmod_data_size = sizeof(struct rmodule_header);
	if (bit64)
		rmod_data_size += ctx->nrelocs * sizeof(Elf64_Addr);
	else
		rmod_data_size += ctx->nrelocs * sizeof(Elf32_Addr);

	if (buffer_create(&rmod_data, rmod_data_size, "rmod"))
		return -1;

	buffer_splice(&rmod_header, &rmod_data,
		      0, sizeof(struct rmodule_header));
	buffer_clone(&relocs, &rmod_data);
	buffer_seek(&relocs, sizeof(struct rmodule_header));

	/* Reset current location. */
	buffer_set_size(&rmod_header, 0);
	buffer_set_size(&relocs, 0);

	/* Program contents. */
	buffer_splice(&program, in, ctx->phdr->p_offset, ctx->phdr->p_filesz);

	/* Create ELF writer. Set entry point to 0 to match section offsets. */
	memcpy(&ehdr, &ctx->pelf.ehdr, sizeof(ehdr));
	ehdr.e_entry = 0;
	ew = elf_writer_init(&ehdr);

	if (ew == NULL) {
		ERROR("Failed to create ELF writer.\n");
		buffer_delete(&rmod_data);
		return -1;
	}

	/* Write out rmodule_header. */
	ctx->xdr->put16(&rmod_header, RMODULE_MAGIC);
	ctx->xdr->put8(&rmod_header, RMODULE_VERSION_1);
	ctx->xdr->put8(&rmod_header, 0);
	/* payload_begin_offset */
	loc = sizeof(struct rmodule_header);
	ctx->xdr->put32(&rmod_header, loc);
	/* payload_end_offset */
	loc += ctx->phdr->p_filesz;
	ctx->xdr->put32(&rmod_header, loc);
	/* relocations_begin_offset */
	ctx->xdr->put32(&rmod_header, loc);
	/* relocations_end_offset */
	if (bit64)
		loc += ctx->nrelocs * sizeof(Elf64_Addr);
	else
		loc += ctx->nrelocs * sizeof(Elf32_Addr);
	ctx->xdr->put32(&rmod_header, loc);
	/* module_link_start_address */
	ctx->xdr->put32(&rmod_header, ctx->phdr->p_vaddr);
	/* module_program_size */
	ctx->xdr->put32(&rmod_header, ctx->phdr->p_memsz);
	/* module_entry_point */
	ctx->xdr->put32(&rmod_header, ctx->pelf.ehdr.e_entry);
	/* parameters_begin */
	ctx->xdr->put32(&rmod_header, ctx->parameters_begin);
	/* parameters_end */
	ctx->xdr->put32(&rmod_header, ctx->parameters_end);
	/* bss_begin */
	ctx->xdr->put32(&rmod_header, ctx->bss_begin);
	/* bss_end */
	ctx->xdr->put32(&rmod_header, ctx->bss_end);
	/* padding[4] */
	ctx->xdr->put32(&rmod_header, 0);
	ctx->xdr->put32(&rmod_header, 0);
	ctx->xdr->put32(&rmod_header, 0);
	ctx->xdr->put32(&rmod_header, 0);

	/* Write the relocations. */
	for (unsigned i = 0; i < ctx->nrelocs; i++) {
		if (bit64)
			ctx->xdr->put64(&relocs, ctx->emitted_relocs[i]);
		else
			ctx->xdr->put32(&relocs, ctx->emitted_relocs[i]);
	}

	total_size = 0;
	addr = 0;

	/*
	 * There are 2 cases to deal with. Either the program has a large
	 * NOBITS section and the relocations fit entirely within the occupied
	 * memory region of the program, or the relocations increase the
	 * memory footprint of the program compared to loading it directly
	 * into the region where it would run. The rmodule header is a fixed
	 * cost that is considered a part of the program.
	 */
	total_size += buffer_size(&rmod_header);
	if (buffer_size(&relocs) + ctx->phdr->p_filesz > ctx->phdr->p_memsz) {
		total_size += buffer_size(&relocs);
		total_size += ctx->phdr->p_filesz;
	} else {
		total_size += ctx->phdr->p_memsz;
	}

	ret = add_section(ew, &rmod_header, ".header", addr,
			  buffer_size(&rmod_header));
	if (ret < 0)
		goto out;
	addr += buffer_size(&rmod_header);

	ret = add_section(ew, &program, ".program", addr, ctx->phdr->p_filesz);
	if (ret < 0)
		goto out;
	addr += ctx->phdr->p_filesz;

	if (ctx->nrelocs) {
		ret = add_section(ew, &relocs, ".relocs", addr,
				  buffer_size(&relocs));
		if (ret < 0)
			goto out;
		addr += buffer_size(&relocs);
	}

	if (total_size != addr) {
		ret = add_section(ew, NULL, ".empty", addr, total_size - addr);
		if (ret < 0)
			goto out;
	}

	/*
	 * Ensure the last section has a memory usage that meets the required
	 * total size of the program in memory.
	 */

	ret = elf_writer_serialize(ew, out);
	if (ret < 0)
		ERROR("Failed to serialize ELF to buffer.\n");

out:
	buffer_delete(&rmod_data);
	elf_writer_destroy(ew);

	return ret;
}

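/*
 * Parse the input ELF and populate the context: reject non-executables,
 * select the architecture ops, pick the endian-aware xdr ops, locate the
 * symbol string table, find the loadable segment and prune relocation
 * sections that do not apply to it.
 */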
int rmodule_init(struct rmod_context *ctx, const struct buffer *elfin)
{
	struct parsed_elf *pelf;
	size_t i;
	int ret;

	ret = -1;
	memset(ctx, 0, sizeof(*ctx));
	pelf = &ctx->pelf;

	if (parse_elf(elfin, pelf, ELF_PARSE_ALL)) {
		ERROR("Couldn't parse ELF!\n");
		return -1;
	}

	/* Only allow executables to be turned into rmodules. */
	if (pelf->ehdr.e_type != ET_EXEC) {
		ERROR("ELF is not an executable: %u.\n", pelf->ehdr.e_type);
		goto out;
	}

	/* Determine if architecture is supported. */
	for (i = 0; i < ARRAY_SIZE(reloc_ops); i++) {
		if (reloc_ops[i].arch == pelf->ehdr.e_machine) {
			ctx->ops = &reloc_ops[i];
			break;
		}
	}

	if (ctx->ops == NULL) {
		ERROR("ELF is unsupported arch: %u.\n", pelf->ehdr.e_machine);
		goto out;
	}

	/* Set the endian ops. */
	if (ctx->pelf.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		ctx->xdr = &xdr_be;
	else
		ctx->xdr = &xdr_le;

	/* Obtain the string table. */
	for (i = 0; i < pelf->ehdr.e_shnum; i++) {
		if (pelf->strtabs[i] == NULL)
			continue;
		/* Don't use the section headers' string table. */
		if (i == pelf->ehdr.e_shstrndx)
			continue;
		ctx->strtab = buffer_get(pelf->strtabs[i]);
		break;
	}

	if (ctx->strtab == NULL) {
		ERROR("No string table found.\n");
		return -1;
	}

	if (find_program_segment(ctx))
		goto out;

	if (filter_relocation_sections(ctx))
		goto out;

	ret = 0;

out:
	return ret;
}

void rmodule_cleanup(struct rmod_context *ctx)
{
	free(ctx->emitted_relocs);
	parsed_elf_destroy(&ctx->pelf);
}

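/*
 * Top-level conversion pipeline: parse and validate the input ELF, collect
 * the relocations that must be applied at load time, resolve the layout
 * symbols and finally serialize the rmodule-wrapped ELF into elfout.
 */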
int rmodule_create(const struct buffer *elfin, struct buffer *elfout)
{
	struct rmod_context ctx;
	int ret = -1;

	if (rmodule_init(&ctx, elfin))
		goto out;

	if (rmodule_collect_relocations(&ctx, NULL))
		goto out;

	if (populate_rmodule_info(&ctx))
		goto out;

	if (write_elf(&ctx, elfin, elfout))
		goto out;

	ret = 0;

out:
	rmodule_cleanup(&ctx);
	return ret;
}

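/*
 * Read back an rmodule_header from a buffer.  The field order must stay in
 * sync with the serialization in write_elf().
 */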
static void rmod_deserialize(struct rmodule_header *rmod, struct buffer *buff,
			     struct xdr *xdr)
{
	rmod->magic = xdr->get16(buff);
	rmod->version = xdr->get8(buff);
	rmod->type = xdr->get8(buff);
	rmod->payload_begin_offset = xdr->get32(buff);
	rmod->payload_end_offset = xdr->get32(buff);
	rmod->relocations_begin_offset = xdr->get32(buff);
	rmod->relocations_end_offset = xdr->get32(buff);
	rmod->module_link_start_address = xdr->get32(buff);
	rmod->module_program_size = xdr->get32(buff);
	rmod->module_entry_point = xdr->get32(buff);
	rmod->parameters_begin = xdr->get32(buff);
	rmod->parameters_end = xdr->get32(buff);
	rmod->bss_begin = xdr->get32(buff);
	rmod->bss_end = xdr->get32(buff);
	rmod->padding[0] = xdr->get32(buff);
	rmod->padding[1] = xdr->get32(buff);
	rmod->padding[2] = xdr->get32(buff);
	rmod->padding[3] = xdr->get32(buff);
}

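/*
 * Reverse conversion: unpack an rmodule image back into a relocatable ELF.
 * The payload becomes a .program section, any trailing in-memory area a
 * NOBITS .empty section, the layout symbols are re-created and each recorded
 * address is turned into a relocation entry against the .program section
 * symbol.  On success the created ELF replaces the contents of buff.
 */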
int rmodule_stage_to_elf(Elf64_Ehdr *ehdr, struct buffer *buff)
{
	struct buffer reader;
	struct buffer elf_out;
	struct rmodule_header rmod;
	struct xdr *xdr;
	struct elf_writer *ew;
	Elf64_Shdr shdr;
	int bit64;
	size_t payload_sz;
	const char *section_name = ".program";
	const size_t input_sz = buffer_size(buff);

	buffer_clone(&reader, buff);

	xdr = (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) ? &xdr_be : &xdr_le;
	bit64 = ehdr->e_ident[EI_CLASS] == ELFCLASS64;

	rmod_deserialize(&rmod, &reader, xdr);

	/* Indicate that file is not an rmodule if initial checks fail. */
	if (rmod.magic != RMODULE_MAGIC)
		return 1;
	if (rmod.version != RMODULE_VERSION_1)
		return 1;

	if (rmod.payload_begin_offset > input_sz ||
	    rmod.payload_end_offset > input_sz ||
	    rmod.relocations_begin_offset > input_sz ||
	    rmod.relocations_end_offset > input_sz) {
		ERROR("Rmodule fields out of bounds.\n");
		return -1;
	}

	ehdr->e_entry = rmod.module_entry_point;
	ew = elf_writer_init(ehdr);

	if (ew == NULL)
		return -1;

	payload_sz = rmod.payload_end_offset - rmod.payload_begin_offset;
	memset(&shdr, 0, sizeof(shdr));
	shdr.sh_type = SHT_PROGBITS;
	shdr.sh_flags = SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR;
	shdr.sh_addr = rmod.module_link_start_address;
	shdr.sh_size = payload_sz;
	buffer_splice(&reader, buff, rmod.payload_begin_offset, payload_sz);

	if (elf_writer_add_section(ew, &shdr, &reader, section_name)) {
		ERROR("Unable to add ELF section: %s\n", section_name);
		elf_writer_destroy(ew);
		return -1;
	}

	if (payload_sz != rmod.module_program_size) {
		struct buffer b;

		buffer_init(&b, NULL, NULL, 0);
		memset(&shdr, 0, sizeof(shdr));
		shdr.sh_type = SHT_NOBITS;
		shdr.sh_flags = SHF_WRITE | SHF_ALLOC;
		shdr.sh_addr = rmod.module_link_start_address + payload_sz;
		shdr.sh_size = rmod.module_program_size - payload_sz;
		if (elf_writer_add_section(ew, &shdr, &b, ".empty")) {
			ERROR("Unable to add ELF section: .empty\n");
			elf_writer_destroy(ew);
			return -1;
		}
	}

	/* Provide a section symbol so the relocations can reference it. */
	if (elf_writer_add_symbol(ew, section_name, section_name, shdr.sh_addr,
				  0, STB_LOCAL, STT_SECTION)) {
		ERROR("Unable to add section symbol to ELF.\n");
		elf_writer_destroy(ew);
		return -1;
	}

	/* Add symbols for the parameters if they are non-zero. */
	if (rmod.parameters_begin != rmod.parameters_end) {
		int ret = 0;

		ret |= elf_writer_add_symbol(ew, "_rmodule_params",
					     section_name,
					     rmod.parameters_begin, 0,
					     STB_GLOBAL, STT_NOTYPE);
		ret |= elf_writer_add_symbol(ew, "_ermodule_params",
					     section_name,
					     rmod.parameters_end, 0,
					     STB_GLOBAL, STT_NOTYPE);

		if (ret != 0) {
			ERROR("Unable to add module params symbols to ELF\n");
			elf_writer_destroy(ew);
			return -1;
		}
	}

	if (elf_writer_add_symbol(ew, "_bss", section_name, rmod.bss_begin, 0,
				  STB_GLOBAL, STT_NOTYPE) ||
	    elf_writer_add_symbol(ew, "_ebss", section_name, rmod.bss_end, 0,
				  STB_GLOBAL, STT_NOTYPE)) {
		ERROR("Unable to add bss symbols to ELF\n");
		elf_writer_destroy(ew);
		return -1;
	}

	ssize_t relocs_sz = rmod.relocations_end_offset;
	relocs_sz -= rmod.relocations_begin_offset;
	buffer_splice(&reader, buff, rmod.relocations_begin_offset, relocs_sz);
	while (relocs_sz > 0) {
		Elf64_Addr addr;

		if (bit64) {
			relocs_sz -= sizeof(Elf64_Addr);
			addr = xdr->get64(&reader);
		} else {
			relocs_sz -= sizeof(Elf32_Addr);
			addr = xdr->get32(&reader);
		}

		/* Skip any relocations that are below the link address. */
		if (addr < rmod.module_link_start_address)
			continue;

		if (elf_writer_add_rel(ew, section_name, addr)) {
			ERROR("Relocation addition failure.\n");
			elf_writer_destroy(ew);
			return -1;
		}
	}

	if (elf_writer_serialize(ew, &elf_out)) {
		ERROR("ELF writer serialize failure.\n");
		elf_writer_destroy(ew);
		return -1;
	}

	elf_writer_destroy(ew);

	/* Swap the buffer contents with the newly created ELF. */
	buffer_delete(buff);
	*buff = elf_out;

	return 0;
}