blob: 429bbf37fb0625af03ae8a91c52fb1136ea48b09 [file] [log] [blame]
Patrick Georgi7333a112020-05-08 20:48:04 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin4fde5a62014-03-07 15:11:53 -06002
Sol Boucher0e539312015-03-05 15:38:03 -08003#include <inttypes.h>
Aaron Durbin4fde5a62014-03-07 15:11:53 -06004#include <stdio.h>
5#include <stdlib.h>
6#include <string.h>
7
8#include "elfparsing.h"
9#include "rmodule.h"
Aaron Durbindc9f5cd2015-09-08 13:34:43 -050010#include <commonlib/rmodule-defs.h>
Aaron Durbin4fde5a62014-03-07 15:11:53 -060011
Aaron Durbin4fde5a62014-03-07 15:11:53 -060012/*
13 * Architecture specific support operations.
14 */
/* Report whether a relocation entry is one the x86 rmodule logic supports. */
static int valid_reloc_386(Elf64_Rela *rel)
{
	int rtype = ELF64_R_TYPE(rel->r_info);

	/* Only absolute (R_386_32) and PC-relative (R_386_PC32)
	   relocations are expected to be found. */
	return rtype == R_386_32 || rtype == R_386_PC32;
}
24
/* Decide whether an x86 relocation must be emitted into the rmodule. */
static int should_emit_386(Elf64_Rela *rel)
{
	/* R_386_32 relocations are absolute and need a load-time fixup;
	   PC-relative ones are position independent already. */
	return ELF64_R_TYPE(rel->r_info) == R_386_32;
}
34
Patrick Rudolph565bebe2018-11-26 15:54:21 +010035static int valid_reloc_amd64(Elf64_Rela *rel)
36{
37 int type;
38
39 type = ELF64_R_TYPE(rel->r_info);
40
Patrick Rudolph44b4ec72019-02-15 14:41:20 +010041 /* Only these 6 relocations are expected to be found. */
Patrick Rudolph565bebe2018-11-26 15:54:21 +010042 return (type == R_AMD64_64 ||
43 type == R_AMD64_PC64 ||
44 type == R_AMD64_32S ||
45 type == R_AMD64_32 ||
Patrick Rudolph44b4ec72019-02-15 14:41:20 +010046 type == R_AMD64_PC32 ||
47 /*
48 * binutils 2.31 introduced R_AMD64_PLT32 for non local
49 * functions. As we don't care about procedure linkage
50 * table entries handle it as R_X86_64_PC32.
51 */
52 type == R_AMD64_PLT32);
Patrick Rudolph565bebe2018-11-26 15:54:21 +010053}
54
55static int should_emit_amd64(Elf64_Rela *rel)
56{
57 int type;
58
59 type = ELF64_R_TYPE(rel->r_info);
60
61 /* Only emit absolute relocations */
62 return (type == R_AMD64_64 ||
Patrick Rudolph565bebe2018-11-26 15:54:21 +010063 type == R_AMD64_32S ||
64 type == R_AMD64_32);
65}
66
/* Report whether a relocation entry is one the ARM rmodule logic supports. */
static int valid_reloc_arm(Elf64_Rela *rel)
{
	/* Only these 6 relocation types are expected to be found. */
	switch (ELF64_R_TYPE(rel->r_info)) {
	case R_ARM_ABS32:
	case R_ARM_THM_PC22:
	case R_ARM_THM_JUMP24:
	case R_ARM_V4BX:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
		return 1;
	default:
		return 0;
	}
}
78
/* Decide whether an ARM relocation must be emitted into the rmodule. */
static int should_emit_arm(Elf64_Rela *rel)
{
	/* R_ARM_ABS32 relocations are absolute and need a load-time fixup. */
	return ELF64_R_TYPE(rel->r_info) == R_ARM_ABS32;
}
88
/* Report whether a relocation entry is one the AArch64 rmodule logic supports. */
static int valid_reloc_aarch64(Elf64_Rela *rel)
{
	switch (ELF64_R_TYPE(rel->r_info)) {
	case R_AARCH64_ADR_PREL_PG_HI21:
	case R_AARCH64_ADD_ABS_LO12_NC:
	case R_AARCH64_LDST8_ABS_LO12_NC:
	case R_AARCH64_CONDBR19:
	case R_AARCH64_JUMP26:
	case R_AARCH64_LDST32_ABS_LO12_NC:
	case R_AARCH64_LDST64_ABS_LO12_NC:
	case R_AARCH64_CALL26:
	case R_AARCH64_ABS64:
	case R_AARCH64_LD_PREL_LO19:
	case R_AARCH64_ADR_PREL_LO21:
		return 1;
	default:
		return 0;
	}
}
107
/* Decide whether an AArch64 relocation must be emitted into the rmodule. */
static int should_emit_aarch64(Elf64_Rela *rel)
{
	/* Only the absolute 64-bit relocation needs a load-time fixup. */
	return ELF64_R_TYPE(rel->r_info) == R_AARCH64_ABS64;
}
116
Aaron Durbinb39a9742015-09-08 17:24:04 -0500117static const struct arch_ops reloc_ops[] = {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600118 {
119 .arch = EM_386,
120 .valid_type = valid_reloc_386,
121 .should_emit = should_emit_386,
122 },
Aaron Durbin785e47b2014-03-20 11:08:02 -0500123 {
Patrick Rudolph565bebe2018-11-26 15:54:21 +0100124 .arch = EM_X86_64,
125 .valid_type = valid_reloc_amd64,
126 .should_emit = should_emit_amd64,
127 },
128 {
Aaron Durbin785e47b2014-03-20 11:08:02 -0500129 .arch = EM_ARM,
130 .valid_type = valid_reloc_arm,
131 .should_emit = should_emit_arm,
132 },
Furquan Shaikhd2338ba2014-08-26 15:21:15 -0700133 {
134 .arch = EM_AARCH64,
135 .valid_type = valid_reloc_aarch64,
136 .should_emit = should_emit_aarch64,
137 },
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600138};
139
140/*
141 * Relocation processing loops.
142 */
143
Aaron Durbinb39a9742015-09-08 17:24:04 -0500144static int for_each_reloc(struct rmod_context *ctx, struct reloc_filter *f,
145 int do_emit)
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600146{
147 Elf64_Half i;
148 struct parsed_elf *pelf = &ctx->pelf;
149
150 for (i = 0; i < pelf->ehdr.e_shnum; i++) {
151 Elf64_Shdr *shdr;
152 Elf64_Rela *relocs;
153 Elf64_Xword nrelocs;
154 Elf64_Xword j;
155
156 relocs = pelf->relocs[i];
157
158 /* No relocations in this section. */
159 if (relocs == NULL)
160 continue;
161
162 shdr = &pelf->shdr[i];
163 nrelocs = shdr->sh_size / shdr->sh_entsize;
164
165 for (j = 0; j < nrelocs; j++) {
Aaron Durbinb39a9742015-09-08 17:24:04 -0500166 int filter_emit = 1;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600167 Elf64_Rela *r = &relocs[j];
168
Sol Boucher0e539312015-03-05 15:38:03 -0800169 if (!ctx->ops->valid_type(r)) {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600170 ERROR("Invalid reloc type: %u\n",
171 (unsigned int)ELF64_R_TYPE(r->r_info));
172 return -1;
173 }
174
Aaron Durbinb39a9742015-09-08 17:24:04 -0500175 /* Allow the provided filter to have precedence. */
176 if (f != NULL) {
177 filter_emit = f->filter(f, r);
178
179 if (filter_emit < 0)
180 return filter_emit;
181 }
182
183 if (filter_emit && ctx->ops->should_emit(r)) {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600184 int n = ctx->nrelocs;
185 if (do_emit)
186 ctx->emitted_relocs[n] = r->r_offset;
187 ctx->nrelocs++;
188 }
189 }
190 }
191
192 return 0;
193}
194
195static int find_program_segment(struct rmod_context *ctx)
196{
197 int i;
198 int nsegments;
199 struct parsed_elf *pelf;
Anatol Pomozov8cce7012015-07-10 17:30:01 -0700200 Elf64_Phdr *phdr = NULL;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600201
202 pelf = &ctx->pelf;
203
204 /* There should only be a single loadable segment. */
205 nsegments = 0;
206 for (i = 0; i < pelf->ehdr.e_phnum; i++) {
207 if (pelf->phdr[i].p_type != PT_LOAD)
208 continue;
209 phdr = &pelf->phdr[i];
210 nsegments++;
211 }
212
213 if (nsegments != 1) {
Patrick Georgi01cfecc2020-01-29 13:31:16 +0100214 ERROR("Unexpected number of loadable segments: %d.\n",
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600215 nsegments);
216 return -1;
217 }
218
219 INFO("Segment at 0x%0llx, file size 0x%0llx, mem size 0x%0llx.\n",
220 (long long)phdr->p_vaddr, (long long)phdr->p_filesz,
221 (long long)phdr->p_memsz);
222
223 ctx->phdr = phdr;
224
225 return 0;
226}
227
228static int
229filter_relocation_sections(struct rmod_context *ctx)
230{
231 int i;
232 const char *shstrtab;
233 struct parsed_elf *pelf;
234 const Elf64_Phdr *phdr;
235
236 pelf = &ctx->pelf;
237 phdr = ctx->phdr;
238 shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]);
239
240 /*
241 * Find all relocation sections that contain relocation entries
242 * for sections that fall within the bounds of the segment. For
243 * easier processing the pointer to the relocation array for the
244 * sections that don't fall within the loadable program are NULL'd
245 * out.
246 */
247 for (i = 0; i < pelf->ehdr.e_shnum; i++) {
248 Elf64_Shdr *shdr;
249 Elf64_Word sh_info;
250 const char *section_name;
251
252 shdr = &pelf->shdr[i];
253
254 /* Ignore non-relocation sections. */
255 if (shdr->sh_type != SHT_RELA && shdr->sh_type != SHT_REL)
256 continue;
257
258 /* Obtain section which relocations apply. */
259 sh_info = shdr->sh_info;
260 shdr = &pelf->shdr[sh_info];
261
262 section_name = &shstrtab[shdr->sh_name];
263 DEBUG("Relocation section found for '%s' section.\n",
264 section_name);
265
266 /* Do not process relocations for debug sections. */
267 if (strstr(section_name, ".debug") != NULL) {
268 pelf->relocs[i] = NULL;
269 continue;
270 }
271
272 /*
273 * If relocations apply to a non program section ignore the
274 * relocations for future processing.
275 */
276 if (shdr->sh_type != SHT_PROGBITS) {
277 pelf->relocs[i] = NULL;
278 continue;
279 }
280
281 if (shdr->sh_addr < phdr->p_vaddr ||
282 ((shdr->sh_addr + shdr->sh_size) >
283 (phdr->p_vaddr + phdr->p_memsz))) {
284 ERROR("Relocations being applied to section %d not "
285 "within segment region.\n", sh_info);
286 return -1;
287 }
288 }
289
290 return 0;
291}
292
/* qsort comparator ordering Elf64_Addr values ascending. */
static int vaddr_cmp(const void *a, const void *b)
{
	const Elf64_Addr lhs = *(const Elf64_Addr *)a;
	const Elf64_Addr rhs = *(const Elf64_Addr *)b;

	/* Branchless three-way compare: -1, 0, or 1. */
	return (lhs > rhs) - (lhs < rhs);
}
304
Aaron Durbinb39a9742015-09-08 17:24:04 -0500305int rmodule_collect_relocations(struct rmod_context *ctx,
306 struct reloc_filter *f)
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600307{
Sol Boucher0e539312015-03-05 15:38:03 -0800308 Elf64_Xword nrelocs;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600309
310 /*
311 * The relocs array in the pelf should only contain relocations that
312 * apply to the program. Count the number relocations. Then collect
313 * them into the allocated buffer.
314 */
Aaron Durbinb39a9742015-09-08 17:24:04 -0500315 if (for_each_reloc(ctx, f, 0))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600316 return -1;
317
318 nrelocs = ctx->nrelocs;
Sol Boucher0e539312015-03-05 15:38:03 -0800319 INFO("%" PRIu64 " relocations to be emitted.\n", nrelocs);
Furquan Shaikhb237c102014-08-26 14:59:36 -0700320 if (!nrelocs)
321 return 0;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600322
323 /* Reset the counter for indexing into the array. */
324 ctx->nrelocs = 0;
325 ctx->emitted_relocs = calloc(nrelocs, sizeof(Elf64_Addr));
326 /* Write out the relocations into the emitted_relocs array. */
Aaron Durbinb39a9742015-09-08 17:24:04 -0500327 if (for_each_reloc(ctx, f, 1))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600328 return -1;
329
330 if (ctx->nrelocs != nrelocs) {
331 ERROR("Mismatch counted and emitted relocations: %zu vs %zu.\n",
332 (size_t)nrelocs, (size_t)ctx->nrelocs);
333 return -1;
334 }
335
336 /* Sort the relocations by their address. */
337 qsort(ctx->emitted_relocs, nrelocs, sizeof(Elf64_Addr), vaddr_cmp);
338
339 return 0;
340}
341
342static int
343populate_sym(struct rmod_context *ctx, const char *sym_name, Elf64_Addr *addr,
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500344 int nsyms, const char *strtab, int optional)
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600345{
346 int i;
347 Elf64_Sym *syms;
348
349 syms = ctx->pelf.syms;
350
351 for (i = 0; i < nsyms; i++) {
352 if (syms[i].st_name == 0)
353 continue;
354 if (strcmp(sym_name, &strtab[syms[i].st_name]))
355 continue;
356 DEBUG("%s -> 0x%llx\n", sym_name, (long long)syms[i].st_value);
357 *addr = syms[i].st_value;
358 return 0;
359 }
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500360
361 if (optional) {
362 DEBUG("optional symbol '%s' not found.\n", sym_name);
363 *addr = 0;
364 return 0;
365 }
366
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600367 ERROR("symbol '%s' not found.\n", sym_name);
368 return -1;
369}
370
Aaron Durbin051a1812015-09-08 15:52:01 -0500371static int populate_rmodule_info(struct rmod_context *ctx)
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600372{
373 int i;
374 const char *strtab;
375 struct parsed_elf *pelf;
376 Elf64_Ehdr *ehdr;
377 int nsyms;
378
379 pelf = &ctx->pelf;
380 ehdr = &pelf->ehdr;
381
382 /* Obtain the string table. */
383 strtab = NULL;
384 for (i = 0; i < ehdr->e_shnum; i++) {
385 if (ctx->pelf.strtabs[i] == NULL)
386 continue;
387 /* Don't use the section headers' string table. */
388 if (i == ehdr->e_shstrndx)
389 continue;
390 strtab = buffer_get(ctx->pelf.strtabs[i]);
391 break;
392 }
393
394 if (strtab == NULL) {
395 ERROR("No string table found.\n");
396 return -1;
397 }
398
399 /* Determine number of symbols. */
400 nsyms = 0;
401 for (i = 0; i < ehdr->e_shnum; i++) {
402 if (pelf->shdr[i].sh_type != SHT_SYMTAB)
403 continue;
404
405 nsyms = pelf->shdr[i].sh_size / pelf->shdr[i].sh_entsize;
406 break;
407 }
408
Aaron Durbindde76292015-09-05 12:59:26 -0500409 if (populate_sym(ctx, "_rmodule_params", &ctx->parameters_begin,
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500410 nsyms, strtab, 1))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600411 return -1;
412
Aaron Durbindde76292015-09-05 12:59:26 -0500413 if (populate_sym(ctx, "_ermodule_params", &ctx->parameters_end,
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500414 nsyms, strtab, 1))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600415 return -1;
416
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500417 if (populate_sym(ctx, "_bss", &ctx->bss_begin, nsyms, strtab, 0))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600418 return -1;
419
Aaron Durbinc9b053d2015-09-06 10:39:10 -0500420 if (populate_sym(ctx, "_ebss", &ctx->bss_end, nsyms, strtab, 0))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600421 return -1;
422
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600423 return 0;
424}
425
426static int
427add_section(struct elf_writer *ew, struct buffer *data, const char *name,
428 Elf64_Addr addr, Elf64_Word size)
429{
430 Elf64_Shdr shdr;
431 int ret;
432
433 memset(&shdr, 0, sizeof(shdr));
434 if (data != NULL) {
435 shdr.sh_type = SHT_PROGBITS;
436 shdr.sh_flags = SHF_ALLOC | SHF_WRITE | SHF_EXECINSTR;
437 } else {
438 shdr.sh_type = SHT_NOBITS;
439 shdr.sh_flags = SHF_ALLOC;
440 }
441 shdr.sh_addr = addr;
442 shdr.sh_offset = addr;
443 shdr.sh_size = size;
444
445 ret = elf_writer_add_section(ew, &shdr, data, name);
446
447 if (ret)
448 ERROR("Could not add '%s' section.\n", name);
449
450 return ret;
451}
452
453static int
454write_elf(const struct rmod_context *ctx, const struct buffer *in,
455 struct buffer *out)
456{
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600457 int ret;
458 int bit64;
459 size_t loc;
460 size_t rmod_data_size;
461 struct elf_writer *ew;
462 struct buffer rmod_data;
463 struct buffer rmod_header;
464 struct buffer program;
465 struct buffer relocs;
466 Elf64_Xword total_size;
467 Elf64_Addr addr;
468 Elf64_Ehdr ehdr;
469
470 bit64 = ctx->pelf.ehdr.e_ident[EI_CLASS] == ELFCLASS64;
471
472 /*
473 * 3 sections will be added to the ELF file.
474 * +------------------+
475 * | rmodule header |
476 * +------------------+
477 * | program |
478 * +------------------+
479 * | relocations |
480 * +------------------+
481 */
482
483 /* Create buffer for header and relocations. */
484 rmod_data_size = sizeof(struct rmodule_header);
485 if (bit64)
486 rmod_data_size += ctx->nrelocs * sizeof(Elf64_Addr);
487 else
488 rmod_data_size += ctx->nrelocs * sizeof(Elf32_Addr);
489
490 if (buffer_create(&rmod_data, rmod_data_size, "rmod"))
491 return -1;
492
493 buffer_splice(&rmod_header, &rmod_data,
494 0, sizeof(struct rmodule_header));
495 buffer_clone(&relocs, &rmod_data);
496 buffer_seek(&relocs, sizeof(struct rmodule_header));
497
498 /* Reset current location. */
499 buffer_set_size(&rmod_header, 0);
500 buffer_set_size(&relocs, 0);
501
502 /* Program contents. */
503 buffer_splice(&program, in, ctx->phdr->p_offset, ctx->phdr->p_filesz);
504
505 /* Create ELF writer with modified entry point. */
506 memcpy(&ehdr, &ctx->pelf.ehdr, sizeof(ehdr));
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600507 ew = elf_writer_init(&ehdr);
508
509 if (ew == NULL) {
510 ERROR("Failed to create ELF writer.\n");
511 buffer_delete(&rmod_data);
512 return -1;
513 }
514
515 /* Write out rmodule_header. */
516 ctx->xdr->put16(&rmod_header, RMODULE_MAGIC);
517 ctx->xdr->put8(&rmod_header, RMODULE_VERSION_1);
518 ctx->xdr->put8(&rmod_header, 0);
519 /* payload_begin_offset */
520 loc = sizeof(struct rmodule_header);
521 ctx->xdr->put32(&rmod_header, loc);
522 /* payload_end_offset */
523 loc += ctx->phdr->p_filesz;
524 ctx->xdr->put32(&rmod_header, loc);
525 /* relocations_begin_offset */
526 ctx->xdr->put32(&rmod_header, loc);
527 /* relocations_end_offset */
528 if (bit64)
529 loc += ctx->nrelocs * sizeof(Elf64_Addr);
530 else
531 loc += ctx->nrelocs * sizeof(Elf32_Addr);
532 ctx->xdr->put32(&rmod_header, loc);
533 /* module_link_start_address */
Aaron Durbin051a1812015-09-08 15:52:01 -0500534 ctx->xdr->put32(&rmod_header, ctx->phdr->p_vaddr);
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600535 /* module_program_size */
Aaron Durbin051a1812015-09-08 15:52:01 -0500536 ctx->xdr->put32(&rmod_header, ctx->phdr->p_memsz);
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600537 /* module_entry_point */
Aaron Durbin051a1812015-09-08 15:52:01 -0500538 ctx->xdr->put32(&rmod_header, ctx->pelf.ehdr.e_entry);
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600539 /* parameters_begin */
540 ctx->xdr->put32(&rmod_header, ctx->parameters_begin);
541 /* parameters_end */
542 ctx->xdr->put32(&rmod_header, ctx->parameters_end);
543 /* bss_begin */
544 ctx->xdr->put32(&rmod_header, ctx->bss_begin);
545 /* bss_end */
546 ctx->xdr->put32(&rmod_header, ctx->bss_end);
547 /* padding[4] */
548 ctx->xdr->put32(&rmod_header, 0);
549 ctx->xdr->put32(&rmod_header, 0);
550 ctx->xdr->put32(&rmod_header, 0);
551 ctx->xdr->put32(&rmod_header, 0);
552
553 /* Write the relocations. */
Sol Boucher0e539312015-03-05 15:38:03 -0800554 for (unsigned i = 0; i < ctx->nrelocs; i++) {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600555 if (bit64)
556 ctx->xdr->put64(&relocs, ctx->emitted_relocs[i]);
557 else
558 ctx->xdr->put32(&relocs, ctx->emitted_relocs[i]);
559 }
560
561 total_size = 0;
562 addr = 0;
563
564 /*
565 * There are 2 cases to deal with. The program has a large NOBITS
566 * section and the relocations can fit entirely within occupied memory
567 * region for the program. The other is that the relocations increase
568 * the memory footprint of the program if it was loaded directly into
Frans Hendriks166cbde2018-11-22 14:21:12 +0100569 * the region it would run. The rmodule header is a fixed cost that
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600570 * is considered a part of the program.
571 */
572 total_size += buffer_size(&rmod_header);
Aaron Durbin518a3222014-08-26 13:52:30 -0500573 if (buffer_size(&relocs) + ctx->phdr->p_filesz > ctx->phdr->p_memsz) {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600574 total_size += buffer_size(&relocs);
575 total_size += ctx->phdr->p_filesz;
Aaron Durbin518a3222014-08-26 13:52:30 -0500576 } else {
577 total_size += ctx->phdr->p_memsz;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600578 }
579
580 ret = add_section(ew, &rmod_header, ".header", addr,
581 buffer_size(&rmod_header));
582 if (ret < 0)
583 goto out;
584 addr += buffer_size(&rmod_header);
585
586 ret = add_section(ew, &program, ".program", addr, ctx->phdr->p_filesz);
587 if (ret < 0)
588 goto out;
589 addr += ctx->phdr->p_filesz;
590
Furquan Shaikhb237c102014-08-26 14:59:36 -0700591 if (ctx->nrelocs) {
592 ret = add_section(ew, &relocs, ".relocs", addr,
593 buffer_size(&relocs));
594 if (ret < 0)
595 goto out;
596 addr += buffer_size(&relocs);
597 }
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600598
599 if (total_size != addr) {
600 ret = add_section(ew, NULL, ".empty", addr, total_size - addr);
601 if (ret < 0)
602 goto out;
603 }
604
605 /*
606 * Ensure last section has a memory usage that meets the required
607 * total size of the program in memory.
608 */
609
610 ret = elf_writer_serialize(ew, out);
611 if (ret < 0)
612 ERROR("Failed to serialize ELF to buffer.\n");
613
614out:
615 buffer_delete(&rmod_data);
616 elf_writer_destroy(ew);
617
618 return ret;
619}
620
Aaron Durbinb39a9742015-09-08 17:24:04 -0500621int rmodule_init(struct rmod_context *ctx, const struct buffer *elfin)
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600622{
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600623 struct parsed_elf *pelf;
Furquan Shaikh161d2332016-05-26 14:41:02 -0700624 size_t i;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600625 int ret;
626
627 ret = -1;
Aaron Durbin051a1812015-09-08 15:52:01 -0500628 memset(ctx, 0, sizeof(*ctx));
629 pelf = &ctx->pelf;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600630
631 if (parse_elf(elfin, pelf, ELF_PARSE_ALL)) {
632 ERROR("Couldn't parse ELF!\n");
633 return -1;
634 }
635
636 /* Only allow executables to be turned into rmodules. */
637 if (pelf->ehdr.e_type != ET_EXEC) {
638 ERROR("ELF is not an executable: %u.\n", pelf->ehdr.e_type);
639 goto out;
640 }
641
642 /* Determine if architecture is supported. */
643 for (i = 0; i < ARRAY_SIZE(reloc_ops); i++) {
644 if (reloc_ops[i].arch == pelf->ehdr.e_machine) {
Aaron Durbin051a1812015-09-08 15:52:01 -0500645 ctx->ops = &reloc_ops[i];
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600646 break;
647 }
648 }
649
Aaron Durbin051a1812015-09-08 15:52:01 -0500650 if (ctx->ops == NULL) {
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600651 ERROR("ELF is unsupported arch: %u.\n", pelf->ehdr.e_machine);
652 goto out;
653 }
654
655 /* Set the endian ops. */
Aaron Durbin051a1812015-09-08 15:52:01 -0500656 if (ctx->pelf.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
657 ctx->xdr = &xdr_be;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600658 else
Aaron Durbin051a1812015-09-08 15:52:01 -0500659 ctx->xdr = &xdr_le;
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600660
Aaron Durbin051a1812015-09-08 15:52:01 -0500661 if (find_program_segment(ctx))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600662 goto out;
663
Aaron Durbin051a1812015-09-08 15:52:01 -0500664 if (filter_relocation_sections(ctx))
665 goto out;
666
667 ret = 0;
668
669out:
670 return ret;
671}
672
Aaron Durbinb39a9742015-09-08 17:24:04 -0500673void rmodule_cleanup(struct rmod_context *ctx)
Aaron Durbin051a1812015-09-08 15:52:01 -0500674{
675 free(ctx->emitted_relocs);
676 parsed_elf_destroy(&ctx->pelf);
677}
678
679int rmodule_create(const struct buffer *elfin, struct buffer *elfout)
680{
681 struct rmod_context ctx;
682 int ret = -1;
683
684 if (rmodule_init(&ctx, elfin))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600685 goto out;
686
Aaron Durbinb39a9742015-09-08 17:24:04 -0500687 if (rmodule_collect_relocations(&ctx, NULL))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600688 goto out;
689
Aaron Durbin051a1812015-09-08 15:52:01 -0500690 if (populate_rmodule_info(&ctx))
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600691 goto out;
692
693 if (write_elf(&ctx, elfin, elfout))
694 goto out;
695
696 ret = 0;
697
698out:
Aaron Durbin051a1812015-09-08 15:52:01 -0500699 rmodule_cleanup(&ctx);
Aaron Durbin4fde5a62014-03-07 15:11:53 -0600700 return ret;
701}
Aaron Durbin694fd132015-10-28 11:39:34 -0500702
703static void rmod_deserialize(struct rmodule_header *rmod, struct buffer *buff,
704 struct xdr *xdr)
705{
706 rmod->magic = xdr->get16(buff);
707 rmod->version = xdr->get8(buff);
708 rmod->type = xdr->get8(buff);
709 rmod->payload_begin_offset = xdr->get32(buff);
710 rmod->payload_end_offset = xdr->get32(buff);
711 rmod->relocations_begin_offset = xdr->get32(buff);
712 rmod->relocations_end_offset = xdr->get32(buff);
713 rmod->module_link_start_address = xdr->get32(buff);
714 rmod->module_program_size = xdr->get32(buff);
715 rmod->module_entry_point = xdr->get32(buff);
716 rmod->parameters_begin = xdr->get32(buff);
717 rmod->parameters_end = xdr->get32(buff);
718 rmod->bss_begin = xdr->get32(buff);
719 rmod->bss_end = xdr->get32(buff);
720 rmod->padding[0] = xdr->get32(buff);
721 rmod->padding[1] = xdr->get32(buff);
722 rmod->padding[2] = xdr->get32(buff);
723 rmod->padding[3] = xdr->get32(buff);
724}
725
726int rmodule_stage_to_elf(Elf64_Ehdr *ehdr, struct buffer *buff)
727{
728 struct buffer reader;
729 struct buffer elf_out;
730 struct rmodule_header rmod;
731 struct xdr *xdr;
732 struct elf_writer *ew;
733 Elf64_Shdr shdr;
734 int bit64;
735 size_t payload_sz;
736 const char *section_name = ".program";
737 const size_t input_sz = buffer_size(buff);
738
739 buffer_clone(&reader, buff);
740
741 xdr = (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) ? &xdr_be : &xdr_le;
742 bit64 = ehdr->e_ident[EI_CLASS] == ELFCLASS64;
743
744 rmod_deserialize(&rmod, &reader, xdr);
745
746 /* Indicate that file is not an rmodule if initial checks fail. */
747 if (rmod.magic != RMODULE_MAGIC)
748 return 1;
749 if (rmod.version != RMODULE_VERSION_1)
750 return 1;
751
752 if (rmod.payload_begin_offset > input_sz ||
753 rmod.payload_end_offset > input_sz ||
754 rmod.relocations_begin_offset > input_sz ||
755 rmod.relocations_end_offset > input_sz) {
756 ERROR("Rmodule fields out of bounds.\n");
757 return -1;
758 }
759
760 ehdr->e_entry = rmod.module_entry_point;
761 ew = elf_writer_init(ehdr);
762
763 if (ew == NULL)
764 return -1;
765
766 payload_sz = rmod.payload_end_offset - rmod.payload_begin_offset;
767 memset(&shdr, 0, sizeof(shdr));
768 shdr.sh_type = SHT_PROGBITS;
769 shdr.sh_flags = SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR;
770 shdr.sh_addr = rmod.module_link_start_address;
771 shdr.sh_size = payload_sz;
772 buffer_splice(&reader, buff, rmod.payload_begin_offset, payload_sz);
773
774 if (elf_writer_add_section(ew, &shdr, &reader, section_name)) {
775 ERROR("Unable to add ELF section: %s\n", section_name);
776 elf_writer_destroy(ew);
777 return -1;
778 }
779
780 if (payload_sz != rmod.module_program_size) {
781 struct buffer b;
782
783 buffer_init(&b, NULL, NULL, 0);
784 memset(&shdr, 0, sizeof(shdr));
785 shdr.sh_type = SHT_NOBITS;
786 shdr.sh_flags = SHF_WRITE | SHF_ALLOC;
787 shdr.sh_addr = rmod.module_link_start_address + payload_sz;
788 shdr.sh_size = rmod.module_program_size - payload_sz;
789 if (elf_writer_add_section(ew, &shdr, &b, ".empty")) {
790 ERROR("Unable to add ELF section: .empty\n");
791 elf_writer_destroy(ew);
792 return -1;
793 }
794 }
795
796 /* Provide a section symbol so the relcoations can reference that. */
797 if (elf_writer_add_symbol(ew, section_name, section_name, shdr.sh_addr,
798 0, STB_LOCAL, STT_SECTION)) {
799 ERROR("Unable to add section symbol to ELF.\n");
800 elf_writer_destroy(ew);
801 return -1;
802 }
803
804 /* Add symbols for the parameters if they are non-zero. */
805 if (rmod.parameters_begin != rmod.parameters_end) {
806 int ret = 0;
807
808 ret |= elf_writer_add_symbol(ew, "_rmodule_params",
809 section_name,
810 rmod.parameters_begin, 0,
811 STB_GLOBAL, STT_NOTYPE);
812 ret |= elf_writer_add_symbol(ew, "_ermodule_params",
813 section_name,
814 rmod.parameters_end, 0,
815 STB_GLOBAL, STT_NOTYPE);
816
817 if (ret != 0) {
818 ERROR("Unable to add module params symbols to ELF\n");
819 elf_writer_destroy(ew);
820 return -1;
821 }
822 }
823
824 if (elf_writer_add_symbol(ew, "_bss", section_name, rmod.bss_begin, 0,
825 STB_GLOBAL, STT_NOTYPE) ||
826 elf_writer_add_symbol(ew, "_ebss", section_name, rmod.bss_end, 0,
827 STB_GLOBAL, STT_NOTYPE)) {
828 ERROR("Unable to add bss symbols to ELF\n");
829 elf_writer_destroy(ew);
830 return -1;
831 }
832
833 ssize_t relocs_sz = rmod.relocations_end_offset;
834 relocs_sz -= rmod.relocations_begin_offset;
835 buffer_splice(&reader, buff, rmod.relocations_begin_offset, relocs_sz);
836 while (relocs_sz > 0) {
837 Elf64_Addr addr;
838
839 if (bit64) {
840 relocs_sz -= sizeof(Elf64_Addr);
841 addr = xdr->get64(&reader);
842 } else {
843 relocs_sz -= sizeof(Elf32_Addr);
844 addr = xdr->get32(&reader);
845 }
846
847 /* Skip any relocations that are below the link address. */
848 if (addr < rmod.module_link_start_address)
849 continue;
850
851 if (elf_writer_add_rel(ew, section_name, addr)) {
852 ERROR("Relocation addition failure.\n");
853 elf_writer_destroy(ew);
854 return -1;
855 }
856 }
857
858 if (elf_writer_serialize(ew, &elf_out)) {
859 ERROR("ELF writer serialize failure.\n");
860 elf_writer_destroy(ew);
861 return -1;
862 }
863
864 elf_writer_destroy(ew);
865
866 /* Flip buffer with the created ELF one. */
867 buffer_delete(buff);
868 *buff = elf_out;
869
870 return 0;
871}