/*
 * Copyright (C) 2014 Google, Inc.
 * Copyright (C) 2018 Eltan B.V.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "elfparsing.h"
#include "rmodule.h"
#include <commonlib/rmodule-defs.h>

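/*
 * Overview: rmodule_create() turns a fully linked ELF executable into a
 * relocatable module ("rmodule"). It parses the input ELF, collects the
 * absolute relocations that need a load-time fixup, resolves the module's
 * parameter and bss symbols, and emits a new ELF containing an rmodule
 * header, the program contents, and the table of relocation addresses.
 * rmodule_stage_to_elf() performs the reverse transformation on an already
 * serialized rmodule.
 */
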
/*
 * Architecture specific support operations.
 */
static int valid_reloc_386(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* Only these 2 relocations are expected to be found. */
        return (type == R_386_32 || type == R_386_PC32);
}

static int should_emit_386(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* R_386_32 relocations are absolute. Must emit these. */
        return (type == R_386_32);
}

static int valid_reloc_amd64(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* Only these 6 relocations are expected to be found. */
        return (type == R_AMD64_64 ||
                type == R_AMD64_PC64 ||
                type == R_AMD64_32S ||
                type == R_AMD64_32 ||
                type == R_AMD64_PC32 ||
                /*
                 * binutils 2.31 introduced R_AMD64_PLT32 for non-local
                 * functions. As we don't care about procedure linkage
                 * table entries, handle it as R_X86_64_PC32.
                 */
                type == R_AMD64_PLT32);
}

static int should_emit_amd64(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* Only emit absolute relocations. */
        return (type == R_AMD64_64 ||
                type == R_AMD64_32S ||
                type == R_AMD64_32);
}

static int valid_reloc_arm(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* Only these 6 relocations are expected to be found. */
        return (type == R_ARM_ABS32 || type == R_ARM_THM_PC22 ||
                type == R_ARM_THM_JUMP24 || type == R_ARM_V4BX ||
                type == R_ARM_CALL || type == R_ARM_JUMP24);
}

static int should_emit_arm(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        /* R_ARM_ABS32 relocations are absolute. Must emit these. */
        return (type == R_ARM_ABS32);
}

static int valid_reloc_aarch64(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        return (type == R_AARCH64_ADR_PREL_PG_HI21 ||
                type == R_AARCH64_ADD_ABS_LO12_NC ||
                type == R_AARCH64_LDST8_ABS_LO12_NC ||
                type == R_AARCH64_CONDBR19 ||
                type == R_AARCH64_JUMP26 ||
                type == R_AARCH64_LDST32_ABS_LO12_NC ||
                type == R_AARCH64_LDST64_ABS_LO12_NC ||
                type == R_AARCH64_CALL26 ||
                type == R_AARCH64_ABS64 ||
                type == R_AARCH64_LD_PREL_LO19 ||
                type == R_AARCH64_ADR_PREL_LO21);
}

static int should_emit_aarch64(Elf64_Rela *rel)
{
        int type;

        type = ELF64_R_TYPE(rel->r_info);

        return (type == R_AARCH64_ABS64);
}

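/*
 * Per-architecture relocation handlers. valid_type() rejects any relocation
 * type the support code above does not know how to handle, while
 * should_emit() selects the absolute relocations that need a load-time
 * fixup recorded in the rmodule's relocation table.
 */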
static const struct arch_ops reloc_ops[] = {
        {
                .arch = EM_386,
                .valid_type = valid_reloc_386,
                .should_emit = should_emit_386,
        },
        {
                .arch = EM_X86_64,
                .valid_type = valid_reloc_amd64,
                .should_emit = should_emit_amd64,
        },
        {
                .arch = EM_ARM,
                .valid_type = valid_reloc_arm,
                .should_emit = should_emit_arm,
        },
        {
                .arch = EM_AARCH64,
                .valid_type = valid_reloc_aarch64,
                .should_emit = should_emit_aarch64,
        },
};

/*
 * Relocation processing loops.
 */

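/*
 * Walk every relocation entry that survived filter_relocation_sections().
 * rmodule_collect_relocations() runs this twice: a counting pass with
 * do_emit == 0 to size the emitted_relocs array, and an emitting pass with
 * do_emit == 1 that records each qualifying relocation's r_offset.
 */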
static int for_each_reloc(struct rmod_context *ctx, struct reloc_filter *f,
                          int do_emit)
{
        Elf64_Half i;
        struct parsed_elf *pelf = &ctx->pelf;

        for (i = 0; i < pelf->ehdr.e_shnum; i++) {
                Elf64_Shdr *shdr;
                Elf64_Rela *relocs;
                Elf64_Xword nrelocs;
                Elf64_Xword j;

                relocs = pelf->relocs[i];

                /* No relocations in this section. */
                if (relocs == NULL)
                        continue;

                shdr = &pelf->shdr[i];
                nrelocs = shdr->sh_size / shdr->sh_entsize;

                for (j = 0; j < nrelocs; j++) {
                        int filter_emit = 1;
                        Elf64_Rela *r = &relocs[j];

                        if (!ctx->ops->valid_type(r)) {
                                ERROR("Invalid reloc type: %u\n",
                                      (unsigned int)ELF64_R_TYPE(r->r_info));
                                return -1;
                        }

                        /* Allow the provided filter to have precedence. */
                        if (f != NULL) {
                                filter_emit = f->filter(f, r);

                                if (filter_emit < 0)
                                        return filter_emit;
                        }

                        if (filter_emit && ctx->ops->should_emit(r)) {
                                int n = ctx->nrelocs;
                                if (do_emit)
                                        ctx->emitted_relocs[n] = r->r_offset;
                                ctx->nrelocs++;
                        }
                }
        }

        return 0;
}

static int find_program_segment(struct rmod_context *ctx)
{
        int i;
        int nsegments;
        struct parsed_elf *pelf;
        Elf64_Phdr *phdr = NULL;

        pelf = &ctx->pelf;

        /* There should only be a single loadable segment. */
        nsegments = 0;
        for (i = 0; i < pelf->ehdr.e_phnum; i++) {
                if (pelf->phdr[i].p_type != PT_LOAD)
                        continue;
                phdr = &pelf->phdr[i];
                nsegments++;
        }

        if (nsegments != 1) {
                ERROR("Unexpected number of loadable segments: %d.\n",
                      nsegments);
                return -1;
        }

        INFO("Segment at 0x%0llx, file size 0x%0llx, mem size 0x%0llx.\n",
             (long long)phdr->p_vaddr, (long long)phdr->p_filesz,
             (long long)phdr->p_memsz);

        ctx->phdr = phdr;

        return 0;
}

static int
filter_relocation_sections(struct rmod_context *ctx)
{
        int i;
        const char *shstrtab;
        struct parsed_elf *pelf;
        const Elf64_Phdr *phdr;

        pelf = &ctx->pelf;
        phdr = ctx->phdr;
        shstrtab = buffer_get(pelf->strtabs[pelf->ehdr.e_shstrndx]);

        /*
         * Find all relocation sections that contain relocation entries
         * for sections that fall within the bounds of the segment. For
         * easier processing, the relocation array pointers for sections
         * that don't fall within the loadable program are NULL'd out.
         */
        for (i = 0; i < pelf->ehdr.e_shnum; i++) {
                Elf64_Shdr *shdr;
                Elf64_Word sh_info;
                const char *section_name;

                shdr = &pelf->shdr[i];

                /* Ignore non-relocation sections. */
                if (shdr->sh_type != SHT_RELA && shdr->sh_type != SHT_REL)
                        continue;

                /* Obtain the section to which the relocations apply. */
                sh_info = shdr->sh_info;
                shdr = &pelf->shdr[sh_info];

                section_name = &shstrtab[shdr->sh_name];
                DEBUG("Relocation section found for '%s' section.\n",
                      section_name);

                /* Do not process relocations for debug sections. */
                if (strstr(section_name, ".debug") != NULL) {
                        pelf->relocs[i] = NULL;
                        continue;
                }

                /*
                 * If the relocations apply to a non-program section,
                 * ignore them during further processing.
                 */
                if (shdr->sh_type != SHT_PROGBITS) {
                        pelf->relocs[i] = NULL;
                        continue;
                }

                if (shdr->sh_addr < phdr->p_vaddr ||
                    ((shdr->sh_addr + shdr->sh_size) >
                     (phdr->p_vaddr + phdr->p_memsz))) {
                        ERROR("Relocations being applied to section %d not "
                              "within segment region.\n", sh_info);
                        return -1;
                }
        }

        return 0;
}

static int vaddr_cmp(const void *a, const void *b)
{
        const Elf64_Addr *pa = a;
        const Elf64_Addr *pb = b;

        if (*pa < *pb)
                return -1;
        if (*pa > *pb)
                return 1;
        return 0;
}

int rmodule_collect_relocations(struct rmod_context *ctx,
                                struct reloc_filter *f)
{
        Elf64_Xword nrelocs;

        /*
         * The relocs array in the pelf should only contain relocations that
         * apply to the program. Count the number of relocations, then
         * collect them into the allocated buffer.
         */
        if (for_each_reloc(ctx, f, 0))
                return -1;

        nrelocs = ctx->nrelocs;
        INFO("%" PRIu64 " relocations to be emitted.\n", nrelocs);
        if (!nrelocs)
                return 0;

        /* Reset the counter for indexing into the array. */
        ctx->nrelocs = 0;
        ctx->emitted_relocs = calloc(nrelocs, sizeof(Elf64_Addr));
        /* Write out the relocations into the emitted_relocs array. */
        if (for_each_reloc(ctx, f, 1))
                return -1;

        if (ctx->nrelocs != nrelocs) {
                ERROR("Mismatch between counted and emitted relocations: %zu vs %zu.\n",
                      (size_t)nrelocs, (size_t)ctx->nrelocs);
                return -1;
        }

        /* Sort the relocations by their address. */
        qsort(ctx->emitted_relocs, nrelocs, sizeof(Elf64_Addr), vaddr_cmp);

        return 0;
}

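/*
 * Look up sym_name in the symbol table and store its value in *addr. When
 * optional is non-zero a missing symbol is not an error; *addr is set to 0
 * so the corresponding rmodule header field simply ends up zero.
 */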
static int
populate_sym(struct rmod_context *ctx, const char *sym_name, Elf64_Addr *addr,
             int nsyms, const char *strtab, int optional)
{
        int i;
        Elf64_Sym *syms;

        syms = ctx->pelf.syms;

        for (i = 0; i < nsyms; i++) {
                if (syms[i].st_name == 0)
                        continue;
                if (strcmp(sym_name, &strtab[syms[i].st_name]))
                        continue;
                DEBUG("%s -> 0x%llx\n", sym_name, (long long)syms[i].st_value);
                *addr = syms[i].st_value;
                return 0;
        }

        if (optional) {
                DEBUG("optional symbol '%s' not found.\n", sym_name);
                *addr = 0;
                return 0;
        }

        ERROR("symbol '%s' not found.\n", sym_name);
        return -1;
}

static int populate_rmodule_info(struct rmod_context *ctx)
{
        int i;
        const char *strtab;
        struct parsed_elf *pelf;
        Elf64_Ehdr *ehdr;
        int nsyms;

        pelf = &ctx->pelf;
        ehdr = &pelf->ehdr;

        /* Obtain the string table. */
        strtab = NULL;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (ctx->pelf.strtabs[i] == NULL)
                        continue;
                /* Don't use the section headers' string table. */
                if (i == ehdr->e_shstrndx)
                        continue;
                strtab = buffer_get(ctx->pelf.strtabs[i]);
                break;
        }

        if (strtab == NULL) {
                ERROR("No string table found.\n");
                return -1;
        }

        /* Determine number of symbols. */
        nsyms = 0;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (pelf->shdr[i].sh_type != SHT_SYMTAB)
                        continue;

                nsyms = pelf->shdr[i].sh_size / pelf->shdr[i].sh_entsize;
                break;
        }

        if (populate_sym(ctx, "_rmodule_params", &ctx->parameters_begin,
                         nsyms, strtab, 1))
                return -1;

        if (populate_sym(ctx, "_ermodule_params", &ctx->parameters_end,
                         nsyms, strtab, 1))
                return -1;

        if (populate_sym(ctx, "_bss", &ctx->bss_begin, nsyms, strtab, 0))
                return -1;

        if (populate_sym(ctx, "_ebss", &ctx->bss_end, nsyms, strtab, 0))
                return -1;

        return 0;
}

static int
add_section(struct elf_writer *ew, struct buffer *data, const char *name,
            Elf64_Addr addr, Elf64_Word size)
{
        Elf64_Shdr shdr;
        int ret;

        memset(&shdr, 0, sizeof(shdr));
        if (data != NULL) {
                shdr.sh_type = SHT_PROGBITS;
                shdr.sh_flags = SHF_ALLOC | SHF_WRITE | SHF_EXECINSTR;
        } else {
                shdr.sh_type = SHT_NOBITS;
                shdr.sh_flags = SHF_ALLOC;
        }
        shdr.sh_addr = addr;
        shdr.sh_offset = addr;
        shdr.sh_size = size;

        ret = elf_writer_add_section(ew, &shdr, data, name);

        if (ret)
                ERROR("Could not add '%s' section.\n", name);

        return ret;
}

static int
write_elf(const struct rmod_context *ctx, const struct buffer *in,
          struct buffer *out)
{
        int ret;
        int bit64;
        size_t loc;
        size_t rmod_data_size;
        struct elf_writer *ew;
        struct buffer rmod_data;
        struct buffer rmod_header;
        struct buffer program;
        struct buffer relocs;
        Elf64_Xword total_size;
        Elf64_Addr addr;
        Elf64_Ehdr ehdr;

        bit64 = ctx->pelf.ehdr.e_ident[EI_CLASS] == ELFCLASS64;

        /*
         * 3 sections will be added to the ELF file.
         * +------------------+
         * |  rmodule header  |
         * +------------------+
         * |     program      |
         * +------------------+
         * |   relocations    |
         * +------------------+
         */

        /* Create buffer for header and relocations. */
        rmod_data_size = sizeof(struct rmodule_header);
        if (bit64)
                rmod_data_size += ctx->nrelocs * sizeof(Elf64_Addr);
        else
                rmod_data_size += ctx->nrelocs * sizeof(Elf32_Addr);

        if (buffer_create(&rmod_data, rmod_data_size, "rmod"))
                return -1;

        buffer_splice(&rmod_header, &rmod_data,
                      0, sizeof(struct rmodule_header));
        buffer_clone(&relocs, &rmod_data);
        buffer_seek(&relocs, sizeof(struct rmodule_header));

        /* Reset current location. */
        buffer_set_size(&rmod_header, 0);
        buffer_set_size(&relocs, 0);

        /* Program contents. */
        buffer_splice(&program, in, ctx->phdr->p_offset, ctx->phdr->p_filesz);

        /* Create ELF writer with modified entry point. */
        memcpy(&ehdr, &ctx->pelf.ehdr, sizeof(ehdr));
        ew = elf_writer_init(&ehdr);

        if (ew == NULL) {
                ERROR("Failed to create ELF writer.\n");
                buffer_delete(&rmod_data);
                return -1;
        }

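        /*
         * The fields below are emitted in the order and widths of struct
         * rmodule_header (see commonlib/rmodule-defs.h); rmod_deserialize()
         * reads them back in the same order.
         */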
        /* Write out rmodule_header. */
        ctx->xdr->put16(&rmod_header, RMODULE_MAGIC);
        ctx->xdr->put8(&rmod_header, RMODULE_VERSION_1);
        ctx->xdr->put8(&rmod_header, 0);
        /* payload_begin_offset */
        loc = sizeof(struct rmodule_header);
        ctx->xdr->put32(&rmod_header, loc);
        /* payload_end_offset */
        loc += ctx->phdr->p_filesz;
        ctx->xdr->put32(&rmod_header, loc);
        /* relocations_begin_offset */
        ctx->xdr->put32(&rmod_header, loc);
        /* relocations_end_offset */
        if (bit64)
                loc += ctx->nrelocs * sizeof(Elf64_Addr);
        else
                loc += ctx->nrelocs * sizeof(Elf32_Addr);
        ctx->xdr->put32(&rmod_header, loc);
        /* module_link_start_address */
        ctx->xdr->put32(&rmod_header, ctx->phdr->p_vaddr);
        /* module_program_size */
        ctx->xdr->put32(&rmod_header, ctx->phdr->p_memsz);
        /* module_entry_point */
        ctx->xdr->put32(&rmod_header, ctx->pelf.ehdr.e_entry);
        /* parameters_begin */
        ctx->xdr->put32(&rmod_header, ctx->parameters_begin);
        /* parameters_end */
        ctx->xdr->put32(&rmod_header, ctx->parameters_end);
        /* bss_begin */
        ctx->xdr->put32(&rmod_header, ctx->bss_begin);
        /* bss_end */
        ctx->xdr->put32(&rmod_header, ctx->bss_end);
        /* padding[4] */
        ctx->xdr->put32(&rmod_header, 0);
        ctx->xdr->put32(&rmod_header, 0);
        ctx->xdr->put32(&rmod_header, 0);
        ctx->xdr->put32(&rmod_header, 0);

        /* Write the relocations. */
        for (unsigned int i = 0; i < ctx->nrelocs; i++) {
                if (bit64)
                        ctx->xdr->put64(&relocs, ctx->emitted_relocs[i]);
                else
                        ctx->xdr->put32(&relocs, ctx->emitted_relocs[i]);
        }

        total_size = 0;
        addr = 0;

        /*
         * There are 2 cases to deal with. Either the program has a large
         * NOBITS section and the relocations fit entirely within the memory
         * region the program already occupies, or the relocations increase
         * the memory footprint of the program beyond the region it would be
         * loaded into. The rmodule header is a fixed cost that is considered
         * a part of the program.
         */
        total_size += buffer_size(&rmod_header);
        if (buffer_size(&relocs) + ctx->phdr->p_filesz > ctx->phdr->p_memsz) {
                total_size += buffer_size(&relocs);
                total_size += ctx->phdr->p_filesz;
        } else {
                total_size += ctx->phdr->p_memsz;
        }

        ret = add_section(ew, &rmod_header, ".header", addr,
                          buffer_size(&rmod_header));
        if (ret < 0)
                goto out;
        addr += buffer_size(&rmod_header);

        ret = add_section(ew, &program, ".program", addr, ctx->phdr->p_filesz);
        if (ret < 0)
                goto out;
        addr += ctx->phdr->p_filesz;

        if (ctx->nrelocs) {
                ret = add_section(ew, &relocs, ".relocs", addr,
                                  buffer_size(&relocs));
                if (ret < 0)
                        goto out;
                addr += buffer_size(&relocs);
        }

        if (total_size != addr) {
                ret = add_section(ew, NULL, ".empty", addr, total_size - addr);
                if (ret < 0)
                        goto out;
        }

        /*
         * The .empty NOBITS section above ensures the last section's memory
         * usage meets the required total size of the program in memory.
         */

        ret = elf_writer_serialize(ew, out);
        if (ret < 0)
                ERROR("Failed to serialize ELF to buffer.\n");

out:
        buffer_delete(&rmod_data);
        elf_writer_destroy(ew);

        return ret;
}

int rmodule_init(struct rmod_context *ctx, const struct buffer *elfin)
{
        struct parsed_elf *pelf;
        size_t i;
        int ret;

        ret = -1;
        memset(ctx, 0, sizeof(*ctx));
        pelf = &ctx->pelf;

        if (parse_elf(elfin, pelf, ELF_PARSE_ALL)) {
                ERROR("Couldn't parse ELF!\n");
                return -1;
        }

        /* Only allow executables to be turned into rmodules. */
        if (pelf->ehdr.e_type != ET_EXEC) {
                ERROR("ELF is not an executable: %u.\n", pelf->ehdr.e_type);
                goto out;
        }

        /* Determine if architecture is supported. */
        for (i = 0; i < ARRAY_SIZE(reloc_ops); i++) {
                if (reloc_ops[i].arch == pelf->ehdr.e_machine) {
                        ctx->ops = &reloc_ops[i];
                        break;
                }
        }

        if (ctx->ops == NULL) {
                ERROR("ELF is unsupported arch: %u.\n", pelf->ehdr.e_machine);
                goto out;
        }

        /* Set the endian ops. */
        if (ctx->pelf.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
                ctx->xdr = &xdr_be;
        else
                ctx->xdr = &xdr_le;

        if (find_program_segment(ctx))
                goto out;

        if (filter_relocation_sections(ctx))
                goto out;

        ret = 0;

out:
        return ret;
}

void rmodule_cleanup(struct rmod_context *ctx)
{
        free(ctx->emitted_relocs);
        parsed_elf_destroy(&ctx->pelf);
}

int rmodule_create(const struct buffer *elfin, struct buffer *elfout)
{
        struct rmod_context ctx;
        int ret = -1;

        if (rmodule_init(&ctx, elfin))
                goto out;

        if (rmodule_collect_relocations(&ctx, NULL))
                goto out;

        if (populate_rmodule_info(&ctx))
                goto out;

        if (write_elf(&ctx, elfin, elfout))
                goto out;

        ret = 0;

out:
        rmodule_cleanup(&ctx);
        return ret;
}

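/*
 * Read a serialized rmodule_header field by field, mirroring the order in
 * which write_elf() emits it and honoring the module's endianness via xdr.
 */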
static void rmod_deserialize(struct rmodule_header *rmod, struct buffer *buff,
                             struct xdr *xdr)
{
        rmod->magic = xdr->get16(buff);
        rmod->version = xdr->get8(buff);
        rmod->type = xdr->get8(buff);
        rmod->payload_begin_offset = xdr->get32(buff);
        rmod->payload_end_offset = xdr->get32(buff);
        rmod->relocations_begin_offset = xdr->get32(buff);
        rmod->relocations_end_offset = xdr->get32(buff);
        rmod->module_link_start_address = xdr->get32(buff);
        rmod->module_program_size = xdr->get32(buff);
        rmod->module_entry_point = xdr->get32(buff);
        rmod->parameters_begin = xdr->get32(buff);
        rmod->parameters_end = xdr->get32(buff);
        rmod->bss_begin = xdr->get32(buff);
        rmod->bss_end = xdr->get32(buff);
        rmod->padding[0] = xdr->get32(buff);
        rmod->padding[1] = xdr->get32(buff);
        rmod->padding[2] = xdr->get32(buff);
        rmod->padding[3] = xdr->get32(buff);
}

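/*
 * Reverse of rmodule_create(): interpret buff as a serialized rmodule and
 * rebuild an ELF image from it. The program contents become a .program
 * section, any bss tail becomes an .empty SHT_NOBITS section, the parameter
 * and bss boundaries are re-exported as symbols, and the emitted relocation
 * addresses are turned back into relocation entries.
 */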
int rmodule_stage_to_elf(Elf64_Ehdr *ehdr, struct buffer *buff)
{
        struct buffer reader;
        struct buffer elf_out;
        struct rmodule_header rmod;
        struct xdr *xdr;
        struct elf_writer *ew;
        Elf64_Shdr shdr;
        int bit64;
        size_t payload_sz;
        const char *section_name = ".program";
        const size_t input_sz = buffer_size(buff);

        buffer_clone(&reader, buff);

        xdr = (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) ? &xdr_be : &xdr_le;
        bit64 = ehdr->e_ident[EI_CLASS] == ELFCLASS64;

        rmod_deserialize(&rmod, &reader, xdr);

        /* Indicate that file is not an rmodule if initial checks fail. */
        if (rmod.magic != RMODULE_MAGIC)
                return 1;
        if (rmod.version != RMODULE_VERSION_1)
                return 1;

        if (rmod.payload_begin_offset > input_sz ||
            rmod.payload_end_offset > input_sz ||
            rmod.relocations_begin_offset > input_sz ||
            rmod.relocations_end_offset > input_sz) {
                ERROR("Rmodule fields out of bounds.\n");
                return -1;
        }

        ehdr->e_entry = rmod.module_entry_point;
        ew = elf_writer_init(ehdr);

        if (ew == NULL)
                return -1;

        payload_sz = rmod.payload_end_offset - rmod.payload_begin_offset;
        memset(&shdr, 0, sizeof(shdr));
        shdr.sh_type = SHT_PROGBITS;
        shdr.sh_flags = SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR;
        shdr.sh_addr = rmod.module_link_start_address;
        shdr.sh_size = payload_sz;
        buffer_splice(&reader, buff, rmod.payload_begin_offset, payload_sz);

        if (elf_writer_add_section(ew, &shdr, &reader, section_name)) {
                ERROR("Unable to add ELF section: %s\n", section_name);
                elf_writer_destroy(ew);
                return -1;
        }

        if (payload_sz != rmod.module_program_size) {
                struct buffer b;

                buffer_init(&b, NULL, NULL, 0);
                memset(&shdr, 0, sizeof(shdr));
                shdr.sh_type = SHT_NOBITS;
                shdr.sh_flags = SHF_WRITE | SHF_ALLOC;
                shdr.sh_addr = rmod.module_link_start_address + payload_sz;
                shdr.sh_size = rmod.module_program_size - payload_sz;
                if (elf_writer_add_section(ew, &shdr, &b, ".empty")) {
                        ERROR("Unable to add ELF section: .empty\n");
                        elf_writer_destroy(ew);
                        return -1;
                }
        }

        /* Provide a section symbol so the relocations can reference it. */
        if (elf_writer_add_symbol(ew, section_name, section_name, shdr.sh_addr,
                                  0, STB_LOCAL, STT_SECTION)) {
                ERROR("Unable to add section symbol to ELF.\n");
                elf_writer_destroy(ew);
                return -1;
        }

        /* Add symbols for the parameters if the region is non-empty. */
        if (rmod.parameters_begin != rmod.parameters_end) {
                int ret = 0;

                ret |= elf_writer_add_symbol(ew, "_rmodule_params",
                                             section_name,
                                             rmod.parameters_begin, 0,
                                             STB_GLOBAL, STT_NOTYPE);
                ret |= elf_writer_add_symbol(ew, "_ermodule_params",
                                             section_name,
                                             rmod.parameters_end, 0,
                                             STB_GLOBAL, STT_NOTYPE);

                if (ret != 0) {
                        ERROR("Unable to add module params symbols to ELF\n");
                        elf_writer_destroy(ew);
                        return -1;
                }
        }

        if (elf_writer_add_symbol(ew, "_bss", section_name, rmod.bss_begin, 0,
                                  STB_GLOBAL, STT_NOTYPE) ||
            elf_writer_add_symbol(ew, "_ebss", section_name, rmod.bss_end, 0,
                                  STB_GLOBAL, STT_NOTYPE)) {
                ERROR("Unable to add bss symbols to ELF\n");
                elf_writer_destroy(ew);
                return -1;
        }

        ssize_t relocs_sz = rmod.relocations_end_offset;
        relocs_sz -= rmod.relocations_begin_offset;
        buffer_splice(&reader, buff, rmod.relocations_begin_offset, relocs_sz);
        while (relocs_sz > 0) {
                Elf64_Addr addr;

                if (bit64) {
                        relocs_sz -= sizeof(Elf64_Addr);
                        addr = xdr->get64(&reader);
                } else {
                        relocs_sz -= sizeof(Elf32_Addr);
                        addr = xdr->get32(&reader);
                }

                /* Skip any relocations that are below the link address. */
                if (addr < rmod.module_link_start_address)
                        continue;

                if (elf_writer_add_rel(ew, section_name, addr)) {
                        ERROR("Relocation addition failure.\n");
                        elf_writer_destroy(ew);
                        return -1;
                }
        }

        if (elf_writer_serialize(ew, &elf_out)) {
                ERROR("ELF writer serialize failure.\n");
                elf_writer_destroy(ew);
                return -1;
        }

        elf_writer_destroy(ew);

        /* Replace the input buffer with the newly created ELF. */
        buffer_delete(buff);
        *buff = elf_out;

        return 0;
}