/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
 * Copyright (C) 2016 George Trudeau <george.trudeau@usherbrooke.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <commonlib/compression.h>
#include <commonlib/endian.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <cbfs.h>
#include <lib.h>
#include <bootmem.h>
#include <program_loading.h>
#include <timestamp.h>

static const unsigned long lb_start = (unsigned long)&_program;
static const unsigned long lb_end = (unsigned long)&_eprogram;

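/*
 * One node in the doubly linked list of payload segments: s_dstaddr is the
 * address the segment must end up at, s_srcaddr points into the mapped
 * payload image, s_filesz is its (possibly compressed) size in the file and
 * s_memsz the size it occupies once loaded.
 */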
struct segment {
	struct segment *next;
	struct segment *prev;
	unsigned long s_dstaddr;
	unsigned long s_srcaddr;
	unsigned long s_memsz;
	unsigned long s_filesz;
	int compression;
};

/* The problem:
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * The solution:
 * - Allocate a buffer the size of the coreboot image plus additional
 *   required space.
 * - Anything that would overwrite coreboot is copied into the lower part of
 *   the buffer.
 * - After loading an ELF image, copy coreboot to the top of the buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - Coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */
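/*
 * Concretely, relocate_segment() below retargets any segment that overlaps
 * [lb_start, lb_end) to bounce_buffer + (s_dstaddr - lb_start), i.e. the
 * bounce buffer is a 1:1 shadow of the coreboot image, and the overlapping
 * data only gets copied to its real destination later, once coreboot itself
 * has been saved away.
 */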

static unsigned long bounce_size, bounce_buffer;

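/*
 * Reserve a bootmem buffer large enough for req_size bytes of overlapping
 * payload data plus a copy of coreboot itself, so that coreboot can be
 * preserved and returned to. A relocatable ramstage needs no bounce buffer,
 * since payloads are not supposed to overlap it.
 */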
static void get_bounce_buffer(unsigned long req_size)
{
	unsigned long lb_size;
	void *buffer;

	/* When the ramstage is relocatable there is no need for a bounce
	 * buffer. All payloads should not overlap the ramstage.
	 */
	if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE)) {
		bounce_buffer = ~0UL;
		bounce_size = 0;
		return;
	}

	lb_size = lb_end - lb_start;
	/* Plus coreboot size so I have somewhere
	 * to place a copy to return to.
	 */
	lb_size = req_size + lb_size;

	buffer = bootmem_allocate_buffer(lb_size);

	printk(BIOS_SPEW, "Bounce Buffer at %p, %lu bytes\n", buffer, lb_size);

	bounce_buffer = (uintptr_t)buffer;
	bounce_size = req_size;
}

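/* Does [s_dstaddr, s_dstaddr + s_memsz) intersect the running coreboot image? */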
static int overlaps_coreboot(struct segment *seg)
{
	unsigned long start, end;
	start = seg->s_dstaddr;
	end = start + seg->s_memsz;
	return !((end <= lb_start) || (start >= lb_end));
}

static int relocate_segment(unsigned long buffer, struct segment *seg)
{
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	/* ret:  1 : A new segment is inserted before the seg.
	 *       0 : A new segment is inserted after the seg, or no new one.
	 */
	unsigned long start, middle, end, ret = 0;

	printk(BIOS_SPEW, "lb: [0x%016lx, 0x%016lx)\n",
		lb_start, lb_end);

	/* I don't conflict with coreboot so get out of here */
	if (!overlaps_coreboot(seg))
		return 0;

	if (!arch_supports_bounce_buffer())
		die("bounce buffer not supported");

	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;

	printk(BIOS_SPEW, "segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		start, middle, end);

	if (seg->compression == CBFS_COMPRESS_NONE) {
		/* Slice off a piece at the beginning
		 * that doesn't conflict with coreboot.
		 */
		if (start < lb_start) {
			struct segment *new;
			unsigned long len = lb_start - start;
			new = malloc(sizeof(*new));
			*new = *seg;
			new->s_memsz = len;
			seg->s_memsz -= len;
			seg->s_dstaddr += len;
			seg->s_srcaddr += len;
			if (seg->s_filesz > len) {
				new->s_filesz = len;
				seg->s_filesz -= len;
			} else {
				seg->s_filesz = 0;
			}

			/* Order by stream offset */
			new->next = seg;
			new->prev = seg->prev;
			seg->prev->next = new;
			seg->prev = new;

			/* compute the new value of start */
			start = seg->s_dstaddr;

			printk(BIOS_SPEW, "   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);

			ret = 1;
		}

		/* Slice off a piece at the end
		 * that doesn't conflict with coreboot
		 */
		if (end > lb_end) {
			unsigned long len = lb_end - start;
			struct segment *new;
			new = malloc(sizeof(*new));
			*new = *seg;
			seg->s_memsz = len;
			new->s_memsz -= len;
			new->s_dstaddr += len;
			new->s_srcaddr += len;
			if (seg->s_filesz > len) {
				seg->s_filesz = len;
				new->s_filesz -= len;
			} else {
				new->s_filesz = 0;
			}
			/* Order by stream offset */
			new->next = seg->next;
			new->prev = seg;
			seg->next->prev = new;
			seg->next = new;

			printk(BIOS_SPEW, "   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		}
	}

	/* Now retarget this segment onto the bounce buffer. */
	/* Explanation: the buffer is a 1:1 mapping of coreboot's address
	 * range, so point dstaddr into the buffer; the data gets copied
	 * later to where coreboot lives.
	 */
	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);

	printk(BIOS_SPEW, " bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr,
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);

	return ret;
}

/* Decode a serialized cbfs payload segment
 * from memory into native endianness.
 */
static void cbfs_decode_payload_segment(struct cbfs_payload_segment *segment,
		const struct cbfs_payload_segment *src)
{
	segment->type        = read_be32(&src->type);
	segment->compression = read_be32(&src->compression);
	segment->offset      = read_be32(&src->offset);
	segment->load_addr   = read_be64(&src->load_addr);
	segment->len         = read_be32(&src->len);
	segment->mem_len     = read_be32(&src->mem_len);
}

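/*
 * Walk the payload's serialized segment table, decode each entry into native
 * endianness and queue CODE, DATA and BSS segments on the circular list
 * anchored at head. Returns 1 once the ENTRY segment (by definition the last
 * one) has been seen, or -1 on an unknown segment type.
 */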
static int build_self_segment_list(
	struct segment *head,
	struct cbfs_payload *cbfs_payload, uintptr_t *entry)
{
	struct segment *new;
	struct cbfs_payload_segment *current_segment, *first_segment, segment;

	memset(head, 0, sizeof(*head));
	head->next = head->prev = head;

	first_segment = &cbfs_payload->segments;

	for (current_segment = first_segment;; ++current_segment) {
		printk(BIOS_DEBUG,
			"Loading segment from rom address 0x%p\n",
			current_segment);

		cbfs_decode_payload_segment(&segment, current_segment);

		switch (segment.type) {
		case PAYLOAD_SEGMENT_PARAMS:
			printk(BIOS_DEBUG, "  parameter section (skipped)\n");
			continue;

		case PAYLOAD_SEGMENT_CODE:
		case PAYLOAD_SEGMENT_DATA:
			printk(BIOS_DEBUG, "  %s (compression=%x)\n",
				segment.type == PAYLOAD_SEGMENT_CODE
				? "code" : "data", segment.compression);

			new = malloc(sizeof(*new));
			new->s_dstaddr = segment.load_addr;
			new->s_memsz = segment.mem_len;
			new->compression = segment.compression;
			new->s_srcaddr = (uintptr_t)
				((unsigned char *)first_segment)
				+ segment.offset;
			new->s_filesz = segment.len;

			printk(BIOS_DEBUG, "  New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);

			/* Clean up the values */
			if (new->s_filesz > new->s_memsz) {
				new->s_filesz = new->s_memsz;
				printk(BIOS_DEBUG,
					"  cleaned up filesize 0x%lx\n",
					new->s_filesz);
			}
			break;

		case PAYLOAD_SEGMENT_BSS:
			printk(BIOS_DEBUG, "  BSS 0x%p (%d byte)\n", (void *)
				(intptr_t)segment.load_addr, segment.mem_len);

			new = malloc(sizeof(*new));
			new->s_filesz = 0;
			new->s_srcaddr = (uintptr_t)
				((unsigned char *)first_segment)
				+ segment.offset;
			new->s_dstaddr = segment.load_addr;
			new->s_memsz = segment.mem_len;
			break;

		case PAYLOAD_SEGMENT_ENTRY:
			printk(BIOS_DEBUG, "  Entry Point 0x%p\n", (void *)
				(intptr_t)segment.load_addr);

			*entry = segment.load_addr;
			/* By definition, a payload always has the entry point
			 * as its last segment. Thus, we use the occurrence of
			 * the entry point as the break condition for the loop.
			 * Could we instead just look at the number of
			 * sections?
			 */
			return 1;

		default:
			/* We found something that we don't know about. Throw
			 * hands into the sky and run away!
			 */
			printk(BIOS_EMERG, "Bad segment type %x\n",
				segment.type);
			return -1;
		}

		/* We have found another CODE, DATA or BSS segment */
		/* Insert new segment at the end of the list */
		new->next = head;
		new->prev = head->prev;
		head->prev->next = new;
		head->prev = new;
	}

	return 1;
}

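/*
 * Load the queued segments in three passes: first verify that every segment
 * targets usable RAM, then reserve the target ranges and size the bounce
 * buffer, and finally decompress/copy each segment into place (or into the
 * bounce buffer where it would overwrite coreboot).
 */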
static int load_self_segments(
	struct segment *head,
	struct prog *payload)
{
	struct segment *ptr;
	struct segment *last_non_empty;
	const unsigned long one_meg = (1UL << 20);
	unsigned long bounce_high = lb_end;

	/* Determine last non-empty loaded segment. */
	last_non_empty = NULL;
	for (ptr = head->next; ptr != head; ptr = ptr->next)
		if (ptr->s_filesz != 0)
			last_non_empty = ptr;

	for (ptr = head->next; ptr != head; ptr = ptr->next) {
		if (bootmem_region_targets_usable_ram(ptr->s_dstaddr,
						      ptr->s_memsz))
			continue;

		if (ptr->s_dstaddr < one_meg &&
		    (ptr->s_dstaddr + ptr->s_memsz) <= one_meg) {
			printk(BIOS_DEBUG,
				"Payload being loaded below 1MiB "
				"without region being marked as RAM usable.\n");
			continue;
		}

		/* Payload segment not targeting RAM. */
		printk(BIOS_ERR, "SELF Payload doesn't target RAM:\n");
		printk(BIOS_ERR, "Failed Segment: 0x%lx, %lu bytes\n",
			ptr->s_dstaddr, ptr->s_memsz);
		bootmem_dump_ranges();
		return 0;
	}

	for (ptr = head->next; ptr != head; ptr = ptr->next) {
		/*
		 * Add segments to bootmem memory map before a bounce buffer is
		 * allocated so that there aren't conflicts with the actual
		 * payload.
		 */
		bootmem_add_range(ptr->s_dstaddr, ptr->s_memsz,
				  LB_MEM_UNUSABLE);

		if (!overlaps_coreboot(ptr))
			continue;
		if (ptr->s_dstaddr + ptr->s_memsz > bounce_high)
			bounce_high = ptr->s_dstaddr + ptr->s_memsz;
	}
	get_bounce_buffer(bounce_high - lb_start);
	if (!bounce_buffer) {
		printk(BIOS_ERR, "Could not find a bounce buffer...\n");
		return 0;
	}

	for (ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned char *dest, *src;
		printk(BIOS_DEBUG, "Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Modify the segment to load onto the bounce_buffer if necessary.
		 */
		if (relocate_segment(bounce_buffer, ptr)) {
			ptr = (ptr->prev)->prev;
			continue;
		}

		printk(BIOS_DEBUG, "Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Compute the boundaries of the segment */
		dest = (unsigned char *)(ptr->s_dstaddr);
		src = (unsigned char *)(ptr->s_srcaddr);

		/* Copy data from the initial buffer */
		if (ptr->s_filesz) {
			unsigned char *middle, *end;
			size_t len = ptr->s_filesz;
			size_t memsz = ptr->s_memsz;
			switch (ptr->compression) {
			case CBFS_COMPRESS_LZMA: {
				printk(BIOS_DEBUG, "using LZMA\n");
				timestamp_add_now(TS_START_ULZMA);
				len = ulzman(src, len, dest, memsz);
				timestamp_add_now(TS_END_ULZMA);
				if (!len) /* Decompression Error. */
					return 0;
				break;
			}
			case CBFS_COMPRESS_LZ4: {
				printk(BIOS_DEBUG, "using LZ4\n");
				timestamp_add_now(TS_START_ULZ4F);
				len = ulz4fn(src, len, dest, memsz);
				timestamp_add_now(TS_END_ULZ4F);
				if (!len) /* Decompression Error. */
					return 0;
				break;
			}
			case CBFS_COMPRESS_NONE: {
				printk(BIOS_DEBUG, "it's not compressed!\n");
				memcpy(dest, src, len);
				break;
			}
			default:
				printk(BIOS_INFO, "CBFS: Unknown compression type %d\n", ptr->compression);
				return -1;
			}
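			/* len now holds the number of bytes actually placed
			 * at dest (the decompressed size for LZMA/LZ4).
			 */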
			end = dest + memsz;
			middle = dest + len;
			printk(BIOS_SPEW, "[ 0x%08lx, %08lx, 0x%08lx) <- %08lx\n",
				(unsigned long)dest,
				(unsigned long)middle,
				(unsigned long)end,
				(unsigned long)src);

			/* Zero the extra bytes between middle & end */
			if (middle < end) {
				printk(BIOS_DEBUG, "Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
					(unsigned long)middle, (unsigned long)(end - middle));

				/* Zero the extra bytes */
				memset(middle, 0, end - middle);
			}
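			/*
			 * A segment that was retargeted at the bounce buffer
			 * can still contain data that does not actually
			 * shadow coreboot: a prefix that belongs below
			 * lb_start and a suffix that belongs above lb_end.
			 * Move those pieces to their real destinations now;
			 * only the part overlapping coreboot stays in the
			 * bounce buffer.
			 */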
			/* Copy the data that's outside the area that shadows ramstage */
			printk(BIOS_DEBUG, "dest %p, end %p, bouncebuffer %lx\n", dest, end, bounce_buffer);
			if ((unsigned long)end > bounce_buffer) {
				if ((unsigned long)dest < bounce_buffer) {
					unsigned char *from = dest;
					unsigned char *to = (unsigned char *)(lb_start - (bounce_buffer - (unsigned long)dest));
					unsigned long amount = bounce_buffer - (unsigned long)dest;
					printk(BIOS_DEBUG, "move prefix around: from %p, to %p, amount: %lx\n", from, to, amount);
					memcpy(to, from, amount);
				}
				if ((unsigned long)end > bounce_buffer + (lb_end - lb_start)) {
					unsigned long from = bounce_buffer + (lb_end - lb_start);
					unsigned long to = lb_end;
					unsigned long amount = (unsigned long)end - from;
					printk(BIOS_DEBUG, "move suffix around: from %lx, to %lx, amount: %lx\n", from, to, amount);
					memcpy((char *)to, (char *)from, amount);
				}
			}

			/*
			 * Each architecture can perform additional operations
			 * on the loaded segment.
			 */
			prog_segment_loaded((uintptr_t)dest, ptr->s_memsz,
				last_non_empty == ptr ? SEG_FINAL : 0);
		}
	}

	return 1;
}

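/*
 * Map the payload from its region device, build the segment list, load the
 * segments and return the payload's entry point, or NULL on failure. The
 * bounce buffer (if any) is recorded in the payload's program area.
 */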
void *selfload(struct prog *payload)
{
	uintptr_t entry = 0;
	struct segment head;
	void *data;

	data = rdev_mmap_full(prog_rdev(payload));

	if (data == NULL)
		return NULL;

	/* Preprocess the self segments */
	if (!build_self_segment_list(&head, data, &entry))
		goto out;

	/* Load the segments */
	if (!load_self_segments(&head, payload))
		goto out;

	printk(BIOS_SPEW, "Loaded segments\n");

	rdev_munmap(prog_rdev(payload), data);

	/* Update the payload's area with the bounce buffer information. */
	prog_set_area(payload, (void *)(uintptr_t)bounce_buffer, bounce_size);

	return (void *)entry;

out:
	rdev_munmap(prog_rdev(payload), data);
	return NULL;
}