/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2003 Eric W. Biederman <ebiederm@xmission.com>
 * Copyright (C) 2009 Ron Minnich <rminnich@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
 */

#include <console/console.h>
#include <part/fallback_boot.h>
#include <boot/elf.h>
#include <boot/elf_boot.h>
#include <boot/coreboot_tables.h>
#include <ip_checksum.h>
#include <stream/read_bytes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cbfs.h>

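/* CBFS payload segment headers are stored in big-endian (network) byte
 * order, so provide a minimal ntohl() for little-endian builds.
 */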
#ifndef CONFIG_BIG_ENDIAN
#define ntohl(x) ( ((x&0xff)<<24) | ((x&0xff00)<<8) | \
	((x&0xff0000) >> 8) | ((x&0xff000000) >> 24) )
#else
#define ntohl(x) (x)
#endif

/* Maximum physical address we can use for the coreboot bounce buffer. */
#ifndef MAX_ADDR
#define MAX_ADDR -1UL
#endif

extern unsigned char _ram_seg;
extern unsigned char _eram_seg;

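/* Each loadable chunk of the payload is tracked as a struct segment.
 * Segments sit on two circular doubly-linked lists: next/prev in stream
 * (source) order and phdr_next/phdr_prev in original program header
 * order. s_srcaddr/s_filesz describe the data in the payload image;
 * s_dstaddr/s_memsz describe where, and how large, it is in memory.
 */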
struct segment {
	struct segment *next;
	struct segment *prev;
	struct segment *phdr_next;
	struct segment *phdr_prev;
	unsigned long s_dstaddr;
	unsigned long s_srcaddr;
	unsigned long s_memsz;
	unsigned long s_filesz;
	int compression;
};

struct verify_callback {
	struct verify_callback *next;
	int (*callback)(struct verify_callback *vcb,
		Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
	unsigned long desc_offset;
	unsigned long desc_addr;
};

struct ip_checksum_vcb {
	struct verify_callback data;
	unsigned short ip_checksum;
};

/* The problem:
 * Static executables all want to share the same addresses
 * in memory because only a few addresses are reliably present on
 * a machine, and implementing general relocation is hard.
 *
 * The solution:
 * - Allocate a buffer twice the size of the coreboot image.
 * - Anything that would overwrite coreboot is copied into the lower half
 *   of the buffer.
 * - After the image is loaded, copy coreboot to the upper half of the
 *   buffer.
 * - Then jump to the loaded image.
 *
 * Benefits:
 * - Nearly arbitrary standalone executables can be loaded.
 * - Coreboot is preserved, so it can be returned to.
 * - The implementation is still relatively simple,
 *   and much simpler than the general case implemented in kexec.
 */
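/* A worked example (addresses purely illustrative): if coreboot occupies
 * [lb_start, lb_end) = [0x00004000, 0x00024000) and the bounce buffer is
 * placed at 0x3ffc0000, a payload chunk destined for 0x00010000 is
 * redirected to 0x3ffc0000 + (0x00010000 - 0x00004000) = 0x3ffcc000.
 * Before the payload starts, coreboot is saved into the buffer and the
 * redirected data is copied down to its real destination, as the
 * solution above describes.
 */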

static unsigned long bounce_size, bounce_buffer;

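/* Pick the highest region of RAM below MAX_ADDR that can hold the
 * requested bounce area plus a copy of coreboot, and record its base in
 * the file-scope bounce_buffer. (Note that the bounce_size parameter
 * shadows the file-scope variable of the same name.)
 */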
static void get_bounce_buffer(struct lb_memory *mem, unsigned long bounce_size)
{
	unsigned long lb_size;
	unsigned long mem_entries;
	unsigned long buffer;
	int i;
	lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
	/* Add coreboot's own size so there is somewhere to place a copy to return to */
	lb_size = bounce_size + lb_size;
	mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
	buffer = 0;
	for(i = 0; i < mem_entries; i++) {
		unsigned long mstart, mend;
		unsigned long msize;
		unsigned long tbuffer;
		if (mem->map[i].type != LB_MEM_RAM)
			continue;
		if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
			continue;
		if (unpack_lb64(mem->map[i].size) < lb_size)
			continue;
		mstart = unpack_lb64(mem->map[i].start);
		msize = MAX_ADDR - mstart + 1;
		if (msize > unpack_lb64(mem->map[i].size))
			msize = unpack_lb64(mem->map[i].size);
		mend = mstart + msize;
		tbuffer = mend - lb_size;
		if (tbuffer < buffer)
			continue;
		buffer = tbuffer;
	}
	bounce_buffer = buffer;
}

static int valid_area(struct lb_memory *mem, unsigned long buffer,
	unsigned long start, unsigned long len)
{
	/* Check through all of the memory segments and ensure
	 * the segment that was passed in is completely contained
	 * in RAM.
	 */
	int i;
	unsigned long end = start + len;
	unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);

	/* See if I conflict with the bounce buffer */
	if (end >= buffer) {
		return 0;
	}

	/* Walk through the table of valid memory ranges and see if I
	 * have a match.
	 */
	for(i = 0; i < mem_entries; i++) {
		uint64_t mstart, mend;
		uint32_t mtype;
		mtype = mem->map[i].type;
		mstart = unpack_lb64(mem->map[i].start);
		mend = mstart + unpack_lb64(mem->map[i].size);
		if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
			break;
		}
		if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
			printk_err("Payload is overwriting Coreboot tables.\n");
			break;
		}
	}
	if (i == mem_entries) {
		printk_err("No matching ram area found for range:\n");
		printk_err("  [0x%016lx, 0x%016lx)\n", start, end);
		printk_err("Ram areas\n");
		for(i = 0; i < mem_entries; i++) {
			uint64_t mstart, mend;
			uint32_t mtype;
			mtype = mem->map[i].type;
			mstart = unpack_lb64(mem->map[i].start);
			mend = mstart + unpack_lb64(mem->map[i].size);
			printk_err("  [0x%016lx, 0x%016lx) %s\n",
				(unsigned long)mstart,
				(unsigned long)mend,
				(mtype == LB_MEM_RAM)?"RAM":"Reserved");
		}
		return 0;
	}
	return 1;
}

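/* lb_start and lb_end delimit the coreboot image itself, using the
 * linker-provided _ram_seg and _eram_seg symbols.
 */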
static const unsigned long lb_start = (unsigned long)&_ram_seg;
static const unsigned long lb_end = (unsigned long)&_eram_seg;

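/* Does this segment's destination range intersect [lb_start, lb_end)? */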
static int overlaps_coreboot(struct segment *seg)
{
	unsigned long start, end;
	start = seg->s_dstaddr;
	end = start + seg->s_memsz;
	return !((end <= lb_start) || (start >= lb_end));
}

static void relocate_segment(unsigned long buffer, struct segment *seg)
{
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	unsigned long start, middle, end;

	printk_spew("lb: [0x%016lx, 0x%016lx)\n",
		lb_start, lb_end);

	/* I don't conflict with coreboot so get out of here */
	if (!overlaps_coreboot(seg))
		return;

	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;

	printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		start, middle, end);

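	/* Only an uncompressed segment can be split around coreboot;
	 * compressed data must be handed to the decompressor in one piece,
	 * so a compressed segment that overlaps is bounced in its entirety.
	 */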
	if (seg->compression == CBFS_COMPRESS_NONE) {
		/* Slice off a piece at the beginning
		 * that doesn't conflict with coreboot.
		 */
		if (start < lb_start) {
			struct segment *new;
			unsigned long len = lb_start - start;
			new = malloc(sizeof(*new));
			*new = *seg;
			new->s_memsz = len;
			seg->s_memsz -= len;
			seg->s_dstaddr += len;
			seg->s_srcaddr += len;
			if (seg->s_filesz > len) {
				new->s_filesz = len;
				seg->s_filesz -= len;
			} else {
				seg->s_filesz = 0;
			}

			/* Order by stream offset */
			new->next = seg;
			new->prev = seg->prev;
			seg->prev->next = new;
			seg->prev = new;
			/* Order by original program header order */
			new->phdr_next = seg;
			new->phdr_prev = seg->phdr_prev;
			seg->phdr_prev->phdr_next = new;
			seg->phdr_prev = new;

			/* Compute the new value of start */
			start = seg->s_dstaddr;

			printk_spew("  early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		}

		/* Slice off a piece at the end
		 * that doesn't conflict with coreboot.
		 */
		if (end > lb_end) {
			unsigned long len = lb_end - start;
			struct segment *new;
			new = malloc(sizeof(*new));
			*new = *seg;
			seg->s_memsz = len;
			new->s_memsz -= len;
			new->s_dstaddr += len;
			new->s_srcaddr += len;
			if (seg->s_filesz > len) {
				seg->s_filesz = len;
				new->s_filesz -= len;
			} else {
				new->s_filesz = 0;
			}
			/* Order by stream offset */
			new->next = seg->next;
			new->prev = seg;
			seg->next->prev = new;
			seg->next = new;
			/* Order by original program header order */
			new->phdr_next = seg->phdr_next;
			new->phdr_prev = seg;
			seg->phdr_next->phdr_prev = new;
			seg->phdr_next = new;

			/* Compute the new value of end */
			end = start + len;

			printk_spew("  late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
				new->s_dstaddr,
				new->s_dstaddr + new->s_filesz,
				new->s_dstaddr + new->s_memsz);
		}
	}
	/* Now retarget this segment onto the bounce buffer.
	 * The buffer is a 1:1 image of the coreboot region: the destination
	 * address is shifted into the buffer here, and the data is copied
	 * back to where coreboot lives before the payload starts running.
	 */
	seg->s_dstaddr = buffer + (seg->s_dstaddr - lb_start);

	printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr,
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);
}

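/* Walk the cbfs_payload_segment table at the start of the payload and
 * turn it into a list of struct segment, kept in both stream-offset and
 * original header order. Stops at the ENTRY segment, storing its address
 * in *entry; returns 1 on success and -1 on an unknown segment type.
 */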
static int build_self_segment_list(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload, u32 *entry)
{
	struct segment *new;
	struct segment *ptr;
	int datasize;
	struct cbfs_payload_segment *segment, *first_segment;
	memset(head, 0, sizeof(*head));
	head->phdr_next = head->phdr_prev = head;
	head->next = head->prev = head;
	first_segment = segment = &payload->segments;

	while(1) {
		printk_debug("Segment %p\n", segment);
		switch(segment->type) {
		default:
			printk_emerg("Bad segment type %x\n", segment->type);
			return -1;
		case PAYLOAD_SEGMENT_PARAMS:
			printk_info("found param section\n");
			segment++;
			continue;
		case PAYLOAD_SEGMENT_CODE:
		case PAYLOAD_SEGMENT_DATA:
			printk_info("%s: ", segment->type == PAYLOAD_SEGMENT_CODE ?
				"code" : "data");
			new = malloc(sizeof(*new));
			new->s_dstaddr = ntohl((u32) segment->load_addr);
			new->s_memsz = ntohl(segment->mem_len);
			new->compression = ntohl(segment->compression);

			datasize = ntohl(segment->len);
			new->s_srcaddr = (u32) ((unsigned char *) first_segment) + ntohl(segment->offset);
			new->s_filesz = ntohl(segment->len);
			printk_debug("New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
			/* Clean up the values */
			if (new->s_filesz > new->s_memsz) {
				new->s_filesz = new->s_memsz;
			}
			printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
				new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
			break;
		case PAYLOAD_SEGMENT_BSS:
			printk_info("BSS %p/%d\n", (void *) ntohl((u32) segment->load_addr),
				ntohl(segment->mem_len));
			new = malloc(sizeof(*new));
			new->s_filesz = 0;
			new->s_dstaddr = ntohl((u32) segment->load_addr);
			new->s_memsz = ntohl(segment->mem_len);
			break;

		case PAYLOAD_SEGMENT_ENTRY:
			printk_info("Entry %p\n", (void *) ntohl((u32) segment->load_addr));
			*entry = ntohl((u32) segment->load_addr);
			return 1;
		}
		segment++;
		for(ptr = head->next; ptr != head; ptr = ptr->next) {
			if (new->s_srcaddr < ntohl((u32) segment->load_addr))
				break;
		}
		/* Order by stream offset */
		new->next = ptr;
		new->prev = ptr->prev;
		ptr->prev->next = new;
		ptr->prev = new;
		/* Order by original program header order */
		new->phdr_next = head;
		new->phdr_prev = head->phdr_prev;
		head->phdr_prev->phdr_next = new;
		head->phdr_prev = new;
	}
	return 1;
}

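/* Size and allocate the bounce buffer, check every segment against the
 * memory map, then load each segment: relocate it away from coreboot if
 * needed, copy or decompress its data, and zero the rest up to s_memsz.
 * Returns 1 on success and 0 (or -1 on a bad compression type) on failure.
 */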
static int load_self_segments(
	struct segment *head,
	struct lb_memory *mem,
	struct cbfs_payload *payload)
{
	unsigned long offset;
	struct segment *ptr;

	offset = 0;
	unsigned long required_bounce_size = lb_end - lb_start;
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		if (!overlaps_coreboot(ptr))
			continue;
		unsigned long bounce = ptr->s_dstaddr + ptr->s_memsz - lb_start;
		if (bounce > required_bounce_size)
			required_bounce_size = bounce;
	}
	get_bounce_buffer(mem, required_bounce_size);
	if (!bounce_buffer) {
		printk_err("Could not find a bounce buffer...\n");
		return 0;
	}
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		/* Verify the memory addresses in the segment are valid */
		if (!valid_area(mem, bounce_buffer, ptr->s_dstaddr, ptr->s_memsz))
			return 0;
	}
	for(ptr = head->next; ptr != head; ptr = ptr->next) {
		unsigned char *dest, *middle, *end, *src;
		printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Modify the segment to load onto the bounce_buffer if necessary. */
		relocate_segment(bounce_buffer, ptr);

		printk_debug("Post relocation: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
			ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);

		/* Compute the boundaries of the segment */
		dest = (unsigned char *)(ptr->s_dstaddr);
		src = (unsigned char *)(ptr->s_srcaddr);

		/* Copy data from the initial buffer */
		if (ptr->s_filesz) {
			size_t len;
			len = ptr->s_filesz;
			switch(ptr->compression) {
#if CONFIG_COMPRESSED_PAYLOAD_LZMA==1
			case CBFS_COMPRESS_LZMA: {
				printk_debug("using LZMA\n");
				unsigned long ulzma(unsigned char *src, unsigned char *dst);
				len = ulzma(src, dest);
				break;
			}
#endif
#if CONFIG_COMPRESSED_PAYLOAD_NRV2B==1
			case CBFS_COMPRESS_NRV2B: {
				printk_debug("using NRV2B\n");
				unsigned long unrv2b(u8 *src, u8 *dst, unsigned long *ilen_p);
				unsigned long tmp;
				len = unrv2b(src, dest, &tmp);
				break;
			}
#endif
			case CBFS_COMPRESS_NONE: {
				printk_debug("it's not compressed!\n");
				memcpy(dest, src, len);
				break;
			}
			default:
				printk_info("CBFS:  Unknown compression type %d\n", ptr->compression);
				return -1;
			}
			end = dest + ptr->s_memsz;
			middle = dest + len;
			printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
				(unsigned long)dest,
				(unsigned long)middle,
				(unsigned long)end,
				(unsigned long)src);
		}
		/* Zero the extra bytes between middle & end */
		if (middle < end) {
			printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
				(unsigned long)middle, (unsigned long)(end - middle));

			/* Zero the extra bytes */
			memset(middle, 0, end - middle);
		}
	}
	return 1;
}

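/* Load a CBFS/SELF payload and start it. The bounce buffer and its size
 * are handed to jmp_to_elf_entry() so coreboot can be restored if the
 * payload ever returns.
 */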
int selfboot(struct lb_memory *mem, struct cbfs_payload *payload)
{
	u32 entry = 0;
	struct segment head;

	/* Preprocess the self segments */
	if (!build_self_segment_list(&head, mem, payload, &entry))
		goto out;

	/* Load the segments */
	if (!load_self_segments(&head, mem, payload))
		goto out;

	printk_spew("Loaded segments\n");

	/* Reset to booting from this image as late as possible */
	boot_successful();

	printk_debug("Jumping to boot code at %x\n", entry);
	post_code(0xfe);

	/* Jump to kernel */
	jmp_to_elf_entry((void *)entry, bounce_buffer, bounce_size);
	return 1;

out:
	return 0;
}