blob: 6aae0d8f55939e1a01bcb063bded775a83c9a945 [file] [log] [blame]
Ronald G. Minnichae631262009-04-01 10:48:39 +00001#include <console/console.h>
2#include <part/fallback_boot.h>
3#include <boot/elf.h>
4#include <boot/elf_boot.h>
5#include <boot/coreboot_tables.h>
6#include <ip_checksum.h>
7#include <stream/read_bytes.h>
8#include <stdint.h>
9#include <stdlib.h>
10#include <string.h>
11#include <romfs.h>
12
#ifndef CONFIG_BIG_ENDIAN
/* Convert a 32-bit value from big-endian (network order) to host order.
 * BUGFIX: the argument must be parenthesized -- the old macro expanded
 * ntohl(a + b) as (a + (b & 0xff)) << 24 | ... because '&' binds more
 * loosely than '+'.  NOTE: x is still evaluated multiple times; do not
 * pass expressions with side effects.
 */
#define ntohl(x) ( (((x)&0xff)<<24) | (((x)&0xff00)<<8) | \
	(((x)&0xff0000) >> 8) | (((x)&0xff000000) >> 24) )
#else
#define ntohl(x) (x)
#endif
19
/* Maximum physical address we can use for the coreboot bounce buffer.
 */
#ifndef MAX_ADDR
#define MAX_ADDR -1UL	/* default: no limit (all-ones address) */
#endif

/* Symbols bounding coreboot's resident RAM image -- presumably provided
 * by the linker script.  &_ram_seg .. &_eram_seg is the region a payload
 * may collide with; only the addresses are used, never the contents.
 */
extern unsigned char _ram_seg;
extern unsigned char _eram_seg;
28
/* One contiguous chunk of a payload to be placed in memory.
 * Each node sits on two doubly-linked rings threaded through the same
 * structure: next/prev (kept sorted by stream offset) and
 * phdr_next/phdr_prev (original program-header order).  The list head
 * is a sentinel node.
 */
struct segment {
	struct segment *next;
	struct segment *prev;
	struct segment *phdr_next;
	struct segment *phdr_prev;
	unsigned long s_dstaddr;	/* destination (load) address */
	unsigned long s_srcaddr;	/* where the bytes currently live */
	unsigned long s_memsz;		/* bytes occupied in memory */
	unsigned long s_filesz;		/* bytes to copy; the rest is zeroed */
};
39
/* Node in a chain of verification callbacks run against an ELF image;
 * desc_offset/desc_addr locate the descriptor the callback inspects.
 * NOTE(review): nothing in this file populates or walks the chain --
 * it appears to be kept for the ELF boot path; confirm before removing.
 */
struct verify_callback {
	struct verify_callback *next;
	int (*callback)(struct verify_callback *vcb,
		Elf_ehdr *ehdr, Elf_phdr *phdr, struct segment *head);
	unsigned long desc_offset;
	unsigned long desc_addr;
};
47
/* verify_callback specialization carrying an expected IP-style checksum. */
struct ip_checksum_vcb {
	struct verify_callback data;
	unsigned short ip_checksum;
};
52
53int romfs_self_decompress(int algo, void *src,struct segment *new)
54{
55 u8 *dst;
56
57 /* for uncompressed, it's easy: just point at the area in ROM */
58 if (algo == ROMFS_COMPRESS_NONE) {
59 new->s_srcaddr = (u32) src;
60 new->s_filesz = new->s_memsz;
61 return 0;
62 }
63
64 /* for compression, let's keep it simple. We'll malloc the destination
65 * area and decompress to there. The compression overhead far outweighs
66 * any overhead for an extra copy.
67 */
68 dst = malloc(new->s_memsz);
69 if (! dst)
70 return -1;
71
72 switch(algo) {
73#ifdef CONFIG_COMPRESSION_LZMA
74 case ROMFS_COMPRESS_LZMA: {
75 unsigned long ulzma(unsigned char *src, unsigned char *dst);
76 ulzma(src, dst);
77 }
78#endif
79
80#ifdef CONFIG_COMPRESSION_NRV2B
81 case ROMFS_COMPRESS_NRV2B: {
82 unsigned long unrv2b(u8 *src, u8 *dst, unsigned long *ilen_p);
83 unsigned long tmp;
84 unrv2b(src, dst, &tmp);
85 }
86#endif
87 default:
88 printk_info( "ROMFS: Unknown compression type %d\n",
89 algo);
90 return -1;
91 }
92
93 new->s_srcaddr = (u32) dst;
94 new->s_filesz = new->s_memsz;
95 return 0;
96
97}
98
99/* The problem:
100 * Static executables all want to share the same addresses
101 * in memory because only a few addresses are reliably present on
102 * a machine, and implementing general relocation is hard.
103 *
104 * The solution:
105 * - Allocate a buffer twice the size of the coreboot image.
106 * - Anything that would overwrite coreboot copy into the lower half of
107 * the buffer.
108 * - After loading an ELF image copy coreboot to the upper half of the
109 * buffer.
110 * - Then jump to the loaded image.
111 *
112 * Benefits:
113 * - Nearly arbitrary standalone executables can be loaded.
114 * - Coreboot is preserved, so it can be returned to.
115 * - The implementation is still relatively simple,
 * and much simpler than the general case implemented in kexec.
117 *
118 */
119
120static unsigned long get_bounce_buffer(struct lb_memory *mem)
121{
122 unsigned long lb_size;
123 unsigned long mem_entries;
124 unsigned long buffer;
125 int i;
126 lb_size = (unsigned long)(&_eram_seg - &_ram_seg);
127 /* Double coreboot size so I have somewhere to place a copy to return to */
128 lb_size = lb_size + lb_size;
129 mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
130 buffer = 0;
131 for(i = 0; i < mem_entries; i++) {
132 unsigned long mstart, mend;
133 unsigned long msize;
134 unsigned long tbuffer;
135 if (mem->map[i].type != LB_MEM_RAM)
136 continue;
137 if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
138 continue;
139 if (unpack_lb64(mem->map[i].size) < lb_size)
140 continue;
141 mstart = unpack_lb64(mem->map[i].start);
142 msize = MAX_ADDR - mstart +1;
143 if (msize > unpack_lb64(mem->map[i].size))
144 msize = unpack_lb64(mem->map[i].size);
145 mend = mstart + msize;
146 tbuffer = mend - lb_size;
147 if (tbuffer < buffer)
148 continue;
149 buffer = tbuffer;
150 }
151 return buffer;
152}
153
/* Decide whether [start, start+len) may be used as a load target:
 * it must lie below the bounce buffer and overlap a RAM entry of the
 * coreboot memory table.  Returns 1 if usable, 0 otherwise.
 *
 * NOTE(review): the loop accepts the range on *any* overlap with a RAM
 * entry, not full containment -- confirm whether that is intentional.
 */
static int valid_area(struct lb_memory *mem, unsigned long buffer,
	unsigned long start, unsigned long len)
{
	/* Check through all of the memory segments and ensure
	 * the segment that was passed in is completely contained
	 * in RAM.
	 */
	int i;
	unsigned long end = start + len;
	unsigned long mem_entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);

	/* See if I conflict with the bounce buffer */
	if (end >= buffer) {
		return 0;
	}

	/* Walk through the table of valid memory ranges and see if I
	 * have a match.
	 */
	for(i = 0; i < mem_entries; i++) {
		uint64_t mstart, mend;
		uint32_t mtype;
		mtype = mem->map[i].type;
		mstart = unpack_lb64(mem->map[i].start);
		mend = mstart + unpack_lb64(mem->map[i].size);
		if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
			break;
		}
		/* NOTE(review): overwriting the coreboot tables only logs an
		 * error; because this 'break' exits with i < mem_entries the
		 * function still returns 1 (the load is allowed).  Confirm
		 * this warn-but-allow behavior is intended.
		 */
		if ((mtype == LB_MEM_TABLE) && (start < mend) && (end > mstart)) {
			printk_err("Payload is overwriting Coreboot tables.\n");
			break;
		}
	}
	/* No entry overlapped: the range is not backed by usable RAM */
	if (i == mem_entries) {
		printk_err("No matching ram area found for range:\n");
		printk_err("  [0x%016lx, 0x%016lx)\n", start, end);
		printk_err("Ram areas\n");
		for(i = 0; i < mem_entries; i++) {
			uint64_t mstart, mend;
			uint32_t mtype;
			mtype = mem->map[i].type;
			mstart = unpack_lb64(mem->map[i].start);
			mend = mstart + unpack_lb64(mem->map[i].size);
			printk_err("  [0x%016lx, 0x%016lx) %s\n",
				(unsigned long)mstart,
				(unsigned long)mend,
				(mtype == LB_MEM_RAM)?"RAM":"Reserved");

		}
		return 0;
	}
	return 1;
}
207
/* Redirect any part of 'seg' that would overwrite coreboot's resident
 * image onto the bounce buffer instead.
 *
 * The segment is split (in place, on both linked rings) into up to
 * three pieces: a piece before coreboot, a piece after coreboot, and
 * the conflicting middle, whose destination is remapped 1:1 into the
 * bounce buffer.  The bounce copy is moved back to coreboot's real
 * address range later, by code outside this function.
 */
static void relocate_segment(unsigned long buffer, struct segment *seg)
{
	/* Modify all segments that want to load onto coreboot
	 * to load onto the bounce buffer instead.
	 */
	unsigned long lb_start = (unsigned long)&_ram_seg;
	unsigned long lb_end = (unsigned long)&_eram_seg;
	unsigned long start, middle, end;

	printk_spew("lb: [0x%016lx, 0x%016lx)\n",
		lb_start, lb_end);

	start = seg->s_dstaddr;
	middle = start + seg->s_filesz;
	end = start + seg->s_memsz;
	/* I don't conflict with coreboot so get out of here */
	if ((end <= lb_start) || (start >= lb_end))
		return;

	printk_spew("segment: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		start, middle, end);

	/* Slice off a piece at the beginning
	 * that doesn't conflict with coreboot.
	 */
	if (start < lb_start) {
		struct segment *new;
		unsigned long len = lb_start - start;
		new = malloc(sizeof(*new));
		*new = *seg;
		new->s_memsz = len;
		seg->s_memsz -= len;
		seg->s_dstaddr += len;
		seg->s_srcaddr += len;
		/* Split filesz: the head gets at most len file bytes;
		 * if the file data ends inside the head, the remainder
		 * of 'seg' is all bss (filesz 0).
		 */
		if (seg->s_filesz > len) {
			new->s_filesz = len;
			seg->s_filesz -= len;
		} else {
			seg->s_filesz = 0;
		}

		/* Order by stream offset */
		new->next = seg;
		new->prev = seg->prev;
		seg->prev->next = new;
		seg->prev = new;
		/* Order by original program header order */
		new->phdr_next = seg;
		new->phdr_prev = seg->phdr_prev;
		seg->phdr_prev->phdr_next = new;
		seg->phdr_prev = new;

		/* compute the new value of start */
		start = seg->s_dstaddr;

		printk_spew("   early: [0x%016lx, 0x%016lx, 0x%016lx)\n",
			new->s_dstaddr,
			new->s_dstaddr + new->s_filesz,
			new->s_dstaddr + new->s_memsz);
	}

	/* Slice off a piece at the end
	 * that doesn't conflict with coreboot
	 */
	if (end > lb_end) {
		unsigned long len = lb_end - start;
		struct segment *new;
		new = malloc(sizeof(*new));
		*new = *seg;
		seg->s_memsz = len;
		new->s_memsz -= len;
		new->s_dstaddr += len;
		new->s_srcaddr += len;
		/* Split filesz: 'seg' keeps at most len file bytes; the
		 * tail keeps whatever file data extends past coreboot.
		 */
		if (seg->s_filesz > len) {
			seg->s_filesz = len;
			new->s_filesz -= len;
		} else {
			new->s_filesz = 0;
		}
		/* Order by stream offset */
		new->next = seg->next;
		new->prev = seg;
		seg->next->prev = new;
		seg->next = new;
		/* Order by original program header order */
		new->phdr_next = seg->phdr_next;
		new->phdr_prev = seg;
		seg->phdr_next->phdr_prev = new;
		seg->phdr_next = new;

		/* compute the new value of end */
		end = start + len;

		printk_spew("   late: [0x%016lx, 0x%016lx, 0x%016lx)\n",
			new->s_dstaddr,
			new->s_dstaddr + new->s_filesz,
			new->s_dstaddr + new->s_memsz);

	}
	/* Now retarget this segment onto the bounce buffer */
	/* sort of explanation: the buffer is a 1:1 mapping to coreboot.
	 * so you will make the dstaddr be this buffer, and it will get copied
	 * later to where coreboot lives.
	 */
	seg->s_dstaddr = buffer +  (seg->s_dstaddr - lb_start);

	printk_spew(" bounce: [0x%016lx, 0x%016lx, 0x%016lx)\n",
		seg->s_dstaddr,
		seg->s_dstaddr + seg->s_filesz,
		seg->s_dstaddr + seg->s_memsz);
}
319
320
321static int build_self_segment_list(
322 struct segment *head,
323 unsigned long bounce_buffer, struct lb_memory *mem,
324 struct romfs_payload *payload, u32 *entry)
325{
326 struct segment *new;
327 struct segment *ptr;
328 u8 *data;
329 int datasize;
330 struct romfs_payload_segment *segment, *first_segment;
331 memset(head, 0, sizeof(*head));
332 head->phdr_next = head->phdr_prev = head;
333 head->next = head->prev = head;
334 first_segment = segment = &payload->segments;
335
336 while(1) {
337 printk_debug("Segment %p\n", segment);
338 switch(segment->type) {
339 default: printk_emerg("Bad segment type %x\n", segment->type);
340 return -1;
341 case PAYLOAD_SEGMENT_PARAMS:
342 printk_info("found param section\n");
343 segment++;
344 continue;
345 case PAYLOAD_SEGMENT_CODE:
346 case PAYLOAD_SEGMENT_DATA:
347 printk_info( "%s: ", segment->type == PAYLOAD_SEGMENT_CODE ?
348 "code" : "data");
349 new = malloc(sizeof(*new));
350 new->s_dstaddr = ntohl((u32) segment->load_addr);
351 new->s_memsz = ntohl(segment->mem_len);
352
353 datasize = ntohl(segment->len);
354 /* figure out decompression, do it, get pointer to the area */
355 if (romfs_self_decompress(ntohl(segment->compression),
356 ((unsigned char *) first_segment) +
357 ntohl(segment->offset), new)) {
358 printk_emerg("romfs_self_decompress failed\n");
359 return;
360 }
361 printk_debug("New segment dstaddr 0x%lx memsize 0x%lx srcaddr 0x%lx filesize 0x%lx\n",
362 new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
363 /* Clean up the values */
364 if (new->s_filesz > new->s_memsz) {
365 new->s_filesz = new->s_memsz;
366 }
367 printk_debug("(cleaned up) New segment addr 0x%lx size 0x%lx offset 0x%lx filesize 0x%lx\n",
368 new->s_dstaddr, new->s_memsz, new->s_srcaddr, new->s_filesz);
369 break;
370 case PAYLOAD_SEGMENT_BSS:
371 printk_info("BSS %p/%d\n", (void *) ntohl((u32) segment->load_addr),
372 ntohl(segment->mem_len));
373 new = malloc(sizeof(*new));
374 new->s_filesz = 0;
375 new->s_dstaddr = ntohl((u32) segment->load_addr);
376 new->s_memsz = ntohl(segment->mem_len);
377
378 break;
379
380 case PAYLOAD_SEGMENT_ENTRY:
381 printk_info("Entry %p\n", (void *) ntohl((u32) segment->load_addr));
382 *entry = (void *) ntohl((u32) segment->load_addr);
383 return 1;
384 }
385 segment++;
386 for(ptr = head->next; ptr != head; ptr = ptr->next) {
387 if (new->s_srcaddr < ntohl((u32) segment->load_addr))
388 break;
389 }
390 /* Order by stream offset */
391 new->next = ptr;
392 new->prev = ptr->prev;
393 ptr->prev->next = new;
394 ptr->prev = new;
395 /* Order by original program header order */
396 new->phdr_next = head;
397 new->phdr_prev = head->phdr_prev;
398 head->phdr_prev->phdr_next = new;
399 head->phdr_prev = new;
400
401 /* Verify the memory addresses in the segment are valid */
402 if (!valid_area(mem, bounce_buffer, new->s_dstaddr, new->s_memsz))
403 goto out;
404
405 /* Modify the segment to load onto the bounce_buffer if necessary.
406 */
407 relocate_segment(bounce_buffer, new);
408 }
409 return 1;
410 out:
411 return 0;
412}
413
414static int load_self_segments(
415 struct segment *head, struct romfs_payload *payload)
416{
417 unsigned long offset;
418 struct segment *ptr;
419
420 offset = 0;
421 for(ptr = head->next; ptr != head; ptr = ptr->next) {
422 unsigned long skip_bytes, read_bytes;
423 unsigned char *dest, *middle, *end, *src;
424 byte_offset_t result;
425 printk_debug("Loading Segment: addr: 0x%016lx memsz: 0x%016lx filesz: 0x%016lx\n",
426 ptr->s_dstaddr, ptr->s_memsz, ptr->s_filesz);
427
428 /* Compute the boundaries of the segment */
429 dest = (unsigned char *)(ptr->s_dstaddr);
430 end = dest + ptr->s_memsz;
431 middle = dest + ptr->s_filesz;
432 src = ptr->s_srcaddr;
433 printk_spew("[ 0x%016lx, %016lx, 0x%016lx) <- %016lx\n",
434 (unsigned long)dest,
435 (unsigned long)middle,
436 (unsigned long)end,
437 (unsigned long)src);
438
439 /* Copy data from the initial buffer */
440 if (ptr->s_filesz) {
441 size_t len;
442 len = ptr->s_filesz;
443 memcpy(dest, src, len);
444 dest += len;
445 }
446
447 /* Zero the extra bytes between middle & end */
448 if (middle < end) {
449 printk_debug("Clearing Segment: addr: 0x%016lx memsz: 0x%016lx\n",
450 (unsigned long)middle, (unsigned long)(end - middle));
451
452 /* Zero the extra bytes */
453 memset(middle, 0, end - middle);
454 }
455 }
456 return 1;
457 out:
458 return 0;
459}
460
461int selfboot(struct lb_memory *mem, struct romfs_payload *payload)
462{
463 void *entry;
464 struct segment head;
465 unsigned long bounce_buffer;
466
467 /* Find a bounce buffer so I can load to coreboot's current location */
468 bounce_buffer = get_bounce_buffer(mem);
469 if (!bounce_buffer) {
470 printk_err("Could not find a bounce buffer...\n");
471 goto out;
472 }
473
474 /* Preprocess the self segments */
475 if (!build_self_segment_list(&head, bounce_buffer, mem, payload, &entry))
476 goto out;
477
478 /* Load the segments */
479 if (!load_self_segments(&head, payload))
480 goto out;
481
482 printk_spew("Loaded segments\n");
483
484 /* Reset to booting from this image as late as possible */
485 boot_successful();
486
487 printk_debug("Jumping to boot code at %p\n", entry);
488 post_code(0xfe);
489
490 /* Jump to kernel */
491 jmp_to_elf_entry(entry, bounce_buffer);
492 return 1;
493
494 out:
495 return 0;
496}
497