/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/post_code.h>
#include <arch/ram_segs.h>

/* Place the stack in the bss section. It's not necessary to define it in
 * the linker script. */
.section .bss, "aw", @nobits
.global _stack
.global _estack

/* Stack alignment is not enforced with the rmodule loader; reserve one
 * extra CPU's worth of stack so that alignment can be enforced on entry. */
.align CONFIG_STACK_SIZE
_stack:
.space (CONFIG_MAX_CPUS+1)*CONFIG_STACK_SIZE
_estack:
#if CONFIG(COOP_MULTITASKING)
.global thread_stacks
thread_stacks:
.space CONFIG_STACK_SIZE*CONFIG_NUM_THREADS
#endif

	.section ".text._start", "ax", @progbits
#ifdef __x86_64__
	.code64
#else
	.code32
#endif
	.globl _start
_start:
	cli
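	/* Switch to the RAM-resident GDT defined at the end of this file
	 * (earlier stages run from the simpler ROM GDT in gdt_init.S).
	 * The %cs: override lets gdtaddr be reached before any data
	 * segment register has been reloaded. */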
	lgdt	%cs:gdtaddr
#ifndef __x86_64__
	ljmp	$RAM_CODE_SEG, $1f
#endif
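	/* Reload every data segment register with the flat RAM data
	 * selector. The ljmp above already reloaded %cs on 32-bit builds;
	 * 64-bit builds reload it via SetCodeSelector below. */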
1:	movl	$RAM_DATA_SEG, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs
#ifdef __x86_64__
	mov	$RAM_CODE_SEG64, %ecx
	call	SetCodeSelector
#endif

	post_code(POST_ENTRY_C_START)	/* post 13 */

	cld

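	/* Stash the caller's first argument (the cbmem top pointer) in
	 * _cbmem_top_ptr: it arrives in %rdi under the x86_64 SysV calling
	 * convention, or on the stack on 32-bit builds. */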
#ifdef __x86_64__
	mov	%rdi, _cbmem_top_ptr
#else
	/* The return address is at 0(%esp), the calling argument at 4(%esp) */
	movl	4(%esp), %eax
	movl	%eax, _cbmem_top_ptr
#endif

	/** poison the stack. Code should not count on the
	 * stack being full of zeros. This stack poisoning
	 * recently uncovered a bug in the broadcast SIPI
	 * code.
	 */
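	/* %edi = start of the stack area, %ecx = its size in dwords,
	 * %eax = fill pattern for the rep stosl below. */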
	leal	_stack, %edi
	movl	$_estack, %ecx
	subl	%edi, %ecx
	shrl	$2, %ecx	/* it is 32 bit aligned, right? */
	movl	$0xDEADBEEF, %eax
	rep
	stosl

	/* Set new stack with enforced alignment. */
	movl	$_estack, %esp
	andl	$(~(CONFIG_STACK_SIZE-1)), %esp
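	/* CONFIG_STACK_SIZE is a power of two, so masking its low bits
	 * rounds %esp down to a stack-size boundary. The extra stack
	 * reserved above guarantees the rounded pointer still lies inside
	 * the reserved area even when the rmodule loader placed _estack
	 * unaligned. */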

#if CONFIG(COOP_MULTITASKING)
	/* Push the thread pointer. */
	push	$0
#endif
	/* Push the CPU index and struct CPU */
	push	$0
	push	$0
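	/* These zeroed slots sit at the very top of the aligned stack;
	 * presumably they seed the per-CPU bookkeeping (struct cpu_info)
	 * that coreboot later locates from the stack pointer. */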

	/*
	 * Now we are finished. Memory is up, data is copied and
	 * bss is cleared. Now we call the main routine and
	 * let it do the rest.
	 */
	post_code(POST_PRE_HARDWAREMAIN)	/* post fe */

	andl	$0xFFFFFFF0, %esp
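	/* Align the stack to 16 bytes before entering C so that
	 * compiler-generated code which assumes 16-byte stack alignment
	 * (e.g. aligned SSE spills) is safe. */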

#if CONFIG(ASAN_IN_RAMSTAGE)
	call	asan_init
#endif

#if CONFIG(GDB_WAIT)
	call	gdb_hw_init
	call	gdb_stub_breakpoint
#endif
	call	main
	/* NOTREACHED */
.Lhlt:
	post_code(POST_DEAD_CODE)	/* post ee */
	hlt
	jmp	.Lhlt

#if CONFIG(GDB_WAIT)

	.globl gdb_stub_breakpoint
gdb_stub_breakpoint:
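	/* Fabricate the stack frame a CPU exception would have produced
	 * (flags, %cs, return address, error code, vector number) so that
	 * int_hand treats this like a user-defined vector 32 trap. */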
#ifdef __x86_64__
	pop	%rax	/* Return address */
	pushfq		/* pushfl is not encodable in 64-bit mode */
	/* Neither is push %cs; stage the selector through a caller-saved
	 * scratch register instead. */
	mov	%cs, %rcx
	push	%rcx
	push	%rax	/* Return address */
	push	$0	/* No error code */
	push	$32	/* vector 32 is user defined */
#else
	popl	%eax	/* Return address */
	pushfl
	pushl	%cs
	pushl	%eax	/* Return address */
	pushl	$0	/* No error code */
	pushl	$32	/* vector 32 is user defined */
#endif
	jmp	int_hand
#endif

	.globl gdt, gdt_end

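	/* lgdt operand: a 16-bit limit (table size minus one) followed by
	 * the linear base address of the table (32-bit in protected mode,
	 * 64-bit in long mode). */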
gdtaddr:
	.word	gdt_end - gdt - 1
#ifdef __x86_64__
	.quad	gdt
#else
	.long	gdt	/* we know the offset */
#endif

	.data

	/* This is the GDT for the GCC-compiled part of coreboot.
	 * It is different from the GDT in the assembly part of coreboot,
	 * which is defined in gdt_init.S.
	 *
	 * When the machine is initially started, we use a very simple
	 * GDT from ROM (the one in gdt_init.S) which only contains the
	 * entries needed for protected mode.
	 *
	 * Once we're executing code from RAM, we want to do more complex
	 * things, like initializing PCI option ROMs in real mode or
	 * resuming from suspend to RAM.
	 */
gdt:
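	/* Each 8-byte entry below encodes, in order: limit[15:0],
	 * base[15:0], base[23:16], the access byte, flags plus limit[19:16],
	 * and base[31:24]. */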
	/* selgdt 0, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* selgdt 8, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* selgdt 0x10, flat code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xcf, 0x00	/* G=1 and limit nibble 0xf,
					 * so we get a 4 GB limit */
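	/* Decoded: base 0, limit 0xfffff in 4 KiB pages, access byte 0x9b
	 * (present, DPL 0, executable/readable, accessed), flags G=1 and
	 * D/B=1 for a 32-bit segment. */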

	/* selgdt 0x18, flat data segment */
	.word	0xffff, 0x0000
#ifdef __x86_64__
	.byte	0x00, 0x92, 0xcf, 0x00
#else
	.byte	0x00, 0x93, 0xcf, 0x00
#endif

	/* selgdt 0x20, unused */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x00, 0x00, 0x00

	/* The next two entries are used for executing VGA option ROMs */

	/* selgdt 0x28 16 bit 64k code at 0x00000000 */
	.word	0xffff, 0x0000
	.byte	0, 0x9a, 0, 0

	/* selgdt 0x30 16 bit 64k data at 0x00000000 */
	.word	0xffff, 0x0000
	.byte	0, 0x92, 0, 0

	/* The next two entries are used for ACPI S3 RESUME */

	/* selgdt 0x38, flat data segment 16 bit */
	.word	0x0000, 0x0000		/* dummy */
	.byte	0x00, 0x93, 0x8f, 0x00	/* G=1 and limit nibble 0xf,
					 * so we get a 4 GB limit */

	/* selgdt 0x40, flat code segment 16 bit */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0x8f, 0x00	/* G=1 and limit nibble 0xf,
					 * so we get a 4 GB limit */

#ifdef __x86_64__
	/* selgdt 0x48, flat x64 code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xaf, 0x00
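	/* 0xaf: G=1, L=1 (64-bit code); the D bit must be 0 for long-mode
	 * code segments, and the low nibble is limit[19:16]. */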
#endif
gdt_end:

	.section ".text._start", "ax", @progbits
#ifdef __x86_64__
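# Long mode has no direct far jump that could load a new %cs, so this routine
# builds an iretq frame (ss, rsp, rflags, cs, rip) using the selector passed
# in %rcx and lets iretq "return" into the new code segment.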
SetCodeSelector:
	# save rsp because iret will align it to a 16 byte boundary
	mov	%rsp, %rdx

	# use iret to jump to a 64-bit offset in a new code segment
	# iret will pop cs:rip, flags, then ss:rsp
	mov	%ss, %ax	# need to push ss...
	push	%rax		# the push ss instruction is not valid in
				# x64 mode, so push it via %rax
	push	%rsp
	pushfq
	push	%rcx		# cx is code segment selector from caller
	mov	$setCodeSelectorLongJump, %rax
	push	%rax

	# the iret will continue at next instruction, with the new cs value
	# loaded
	iretq

setCodeSelectorLongJump:
	# restore rsp, it might not have been 16-byte aligned on entry
	mov	%rdx, %rsp
	ret
#endif