/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/x86/cr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>
#include <arch/ram_segs.h>

#define __RAMSTAGE__

/* The SIPI vector is responsible for initializing the APs in the system. It
 * loads microcode, sets up MSRs, and enables caching before calling into
 * C code. */

.section ".module_parameters", "aw", @progbits
ap_start_params:
gdtaddr:
.word 0 /* limit */
.long 0 /* table */
.word 0 /* unused */
idt_ptr:
.long 0
stack_top:
.long 0
stack_size:
.long 0
microcode_lock:
.long 0
microcode_ptr:
.long 0
msr_table_ptr:
.long 0
msr_count:
.long 0
c_handler:
.long 0
ap_count:
.long 0
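
/* The launching code fills in this parameter block before the APs start.
 * A rough C-side sketch of its layout (field names here are illustrative,
 * mirroring the labels above rather than quoting the exact C declaration):
 *
 *	struct sipi_params {
 *		uint16_t gdt_limit;
 *		uint32_t gdt;
 *		uint16_t unused;
 *		uint32_t idt_ptr;
 *		uint32_t stack_top;
 *		uint32_t stack_size;
 *		uint32_t microcode_lock;
 *		uint32_t microcode_ptr;
 *		uint32_t msr_table_ptr;
 *		uint32_t msr_count;
 *		uint32_t c_handler;
 *		uint32_t ap_count;
 *	} __packed;
 */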

#define CR0_CLEAR_FLAGS_CACHE_ENABLE (CR0_CD | CR0_NW)
#define CR0_SET_FLAGS (CR0_CLEAR_FLAGS_CACHE_ENABLE | CR0_PE)
#define CR0_CLEAR_FLAGS \
	(CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP)
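
/* Note on the flag groups above: the first CR0 write in _start sets
 * CR0_SET_FLAGS, i.e. PE plus CD and NW, so the AP enters protected mode
 * with the cache still disabled. Caching is enabled further down, after
 * microcode and the MSR table have been handled, by clearing
 * CR0_CLEAR_FLAGS_CACHE_ENABLE again. */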

.text
.code16
.global _start
_start:
	cli
	xorl %eax, %eax
	movl %eax, %cr3 /* Invalidate TLB */

	/* On hyper-threaded CPUs, invalidating the cache here is
	 * very, very bad. Don't.
	 */

	/* Set up the data segment. */
	movw %cs, %ax
	movw %ax, %ds

	/* The gdtaddr needs to be relative to the data segment in order
	 * to properly dereference it. The .text section comes first in an
	 * rmodule so _start can be used as a proxy for the load address. */
	movl $(gdtaddr), %ebx
	sub $(_start), %ebx

	lgdtl (%ebx)

	movl %cr0, %eax
	andl $~CR0_CLEAR_FLAGS, %eax
	orl $CR0_SET_FLAGS, %eax
	movl %eax, %cr0

	ljmpl $RAM_CODE_SEG, $1f
1:
	.code32
	movw $RAM_DATA_SEG, %ax
	movw %ax, %ds
	movw %ax, %es
	movw %ax, %ss
	movw %ax, %fs
	movw %ax, %gs

	/* Load the Interrupt descriptor table */
	mov idt_ptr, %ebx
	lidt (%ebx)

	/* Obtain CPU number. */
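	/* Each AP atomically bumps ap_count with a cmpxchg retry loop; the
	 * value it manages to write (left in %ecx) becomes its CPU number.
	 * If another AP won the race, cmpxchg reloads %eax with the current
	 * count, ZF stays clear and the loop retries. */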
	movl ap_count, %eax
1:
	movl %eax, %ecx
	inc %ecx
	lock cmpxchg %ecx, ap_count
	jnz 1b

	/* Set up stacks for each CPU. */
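	/* Each AP's %esp ends up at stack_top - cpu_num * stack_size, so,
	 * e.g., with 4 KiB stacks CPU 1 gets the 4 KiB directly below
	 * stack_top and CPU 2 the next 4 KiB below that. */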
	movl stack_size, %eax
	mul %ecx
	movl stack_top, %edx
	subl %eax, %edx
	mov %edx, %esp
	andl $0xfffffff0, %esp /* ensure stack alignment */

	/* Save CPU number. */
	mov %ecx, %esi

	/* Determine if one should check microcode versions. */
	mov microcode_ptr, %edi
	test %edi, %edi
	jz microcode_done /* Bypass if no microcode exists. */

	/* Get the Microcode version. */
	mov $1, %eax
	cpuid
	mov $IA32_BIOS_SIGN_ID, %ecx
	rdmsr
	/* If something is already loaded, skip loading again. */
	test %edx, %edx
	jnz microcode_done

	/* Determine if parallel microcode loading is allowed. */
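	/* A microcode_lock value of -1 means parallel loading is allowed and
	 * the bit lock below is skipped; any other value makes the APs
	 * serialize on it. */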
	cmpl $0xffffffff, microcode_lock
	je load_microcode

	/* Protect microcode loading. */
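	/* lock btsl atomically tests and sets bit 0 of microcode_lock and
	 * returns the previous bit value in CF, so each AP spins until it
	 * sees the bit clear and thereby owns the lock. */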
lock_microcode:
	lock btsl $0, microcode_lock
	jc lock_microcode

load_microcode:
	/* Load new microcode. */
	mov $IA32_BIOS_UPDT_TRIG, %ecx
	xor %edx, %edx
	mov %edi, %eax
	/* The microcode pointer is passed in pointing to the header. Adjust
	 * pointer to reflect the payload (header size is 48 bytes). */
	add $48, %eax
	pusha
	wrmsr
	popa

	/* Unconditionally unlock microcode loading. */
	cmpl $0xffffffff, microcode_lock
	je microcode_done

	xor %eax, %eax
	mov %eax, microcode_lock

microcode_done:
	/*
	 * Load MSRs. Each entry in the table consists of:
	 * 0: index,
	 * 4: value[31:0]
	 * 8: value[63:32]
	 */
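	/*
	 * Roughly, in C terms, each entry is
	 *	struct { uint32_t index; uint32_t lo; uint32_t hi; };
	 * (names illustrative), i.e. 12 bytes per entry, matching the
	 * "add $12, %edi" stride in the loop below.
	 */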
	mov msr_table_ptr, %edi
	mov msr_count, %ebx
	test %ebx, %ebx
	jz 1f

#if CONFIG(X86_AMD_FIXED_MTRRS)
	/* Allow modification of RdDram and WrDram bits */
	mov $SYSCFG_MSR, %ecx
	rdmsr
	or $SYSCFG_MSR_MtrrFixDramModEn, %eax
	wrmsr
#endif

load_msr:
	mov (%edi), %ecx
	mov 4(%edi), %eax
	mov 8(%edi), %edx
	wrmsr
	add $12, %edi
	dec %ebx
	jnz load_msr

#if CONFIG(X86_AMD_FIXED_MTRRS)
	mov $SYSCFG_MSR, %ecx
	rdmsr
	and $~SYSCFG_MSR_MtrrFixDramModEn, %eax
	wrmsr
#endif

1:
	/* Enable caching. */
	mov %cr0, %eax
	and $~(CR0_CLEAR_FLAGS_CACHE_ENABLE), %eax
	mov %eax, %cr0

#if CONFIG(SSE)
	/* Enable SSE instructions. */
	mov %cr4, %eax
	orl $(CR4_OSFXSR | CR4_OSXMMEXCPT), %eax
	mov %eax, %cr4
#endif

#ifdef __x86_64__
	/* entry64.inc preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

	mov %rsi, %rdi /* cpu_num */

	movl c_handler, %eax
	call *%rax
#else
	/* c_handler(cpu_num), preserve proper stack alignment */
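	/* 12 bytes of padding plus the 4-byte cpu_num argument keep %esp
	 * 16-byte aligned across the call. */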
	sub $12, %esp
	push %esi /* cpu_num */

	mov c_handler, %eax
	call *%eax
#endif


halt_jump:
	hlt
	jmp halt_jump