/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <cpu/x86/cr.h>
#include <cpu/amd/mtrr.h>

/* The SIPI vector is responsible for initializing the APs in the system. It
 * loads microcode, sets up MSRs, and enables caching before calling into
 * C code. */

/* These segment selectors need to match the gdt entries in c_start.S. */
#define CODE_SEG 0x10
#define DATA_SEG 0x18

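/* Microcode update trigger (0x79) and update signature (0x8B) MSRs, known
 * in the Intel SDM as IA32_BIOS_UPDT_TRIG and IA32_BIOS_SIGN_ID. */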
#define IA32_UPDT_TRIG 0x79
#define IA32_BIOS_SIGN_ID 0x8b

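/* This parameter block is populated at runtime by the BSP before the SIPI
 * is sent; its layout must stay in sync with the C code that fills it in. */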
.section ".module_parameters", "aw", @progbits
ap_start_params:
gdtaddr:
.word 0 /* limit */
.long 0 /* table */
.word 0 /* unused */
idt_ptr:
.long 0
stack_top:
.long 0
stack_size:
.long 0
microcode_lock:
.long 0
microcode_ptr:
.long 0
msr_table_ptr:
.long 0
msr_count:
.long 0
c_handler:
.long 0
ap_count:
.long 0

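/* CD and NW together control the cache. On entry they are set (cache off)
 * along with PE for protected mode; they are cleared again later, once MSR
 * setup is done, to turn the cache on. */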
#define CR0_CLEAR_FLAGS_CACHE_ENABLE (CR0_CD | CR0_NW)
#define CR0_SET_FLAGS (CR0_CLEAR_FLAGS_CACHE_ENABLE | CR0_PE)
#define CR0_CLEAR_FLAGS \
	(CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP)

.text
.code16
.global _start
_start:
	cli
	xorl	%eax, %eax
	movl	%eax, %cr3 /* Invalidate TLB */

	/* On hyper-threaded CPUs, invalidating the cache here is
	 * very, very bad. Don't.
	 */

	/* Set up the data segment. */
	movw	%cs, %ax
	movw	%ax, %ds

	/* The gdtaddr needs to be relative to the data segment in order
	 * to properly dereference it. The .text section comes first in an
	 * rmodule, so _start can be used as a proxy for the load address. */
	movl	$(gdtaddr), %ebx
	sub	$(_start), %ebx

	lgdtl	(%ebx)

	movl	%cr0, %eax
	andl	$~CR0_CLEAR_FLAGS, %eax
	orl	$CR0_SET_FLAGS, %eax
	movl	%eax, %cr0

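	/* The far jump reloads %cs with the 32-bit code segment, completing
	 * the switch into protected mode. */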
	ljmpl	$CODE_SEG, $1f
1:
	.code32
	movw	$DATA_SEG, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	movw	%ax, %fs
	movw	%ax, %gs

	/* Load the interrupt descriptor table. */
	mov	idt_ptr, %ebx
	lidt	(%ebx)

	/* Obtain CPU number. */
	movl	ap_count, %eax
1:
	movl	%eax, %ecx
	inc	%ecx
	lock cmpxchg %ecx, ap_count
	jnz	1b
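	/* lock cmpxchg stores %ecx only if ap_count still equals %eax;
	 * otherwise it reloads %eax with the current value and the loop
	 * retries. %ecx now holds this AP's unique, 1-based CPU number. */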

	/* Set up stacks for each CPU. */
	movl	stack_size, %eax
	mul	%ecx
	movl	stack_top, %edx
	subl	%eax, %edx
	mov	%edx, %esp
	andl	$0xfffffff0, %esp /* ensure stack alignment */
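	/* %esp = stack_top - (cpu_num * stack_size), rounded down to a
	 * 16-byte boundary. */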

	/* Save CPU number. */
	mov	%ecx, %esi

	/* Determine if one should check microcode versions. */
	mov	microcode_ptr, %edi
	test	%edi, %edi
	jz	microcode_done /* Bypass if no microcode exists. */

	/* Get the microcode version. */
	mov	$1, %eax
	cpuid
	mov	$IA32_BIOS_SIGN_ID, %ecx
	rdmsr
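	/* Per the Intel SDM, reading IA32_BIOS_SIGN_ID after CPUID(1)
	 * returns the currently loaded update revision in %edx. */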
	/* If an update is already loaded, skip loading again. */
	test	%edx, %edx
	jnz	microcode_done

	/* A microcode_lock value of 0xffffffff means parallel loading is
	 * allowed; skip taking the lock. */
	cmpl	$0xffffffff, microcode_lock
	je	load_microcode

	/* Protect microcode loading. */
lock_microcode:
	lock bts $0, microcode_lock
	jc	lock_microcode
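	/* lock bts atomically sets bit 0 and returns its previous value in
	 * CF, so the jc spins until the lock was observed free. */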

load_microcode:
	/* Load new microcode. */
	mov	$IA32_UPDT_TRIG, %ecx
	xor	%edx, %edx
	mov	%edi, %eax
	/* The microcode pointer is passed in pointing to the header. Adjust
	 * pointer to reflect the payload (header size is 48 bytes). */
	add	$48, %eax
	pusha
	wrmsr
	popa

	/* Unconditionally unlock microcode loading; if parallel loading was
	 * allowed, there is no lock to release. */
	cmpl	$0xffffffff, microcode_lock
	je	microcode_done

	xor	%eax, %eax
	mov	%eax, microcode_lock

microcode_done:
	/*
	 * Load MSRs. Each 12-byte entry in the table consists of:
	 *   0: index
	 *   4: value[31:0]
	 *   8: value[63:32]
	 */
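	/*
	 * Equivalently, as a C sketch (field names here are illustrative,
	 * not taken from this file):
	 *
	 *	struct saved_msr {
	 *		uint32_t index;	// MSR address, loaded into %ecx
	 *		uint32_t lo;	// value[31:0], loaded into %eax
	 *		uint32_t hi;	// value[63:32], loaded into %edx
	 *	};
	 */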
	mov	msr_table_ptr, %edi
	mov	msr_count, %ebx
	test	%ebx, %ebx
	jz	1f

#if IS_ENABLED(CONFIG_X86_AMD_FIXED_MTRRS)
	/* Allow modification of RdDram and WrDram bits. */
	mov	$SYSCFG_MSR, %ecx
	rdmsr
	or	$SYSCFG_MSR_MtrrFixDramModEn, %eax
	wrmsr
#endif

load_msr:
	mov	(%edi), %ecx
	mov	4(%edi), %eax
	mov	8(%edi), %edx
	wrmsr
	add	$12, %edi
	dec	%ebx
	jnz	load_msr

#if IS_ENABLED(CONFIG_X86_AMD_FIXED_MTRRS)
	mov	$SYSCFG_MSR, %ecx
	rdmsr
	and	$~SYSCFG_MSR_MtrrFixDramModEn, %eax
	wrmsr
#endif

1:
	/* Enable caching. */
	mov	%cr0, %eax
	and	$~(CR0_CLEAR_FLAGS_CACHE_ENABLE), %eax
	mov	%eax, %cr0

#if IS_ENABLED(CONFIG_SSE)
	/* Enable SSE instructions. */
	mov	%cr4, %eax
	orl	$(CR4_OSFXSR | CR4_OSXMMEXCPT), %eax
	mov	%eax, %cr4
#endif

	/* c_handler(cpu_num); the 12-byte adjustment plus the 4-byte push
	 * keeps %esp 16-byte aligned at the call. */
	sub	$12, %esp
	push	%esi /* cpu_num */
	mov	c_handler, %eax
	call	*%eax
halt_jump:
	hlt
	jmp	halt_jump
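	/* If the C handler ever returns, the AP parks here. With interrupts
	 * masked, hlt sleeps until an NMI, SMI, or INIT wakes the core. */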