blob: 18d668c9dd0cfdd5eb367af7ad600a5a3b63844a [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2008-2010 coresystems GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

Stefan Reinauer4da810b2009-07-21 21:41:42 +000022// Make sure no stage 2 code is included:
Myles Watson1d6d45e2009-11-06 17:02:51 +000023#define __PRE_RAM__
Stefan Reinauer4da810b2009-07-21 21:41:42 +000024
Stefan Reinauer582748f2011-04-19 01:18:54 +000025/* On AMD's platforms we can set SMBASE by writing an MSR */
26#if !CONFIG_NORTHBRIDGE_AMD_AMDK8 && !CONFIG_NORTHBRIDGE_AMD_AMDFAM10
Stefan Reinauercadc5452010-12-18 23:29:37 +000027
Stefan Reinauer14e22772010-04-27 06:56:47 +000028// FIXME: Is this piece of code southbridge specific, or
Stefan Reinauer4da810b2009-07-21 21:41:42 +000029// can it be cleaned up so this include is not required?
Stefan Reinauerbc0f7a62010-08-01 15:41:14 +000030// It's needed right now because we get our DEFAULT_PMBASE from
Stefan Reinauer5f5436f2010-04-25 20:42:02 +000031// here.
Stefan Reinauer582748f2011-04-19 01:18:54 +000032#if CONFIG_SOUTHBRIDGE_INTEL_I82801GX
Stefan Reinauer4da810b2009-07-21 21:41:42 +000033#include "../../../southbridge/intel/i82801gx/i82801gx.h"
Stefan Reinauer582748f2011-04-19 01:18:54 +000034#elif CONFIG_SOUTHBRIDGE_INTEL_I82801DX
Stefan Reinauerbc0f7a62010-08-01 15:41:14 +000035#include "../../../southbridge/intel/i82801dx/i82801dx.h"
Stefan Reinauer582748f2011-04-19 01:18:54 +000036#elif CONFIG_SOUTHBRIDGE_INTEL_SCH
Patrick Georgibe61a172010-12-18 07:48:43 +000037#include "../../../southbridge/intel/sch/sch.h"
Stefan Reinauerbc0f7a62010-08-01 15:41:14 +000038#else
39#error "Southbridge needs SMM handler support."
40#endif
Stefan Reinauerdebb11f2008-10-29 04:46:52 +000041
Stefan Reinauer3aa067f2012-04-02 13:24:04 -070042#if CONFIG_SMM_TSEG
43
44#include <cpu/x86/mtrr.h>
45
46#endif /* CONFIG_SMM_TSEG */
47
Stefan Reinauerdebb11f2008-10-29 04:46:52 +000048#define LAPIC_ID 0xfee00020
49
50.global smm_relocation_start
51.global smm_relocation_end
52
53/* initially SMM is some sort of real mode. */
54.code16

/**
 * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
 * this is not a good place for the SMM handler to live, so it needs to
 * be relocated.
 * Traditionally SMM handlers used to live in the A segment (0xa0000).
 * With growing SMM handlers, more CPU cores, etc. CPU vendors started
 * allowing the handler to be relocated to the end of physical memory,
 * which they refer to as TSEG.
 * This trampoline code relocates SMBASE to base address - ( lapicid * 0x400 )
 *
 * Why 0x400? It is a safe value to cover the save state area per CPU. On
 * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
 * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
 * bigger, effectively sizing our data structures 0x300 bytes.
 *
 * Example (with SMM handler living at 0xa0000):
 *
 * LAPICID	SMBASE		SMM Entry	SAVE STATE
 *    0		0xa0000		0xa8000		0xafd00
 *    1		0x9fc00		0xa7c00		0xaf900
 *    2		0x9f800		0xa7800		0xaf500
 *    3		0x9f400		0xa7400		0xaf100
 *    4		0x9f000		0xa7000		0xaed00
 *    5		0x9ec00		0xa6c00		0xae900
 *    6		0x9e800		0xa6800		0xae500
 *    7		0x9e400		0xa6400		0xae100
 *    8		0x9e000		0xa6000		0xadd00
 *    9		0x9dc00		0xa5c00		0xad900
 *   10		0x9d800		0xa5800		0xad500
 *   11		0x9d400		0xa5400		0xad100
 *   12		0x9d000		0xa5000		0xacd00
 *   13		0x9cc00		0xa4c00		0xac900
 *   14		0x9c800		0xa4800		0xac500
 *   15		0x9c400		0xa4400		0xac100
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *   31		0x98400		0xa0400		0xa8100
 *
 * With 32 cores, the SMM handler would need to fit between
 * 0xa0000-0xa0400 and the stub plus stack would need to go
 * at 0xa8000-0xa8100 (example for core 0). That is not enough.
 *
 * This means we're basically limited to 16 cpu cores before
 * we need to move the SMM handler to TSEG.
 *
 * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
 * On those the above only works for up to 2 cores. But for now we only
 * care for Core (2) Duo/Solo
 *
 */

107
108smm_relocation_start:
109 /* Check revision to see if AMD64 style SMM_BASE
110 * Intel Core Solo/Duo: 0x30007
111 * Intel Core2 Solo/Duo: 0x30100
Stefan Reinauer3aa067f2012-04-02 13:24:04 -0700112 * Intel SandyBridge: 0x30101
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000113 * AMD64: 0x3XX64
114 * This check does not make much sense, unless someone ports
115 * SMI handling to AMD64 CPUs.
116 */
117
118 mov $0x38000 + 0x7efc, %ebx
119 addr32 mov (%ebx), %al
120 cmp $0x64, %al
121 je 1f
Stefan Reinauer14e22772010-04-27 06:56:47 +0000122
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000123 mov $0x38000 + 0x7ef8, %ebx
124 jmp smm_relocate
1251:
126 mov $0x38000 + 0x7f00, %ebx
127
128smm_relocate:
129 /* Get this CPU's LAPIC ID */
130 movl $LAPIC_ID, %esi
131 addr32 movl (%esi), %ecx
132 shr $24, %ecx
Stefan Reinauer14e22772010-04-27 06:56:47 +0000133
134 /* calculate offset by multiplying the
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000135 * apic ID by 1024 (0x400)
136 */
137 movl %ecx, %edx
138 shl $10, %edx
139
Stefan Reinauer3aa067f2012-04-02 13:24:04 -0700140#if CONFIG_SMM_TSEG
141 movl $(TSEG_BAR), %ecx /* Get TSEG base from PCIE */
142 addr32 movl (%ecx), %eax /* Save TSEG_BAR in %eax */
143 andl $~1, %eax /* Remove lock bit */
144#else
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000145 movl $0xa0000, %eax
Stefan Reinauer3aa067f2012-04-02 13:24:04 -0700146#endif
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000147 subl %edx, %eax /* subtract offset, see above */
148
149 addr32 movl %eax, (%ebx)
150
Stefan Reinauer3aa067f2012-04-02 13:24:04 -0700151#if CONFIG_SMM_TSEG
152 /* Check for SMRR capability in MTRRCAP[11] */
153 movl $MTRRcap_MSR, %ecx
154 rdmsr
155 bt $11, %eax
156 jnc skip_smrr
157
158 /* TSEG base */
159 movl $(TSEG_BAR), %ecx /* Get TSEG base from PCIE */
160 addr32 movl (%ecx), %eax /* Save TSEG_BAR in %eax */
161 andl $~1, %eax /* Remove lock bit */
162 movl %eax, %ebx
163
164 /* Set SMRR base address. */
165 movl $SMRRphysBase_MSR, %ecx
166 orl $MTRR_TYPE_WRBACK, %eax
167 xorl %edx, %edx
168 wrmsr
169
170 /* Set SMRR mask. */
171 movl $SMRRphysMask_MSR, %ecx
172 movl $(~(CONFIG_SMM_TSEG_SIZE - 1) | MTRRphysMaskValid), %eax
173 xorl %edx, %edx
174 wrmsr
175
176#if CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE || CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE
177 /*
178 * IED base is top 4M of TSEG
179 */
180 addl $(CONFIG_SMM_TSEG_SIZE - IED_SIZE), %ebx
181 movl $(0x30000 + 0x8000 + 0x7eec), %eax
182 addr32 movl %ebx, (%eax)
183#endif
184
185skip_smrr:
186#endif
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000187
Stefan Reinauerbc0f7a62010-08-01 15:41:14 +0000188 /* The next section of code is potentially southbridge specific */
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000189
190 /* Clear SMI status */
191 movw $(DEFAULT_PMBASE + 0x34), %dx
192 inw %dx, %ax
193 outw %ax, %dx
194
195 /* Clear PM1 status */
196 movw $(DEFAULT_PMBASE + 0x00), %dx
197 inw %dx, %ax
198 outw %ax, %dx
199
200 /* Set EOS bit so other SMIs can occur */
201 movw $(DEFAULT_PMBASE + 0x30), %dx
202 inl %dx, %eax
203 orl $(1 << 1), %eax
204 outl %eax, %dx
205
Stefan Reinauerbc0f7a62010-08-01 15:41:14 +0000206 /* End of southbridge specific section. */
207
Stefan Reinauer582748f2011-04-19 01:18:54 +0000208#if CONFIG_DEBUG_SMM_RELOCATION
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000209 /* print [SMM-x] so we can determine if CPUx went to SMM */
Stefan Reinauer08670622009-06-30 15:17:49 +0000210 movw $CONFIG_TTYS0_BASE, %dx
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000211 mov $'[', %al
212 outb %al, %dx
213 mov $'S', %al
214 outb %al, %dx
215 mov $'M', %al
216 outb %al, %dx
217 outb %al, %dx
218 movb $'-', %al
219 outb %al, %dx
220 /* calculate ascii of cpu number. More than 9 cores? -> FIXME */
221 movb %cl, %al
Stefan Reinauer14e22772010-04-27 06:56:47 +0000222 addb $'0', %al
Stefan Reinauerdebb11f2008-10-29 04:46:52 +0000223 outb %al, %dx
224 mov $']', %al
225 outb %al, %dx
226 mov $'\r', %al
227 outb %al, %dx
228 mov $'\n', %al
229 outb %al, %dx
230#endif
231
232 /* That's it. return */
233 rsm
234smm_relocation_end:
Stefan Reinauercadc5452010-12-18 23:29:37 +0000235#endif