/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2008 coresystems GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */
21
// Make sure no stage 2 code is included:
#define __PRE_RAM__

// FIXME: Is this piece of code southbridge specific, or
// can it be cleaned up so this include is not required?
// It's needed right now because we get our PM_BASE from
// here.
#include "../../../southbridge/intel/i82801gx/i82801gx.h"

#undef DEBUG_SMM_RELOCATION
//#define DEBUG_SMM_RELOCATION

/* Memory-mapped local APIC ID register (xAPIC MMIO base 0xfee00000 + 0x20).
 * The 8-bit APIC ID is held in bits 31:24 of this register. */
#define LAPIC_ID 0xfee00020

/* Exported so the caller can copy the trampoline (start..end) into SMRAM. */
.global smm_relocation_start
.global smm_relocation_end

/* initially SMM is some sort of real mode. */
.code16

/**
 * This trampoline code relocates SMBASE to 0xa0000 - ( lapicid * 0x400 )
 *
 * Why 0x400? It is a safe value to cover the save state area per CPU. On
 * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
 * Core 2 CPUs the _documented_ parts of the save state area is 48 bytes
 * bigger, effectively sizing our data structures 0x300 bytes.
 *
 * LAPICID	SMBASE		SMM Entry	SAVE STATE
 *    0		0xa0000		0xa8000		0xafd00
 *    1		0x9fc00		0xa7c00		0xaf900
 *    2		0x9f800		0xa7800		0xaf500
 *    3		0x9f400		0xa7400		0xaf100
 *    4		0x9f000		0xa7000		0xaed00
 *    5		0x9ec00		0xa6c00		0xae900
 *    6		0x9e800		0xa6800		0xae500
 *    7		0x9e400		0xa6400		0xae100
 *    8		0x9e000		0xa6000		0xadd00
 *    9		0x9dc00		0xa5c00		0xad900
 *   10		0x9d800		0xa5800		0xad500
 *   11		0x9d400		0xa5400		0xad100
 *   12		0x9d000		0xa5000		0xacd00
 *   13		0x9cc00		0xa4c00		0xac900
 *   14		0x9c800		0xa4800		0xac500
 *   15		0x9c400		0xa4400		0xac100
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *   31		0x98400		0xa0400		0xa8100
 *
 * With 32 cores, the SMM handler would need to fit between
 * 0xa0000-0xa0400 and the stub plus stack would need to go
 * at 0xa8000-0xa8100 (example for core 0). That is not enough.
 *
 * This means we're basically limited to 16 cpu cores before
 * we need to use the TSEG/HSEG for the actual SMM handler plus stack.
 * When we exceed 32 cores, we also need to put SMBASE to TSEG/HSEG.
 *
 * If we figure out the documented values above are safe to use,
 * we could pack the structure above even more, so we could use the
 * scheme to pack save state areas for 63 AMD CPUs or 58 Intel CPUs
 * in the ASEG.
 *
 * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
 * On those the above only works for up to 2 cores. But for now we only
 * care for Core (2) Duo/Solo.
 */

smm_relocation_start:
	/* Check revision to see if AMD64 style SMM_BASE
	 * Intel Core Solo/Duo:  0x30007
	 * Intel Core2 Solo/Duo: 0x30100
	 * AMD64:                0x3XX64
	 * This check does not make much sense, unless someone ports
	 * SMI handling to AMD64 CPUs.
	 *
	 * The CPU is still running with the default SMBASE of 0x30000,
	 * so the save state area starts at 0x38000 + 0x7e00 and the
	 * SMM revision ID field sits at save-state offset 0x7efc.
	 */

	/* Read low byte of the SMM revision ID. */
	mov $0x38000 + 0x7efc, %ebx
	addr32 mov (%ebx), %al
	cmp $0x64, %al
	je 1f

	/* Intel layout: writable SMBASE field at save-state offset 0x7ef8. */
	mov $0x38000 + 0x7ef8, %ebx
	jmp smm_relocate
1:
	/* AMD64 layout: SMBASE field at save-state offset 0x7f00. */
	mov $0x38000 + 0x7f00, %ebx

smm_relocate:
	/* Get this CPU's LAPIC ID (bits 31:24 of the APIC ID register). */
	movl $LAPIC_ID, %esi
	addr32 movl (%esi), %ecx
	shr $24, %ecx

	/* calculate offset by multiplying the
	 * apic ID by 1024 (0x400)
	 */
	movl %ecx, %edx
	shl $10, %edx

	/* New SMBASE = 0xa0000 - (lapicid * 0x400); see table above. */
	movl $0xa0000, %eax
	subl %edx, %eax /* subtract offset, see above */

	/* Store the new SMBASE into the save state area (%ebx points at
	 * the layout-specific SMBASE field selected above); it takes
	 * effect on the next SMI after rsm. */
	addr32 movl %eax, (%ebx)


	/* The next section of code is hardware specific */

	/* Clear SMI status: read SMI_STS (PMBASE + 0x34) and write the
	 * value back — the status bits are write-1-to-clear. */
	movw $(DEFAULT_PMBASE + 0x34), %dx
	inw %dx, %ax
	outw %ax, %dx

	/* Clear PM1 status (PM1_STS at PMBASE + 0x00), same W1C scheme. */
	movw $(DEFAULT_PMBASE + 0x00), %dx
	inw %dx, %ax
	outw %ax, %dx

	/* Set EOS bit (bit 1 of SMI_EN, PMBASE + 0x30) so other SMIs
	 * can occur. */
	movw $(DEFAULT_PMBASE + 0x30), %dx
	inl %dx, %eax
	orl $(1 << 1), %eax
	outl %eax, %dx

	/* End of hardware specific section. */
#ifdef DEBUG_SMM_RELOCATION
	/* print [SMM-x] so we can determine if CPUx went to SMM.
	 * NOTE(review): bytes are pushed blindly into the UART data
	 * register without polling the line status — assumes the FIFO
	 * can absorb them; acceptable for debug-only output. */
	movw $CONFIG_TTYS0_BASE, %dx
	mov $'[', %al
	outb %al, %dx
	mov $'S', %al
	outb %al, %dx
	mov $'M', %al
	outb %al, %dx
	outb %al, %dx	/* second 'M' */
	movb $'-', %al
	outb %al, %dx
	/* calculate ascii of cpu number. More than 9 cores? -> FIXME */
	movb %cl, %al	/* %cl still holds the LAPIC ID */
	addb $'0', %al
	outb %al, %dx
	mov $']', %al
	outb %al, %dx
	mov $'\r', %al
	outb %al, %dx
	mov $'\n', %al
	outb %al, %dx
#endif

	/* That's it. Resume from SMM; the new SMBASE is now latched. */
	rsm
smm_relocation_end:
174