/* SPDX-License-Identifier: GPL-2.0-only */

// FIXME: Is this piece of code southbridge specific, or
// can it be cleaned up so this include is not required?
// It's needed right now because we get our DEFAULT_PMBASE from
// here.
#if CONFIG(SOUTHBRIDGE_INTEL_I82801DX)
#include <southbridge/intel/i82801dx/i82801dx.h>
#elif CONFIG(SOUTHBRIDGE_INTEL_I82801IX)
#include <southbridge/intel/i82801ix/i82801ix.h>
#else
#error "Southbridge needs SMM handler support."
#endif

// ADDR32() macro
#include <arch/registers.h>

#if !CONFIG(SMM_ASEG)
#error "Only use this file with ASEG."
#endif /* CONFIG_SMM_ASEG */

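/* MMIO address of the local APIC ID register: the default LAPIC base
 * 0xfee00000 plus register offset 0x20.
 */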
#define LAPIC_ID 0xfee00020

.global smm_relocation_start
.global smm_relocation_end

/* initially SMM is some sort of real mode. */
.code16

/**
 * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
 * this is not a good place for the SMM handler to live, so it needs to
 * be relocated.
 * Traditionally, SMM handlers used to live in the A segment (0xa0000).
 * With growing SMM handlers, more CPU cores, etc., CPU vendors started
 * allowing the handler to be relocated to the end of physical memory,
 * which they refer to as TSEG.
 * This trampoline code relocates SMBASE to base address - (lapicid * 0x400).
 *
 * Why 0x400? It is a safe value to cover the save state area per CPU. On
 * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
 * Core 2 CPUs the _documented_ part of the save state area is 48 bytes
 * bigger, effectively sizing our data structures at 0x300 bytes.
 *
 * Example (with SMM handler living at 0xa0000):
 *
 * LAPICID	SMBASE		SMM Entry	SAVE STATE
 *    0		0xa0000		0xa8000		0xafd00
 *    1		0x9fc00		0xa7c00		0xaf900
 *    2		0x9f800		0xa7800		0xaf500
 *    3		0x9f400		0xa7400		0xaf100
 *    4		0x9f000		0xa7000		0xaed00
 *    5		0x9ec00		0xa6c00		0xae900
 *    6		0x9e800		0xa6800		0xae500
 *    7		0x9e400		0xa6400		0xae100
 *    8		0x9e000		0xa6000		0xadd00
 *    9		0x9dc00		0xa5c00		0xad900
 *   10		0x9d800		0xa5800		0xad500
 *   11		0x9d400		0xa5400		0xad100
 *   12		0x9d000		0xa5000		0xacd00
 *   13		0x9cc00		0xa4c00		0xac900
 *   14		0x9c800		0xa4800		0xac500
 *   15		0x9c400		0xa4400		0xac100
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *    .		   .		   .		   .
 *   31		0x98400		0xa0400		0xa8100
 *
 * With 32 cores, the SMM handler would need to fit between
 * 0xa0000-0xa0400 and the stub plus stack would need to go
 * at 0xa8000-0xa8100 (example for core 0). That is not enough.
 *
 * This means we're basically limited to 16 CPU cores before
 * we need to move the SMM handler to TSEG.
 *
 * Note: Some versions of Pentium M need their SMBASE aligned to 32k.
 * On those, the above only works for up to 2 cores. But for now we only
 * care for Core (2) Duo/Solo.
 *
 */

smm_relocation_start:
	/* Check the SMM revision ID to see if an AMD64-style SMM_BASE
	 * layout is in use:
	 *   Intel Core Solo/Duo:  0x30007
	 *   Intel Core2 Solo/Duo: 0x30100
	 *   Intel SandyBridge:    0x30101
	 *   AMD64:                0x3XX64
	 * This check does not make much sense, unless someone ports
	 * SMI handling to AMD64 CPUs.
	 */

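	/* With the default SMBASE of 0x30000, the SMM entry point is at 0x38000
	 * and the save state sits at the top of the 64 KiB SMRAM range.
	 * 0x38000 + 0x7efc (i.e. SMBASE + 0xfefc) holds the SMM revision ID;
	 * the save state's SMBASE field is at SMBASE + 0xfef8 for the Intel
	 * layout and at SMBASE + 0xff00 for the AMD64-style layout.
	 */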
	mov $0x38000 + 0x7efc, %ebx
	ADDR32(mov) (%ebx), %al
	cmp $0x64, %al
	je 1f

	mov $0x38000 + 0x7ef8, %ebx
	jmp smm_relocate
1:
	mov $0x38000 + 0x7f00, %ebx

smm_relocate:
	/* Get this CPU's LAPIC ID (bits 31:24 of the LAPIC ID register) */
	movl $LAPIC_ID, %esi
	ADDR32(movl) (%esi), %ecx
	shr $24, %ecx

	/* calculate offset by multiplying the
	 * APIC ID by 1024 (0x400)
	 */
	movl %ecx, %edx
	shl $10, %edx

	movl $0xa0000, %eax
	subl %edx, %eax /* subtract offset, see above */

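	/* Store the new SMBASE in the save state's SMBASE field that %ebx
	 * points to. It takes effect when this handler executes RSM, so the
	 * next SMI already enters at the relocated address.
	 */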
	ADDR32(movl) %eax, (%ebx)

	/* The next section of code is potentially southbridge specific */

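	/* The status registers below are write-1-to-clear: reading the current
	 * value and writing it back clears every bit that is currently set.
	 */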
	/* Clear SMI status */
	movw $(DEFAULT_PMBASE + 0x34), %dx
	inw %dx, %ax
	outw %ax, %dx

	/* Clear PM1 status */
	movw $(DEFAULT_PMBASE + 0x00), %dx
	inw %dx, %ax
	outw %ax, %dx

	/* Set the EOS (End of SMI) bit, bit 1 of SMI_EN at PMBASE + 0x30,
	 * so other SMIs can occur.
	 */
	movw $(DEFAULT_PMBASE + 0x30), %dx
	inl %dx, %eax
	orl $(1 << 1), %eax
	outl %eax, %dx

	/* End of southbridge specific section. */

#if CONFIG(DEBUG_SMM_RELOCATION)
	/* print [SMM-x] so we can determine if CPUx went to SMM */
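	/* Note: the characters are written straight to the UART data port at
	 * CONFIG_TTYS0_BASE; there is no transmitter-ready polling here.
	 */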
	movw $CONFIG_TTYS0_BASE, %dx
	mov $'[', %al
	outb %al, %dx
	mov $'S', %al
	outb %al, %dx
	mov $'M', %al
	outb %al, %dx
	outb %al, %dx	/* 'M' is sent twice */
	movb $'-', %al
	outb %al, %dx
	/* calculate the ASCII digit of the CPU number. More than 9 cores? -> FIXME */
	movb %cl, %al
	addb $'0', %al
	outb %al, %dx
	mov $']', %al
	outb %al, %dx
	mov $'\r', %al
	outb %al, %dx
	mov $'\n', %al
	outb %al, %dx
#endif

	/* That's it. return */
	rsm
smm_relocation_end: