/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <arch/cpu.h>
#include <arch/io.h>
#include <arch/cbfs.h>
#include <arch/stages.h>
#include <console/console.h>
#include <cbmem.h>
#include <cpu/x86/mtrr.h>
#include <romstage_handoff.h>
#include <baytrail/gpio.h>
#include <baytrail/iomap.h>
#include <baytrail/iosf.h>
#include <baytrail/lpc.h>
#include <baytrail/pci_devs.h>
#include <baytrail/romstage.h>

/* The cache-as-ram assembly file calls romstage_main() after setting up
 * cache-as-ram. romstage_main() will then call the mainboard's
 * mainboard_romstage_entry() function. That function then calls
 * romstage_common() below. The reason for the back and forth is to provide
 * a common entry point from cache-as-ram while still allowing for code
 * sharing. Because we can't use global variables, the stack is used for
 * allocations -- thus the need to call back and forth. */
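
/*
 * Illustrative sketch only (kept out of the build): a board's
 * mainboard_romstage_entry() is expected to do its board-specific setup and
 * then hand control back through romstage_common(). Apart from the
 * romstage_common() call and the mrc_params field used below, the helper
 * symbol board_mrc_params and the rest of this sketch are assumptions.
 */
#if 0
void mainboard_romstage_entry(unsigned long bist)
{
	struct romstage_params rp = {
		/* board_mrc_params is a hypothetical per-board data blob. */
		.mrc_params = &board_mrc_params,
	};

	/* Board-specific early setup (GPIOs, SPD selection, etc.) goes here. */

	romstage_common(&rp);
}
#endif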

static void *setup_stack_and_mttrs(void);

static void program_base_addresses(void)
{
	uint32_t reg;
	const uint32_t lpc_dev = PCI_DEV(0, LPC_DEV, LPC_FUNC);

	/* Memory Mapped IO registers. The low bit(s) OR'd into each base
	 * presumably act as the enable for the respective BAR. */
	reg = PMC_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, PBASE, reg);
	reg = IO_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, IOBASE, reg);
	reg = ILB_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, IBASE, reg);
	reg = SPI_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, SBASE, reg);
	reg = MPHY_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, MPBASE, reg);
	reg = RCBA_BASE_ADDRESS | 1;
	pci_write_config32(lpc_dev, RCBA, reg);

	/* IO Port Registers. */
	reg = ACPI_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, ABASE, reg);
	reg = GPIO_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, GBASE, reg);
}

/* Entry from cache-as-ram.inc. The returned pointer is the stack (with the
 * MTRR setup records built by setup_stack_and_mttrs() on top) that is used
 * once cache-as-ram is torn down. */
void * asmlinkage romstage_main(unsigned long bist)
{
	/* Call into mainboard. */
	mainboard_romstage_entry(bist);

	return setup_stack_and_mttrs();
}

/* Entry from the mainboard. */
void romstage_common(const struct romstage_params *params)
{
	struct romstage_handoff *handoff;

	program_base_addresses();

	byt_config_com1_and_enable();

	console_init();

	/* Initialize RAM. S3 resume is not handled in this path; the handoff
	 * structure below is always marked as a non-resume boot. */
	raminit(params->mrc_params, 5);

	handoff = romstage_handoff_find_or_add();
	if (handoff != NULL)
		handoff->s3_resume = 0;
	else
		printk(BIOS_DEBUG, "Romstage handoff structure not added!\n");
}

static void open_up_spi(void)
{
	const uintptr_t sbase = SPI_BASE_ADDRESS;

	/* Disable generating an SMI when the WPD bit is set. */
	write32(sbase + 0xf8, read32(sbase + 0xf8) & ~(1 << 7));
	/* Disable the SMM-only BIOS write restriction and set the WPD bit. */
	write32(sbase + 0xfc, 1 | (read32(sbase + 0xfc) & ~(1 << 5)));
}

void asmlinkage romstage_after_car(void)
{
	/* Allow the BIOS to program the SPI part. */
	open_up_spi();

	/* Load and run the ramstage; copy_and_run() does not return. */
	copy_and_run();
	while (1);
}

/* Push a value onto a downward-growing stack and return the new top. */
static inline uint32_t *stack_push(u32 *stack, u32 value)
{
	stack = &stack[-1];
	*stack = value;
	return stack;
}

/* Romstage needs quite a bit of stack for decompressing images since the lzma
 * lib keeps its state on the stack during romstage. */
static unsigned long choose_top_of_stack(void)
{
	unsigned long stack_top;
	const unsigned long romstage_ram_stack_size = 0x5000;

	/* cbmem_add() does a find() before add(). */
	stack_top = (unsigned long)cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK,
					romstage_ram_stack_size);
	stack_top += romstage_ram_stack_size;
	return stack_top;
}

/* setup_stack_and_mttrs() determines the stack to use after
 * cache-as-ram is torn down as well as the MTRR settings to use. */
static void *setup_stack_and_mttrs(void)
{
	unsigned long top_of_stack;
	int num_mtrrs;
	uint32_t *slot;
	uint32_t mtrr_mask_upper;
	uint32_t top_of_ram;

	/* Top of stack needs to be aligned to a 4-byte boundary. */
	top_of_stack = choose_top_of_stack() & ~3;
	slot = (void *)top_of_stack;
	num_mtrrs = 0;

	/* The upper bits of the MTRR mask need to be set according to the
	 * number of physical address bits. */
	mtrr_mask_upper = (1 << ((cpuid_eax(0x80000008) & 0xff) - 32)) - 1;
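	/*
	 * Worked example (assuming a part that reports 36 physical address
	 * bits): 36 - 32 = 4, so the expression above yields (1 << 4) - 1 =
	 * 0xf, i.e. PHYSMASK bits 35:32 set in the upper dword.
	 */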

	/* The values for each MTRR are pushed mask then base, with the upper
	 * 32 bits of each pushed before the lower 32 bits. Because the stack
	 * grows downward, this produces the following layout, reading upward
	 * from the returned pointer:
	 * +0: Number of MTRRs
	 * +4: MTRR base 0 31:0
	 * +8: MTRR base 0 63:32
	 * +12: MTRR mask 0 31:0
	 * +16: MTRR mask 0 63:32
	 * +20: MTRR base 1 31:0
	 * +24: MTRR base 1 63:32
	 * +28: MTRR mask 1 31:0
	 * +32: MTRR mask 1 63:32
	 */

	/* Cache the ROM as WP just below 4GiB. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
	num_mtrrs++;
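	/*
	 * Worked example, assuming an 8MiB CONFIG_ROM_SIZE: the pushes above
	 * produce a base low dword of 0xff800000 | MTRR_TYPE_WRPROT (5) =
	 * 0xff800005 and a mask low dword of 0xff800000 | MTRRphysMaskValid
	 * (1 << 11) = 0xff800800, i.e. the 0xff800000-0xffffffff ROM window
	 * is cached write-protect.
	 */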

	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	top_of_ram = (uint32_t)cbmem_top();
	/* Cache 8MiB below the top of ram. The top of ram under 4GiB is the
	 * start of the TSEG region. It is required to be 8MiB aligned. Set
	 * this area as cacheable so it can be used later for ramstage before
	 * setting up the entire RAM as cacheable. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Cache 8MiB at the top of ram. The top of ram is where the TSEG
	 * region resides. However, it is not restricted to SMM mode until
	 * SMM has been relocated. Setting the region to cacheable provides
	 * faster access when relocating the SMM handler as well as when
	 * using the TSEG region for other purposes. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Save the number of MTRRs to set up. Return the stack location
	 * pointing to the number of MTRRs. */
	slot = stack_push(slot, num_mtrrs);

	return slot;
}
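
/*
 * Illustrative sketch only (kept out of the build): the records pushed by
 * setup_stack_and_mttrs() are consumed by the cache-as-ram teardown path,
 * which is assembly rather than C. The C below merely shows how that
 * consumer walks the returned pointer -- a count followed by base/mask
 * pairs, low dword first -- and assumes the msr_t type, wrmsr(), and the
 * MTRRphysBase_MSR()/MTRRphysMask_MSR() helpers from <cpu/x86/msr.h> and
 * <cpu/x86/mtrr.h>.
 */
#if 0
static void program_mtrrs_from_stack(uint32_t *slot)
{
	uint32_t count = *slot++;
	uint32_t i;

	for (i = 0; i < count; i++) {
		msr_t base, mask;

		base.lo = *slot++;	/* MTRR base, bits 31:0 */
		base.hi = *slot++;	/* MTRR base, bits 63:32 */
		mask.lo = *slot++;	/* MTRR mask, bits 31:0 */
		mask.hi = *slot++;	/* MTRR mask, bits 63:32 */

		wrmsr(MTRRphysBase_MSR(i), base);
		wrmsr(MTRRphysMask_MSR(i), mask);
	}
}
#endif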