/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <arch/cpu.h>
#include <arch/io.h>
#include <arch/cbfs.h>
#include <arch/stages.h>
#include <console/console.h>
#include <cbmem.h>
#include <cpu/x86/mtrr.h>
#include <romstage_handoff.h>
#include <baytrail/gpio.h>
#include <baytrail/iomap.h>
#include <baytrail/iosf.h>
#include <baytrail/lpc.h>
#include <baytrail/pci_devs.h>
#include <baytrail/romstage.h>

/* The cache-as-ram assembly file calls romstage_main() after setting up
 * cache-as-ram. romstage_main() then calls the mainboard's
 * mainboard_romstage_entry() function, which in turn calls
 * romstage_common() below. The reason for the back and forth is to provide
 * a common entry point from cache-as-ram while still allowing for code
 * sharing. Because global variables can't be used, the stack is used for
 * allocations -- thus the need to call back and forth. */
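
/* As a rough illustration of the flow described above, a mainboard's entry
 * point could look like the sketch below. The sketch is not part of this
 * file; the exact contents of struct mrc_params and any romstage_params
 * fields other than mrc_params are assumptions.
 *
 *	void mainboard_romstage_entry(unsigned long bist)
 *	{
 *		struct mrc_params mp;
 *		struct romstage_params rp = { .mrc_params = &mp };
 *
 *		memset(&mp, 0, sizeof(mp));
 *		... fill in board-specific DRAM/GPIO configuration here ...
 *
 *		romstage_common(&rp);
 *	}
 */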

static void *setup_stack_and_mttrs(void);

static void program_base_addresses(void)
{
	uint32_t reg;
	const uint32_t lpc_dev = PCI_DEV(0, LPC_DEV, LPC_FUNC);

	/* Memory Mapped IO registers. */
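	/* The low bits OR'd into each base address below appear to be the
	 * BAR enable bits: bit 1 for these BARs, bit 0 for RCBA. */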
	reg = PMC_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, PBASE, reg);
	reg = IO_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, IOBASE, reg);
	reg = ILB_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, IBASE, reg);
	reg = SPI_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, SBASE, reg);
	reg = MPHY_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, MPBASE, reg);
	reg = RCBA_BASE_ADDRESS | 1;
	pci_write_config32(lpc_dev, RCBA, reg);

	/* IO Port Registers. */
	reg = ACPI_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, ABASE, reg);
	reg = GPIO_BASE_ADDRESS | 2;
	pci_write_config32(lpc_dev, GBASE, reg);
}

/* Entry from cache-as-ram.inc. */
void * asmlinkage romstage_main(unsigned long bist)
{
	/* Call into mainboard. */
	mainboard_romstage_entry(bist);

	return setup_stack_and_mttrs();
}

/* Entry from the mainboard. */
void romstage_common(const struct romstage_params *params)
{
	struct romstage_handoff *handoff;

	program_base_addresses();

	byt_config_com1_and_enable();

	console_init();

	gfx_init();

	/* Initialize RAM */
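	/* The second argument to raminit() is presumably the previous sleep
	 * state; 5 (S5) selects the normal, non-S3 boot path, which matches
	 * s3_resume being cleared below. */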
	raminit(params->mrc_params, 5);

	handoff = romstage_handoff_find_or_add();
	if (handoff != NULL)
		handoff->s3_resume = 0;
	else
		printk(BIOS_DEBUG, "Romstage handoff structure not added!\n");
}

static void open_up_spi(void)
{
	const uintptr_t sbase = SPI_BASE_ADDRESS;

	/* Disable generating SMI when setting WPD bit. */
	write32(sbase + 0xf8, read32(sbase + 0xf8) & ~(1 << 7));
	/* Disable the SMM-only BIOS write and set WPD bit. */
	write32(sbase + 0xfc, 1 | (read32(sbase + 0xfc) & ~(1 << 5)));
}

void asmlinkage romstage_after_car(void)
{
	/* Allow BIOS to program SPI part. */
	open_up_spi();

	/* Load the ramstage. */
	copy_and_run();
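	/* copy_and_run() is not expected to return; the loop below is just
	 * a safety net in case it ever does. */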
	while (1);
}

static inline uint32_t *stack_push(uint32_t *stack, uint32_t value)
{
	stack = &stack[-1];
	*stack = value;
	return stack;
}

/* Romstage needs quite a bit of stack for decompressing images since the
 * LZMA library keeps its state on the stack during romstage. */
static unsigned long choose_top_of_stack(void)
{
	unsigned long stack_top;
	const unsigned long romstage_ram_stack_size = 0x5000;

	/* cbmem_add() does a find() before add(). */
	stack_top = (unsigned long)cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK,
					     romstage_ram_stack_size);
	stack_top += romstage_ram_stack_size;
	return stack_top;
}

/* setup_stack_and_mttrs() determines the stack to use after cache-as-ram is
 * torn down as well as the MTRR settings to use. */
static void *setup_stack_and_mttrs(void)
{
	unsigned long top_of_stack;
	int num_mtrrs;
	uint32_t *slot;
	uint32_t mtrr_mask_upper;
	uint32_t top_of_ram;

	/* Top of stack needs to be aligned to a 4-byte boundary. */
	top_of_stack = choose_top_of_stack() & ~3;
	slot = (void *)top_of_stack;
	num_mtrrs = 0;

	/* The upper bits of the MTRR mask need to be set according to the
	 * number of physical address bits. */
	mtrr_mask_upper = (1 << ((cpuid_eax(0x80000008) & 0xff) - 32)) - 1;

	/* Each MTRR is pushed mask first, then base, with the upper 32 bits
	 * of each pushed before the lower 32 bits. Together with the final
	 * push of the MTRR count, this creates the following stack layout,
	 * read upward from the returned pointer:
	 *  +0: Number of MTRRs
	 *  +4: MTRR base 0 31:0
	 *  +8: MTRR base 0 63:32
	 * +12: MTRR mask 0 31:0
	 * +16: MTRR mask 0 63:32
	 * +20: MTRR base 1 31:0
	 * +24: MTRR base 1 63:32
	 * +28: MTRR mask 1 31:0
	 * +32: MTRR mask 1 63:32
	 */

	/* Cache the ROM as WP just below 4GiB. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, ~(CONFIG_ROM_SIZE - 1) | MTRR_TYPE_WRPROT);
	num_mtrrs++;

	/* Cache RAM as WB from 0 -> CONFIG_RAMTOP. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, 0 | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	top_of_ram = (uint32_t)cbmem_top();
	/* Cache 8MiB below the top of ram. The top of ram under 4GiB is the
	 * start of the TSEG region. It is required to be 8MiB aligned. Set
	 * this area as cacheable so it can be used later for ramstage before
	 * setting up the entire RAM as cacheable. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, (top_of_ram - (8 << 20)) | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Cache 8MiB at the top of ram. Top of ram is where the TSEG
	 * region resides. However, it is not restricted to SMM mode until
	 * SMM has been relocated. Marking the region cacheable provides
	 * faster access when relocating the SMM handler as well as when
	 * using the TSEG region for other purposes. */
	slot = stack_push(slot, mtrr_mask_upper); /* upper mask */
	slot = stack_push(slot, ~((8 << 20) - 1) | MTRRphysMaskValid);
	slot = stack_push(slot, 0); /* upper base */
	slot = stack_push(slot, top_of_ram | MTRR_TYPE_WRBACK);
	num_mtrrs++;

	/* Save the number of MTRRs to set up. Return the stack location
	 * pointing to the number of MTRRs. */
	slot = stack_push(slot, num_mtrrs);

	return slot;
}
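
/* For reference, a minimal sketch of how the cache-as-ram teardown code
 * could consume the layout built above. The real consumer is assembly in
 * cache_as_ram.inc; the C below is purely illustrative and assumes the
 * msr_t/wrmsr() helpers from <cpu/x86/msr.h> and the
 * MTRRphysBase_MSR()/MTRRphysMask_MSR() macros from <cpu/x86/mtrr.h>.
 *
 *	static void program_mtrrs_from_slot(uint32_t *slot)
 *	{
 *		uint32_t i, num_mtrrs = *slot++;
 *
 *		for (i = 0; i < num_mtrrs; i++) {
 *			msr_t base, mask;
 *
 *			base.lo = *slot++;
 *			base.hi = *slot++;
 *			mask.lo = *slot++;
 *			mask.hi = *slot++;
 *			wrmsr(MTRRphysBase_MSR(i), base);
 *			wrmsr(MTRRphysMask_MSR(i), mask);
 *		}
 *	}
 */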