blob: 85eb931ce9b75e2842a47216fc32f4cf5e070ee6 [file] [log] [blame]
Scott Duplichan1ba2eee2010-10-19 04:58:49 +00001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
Timothy Pearson1c4508e2015-09-05 17:50:29 -05005 * Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
Scott Duplichan1ba2eee2010-10-19 04:58:49 +00006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000015 */
16
Kyösti Mälkki142b52c2013-12-10 07:33:36 +020017#include "cpu/amd/car/post_cache_as_ram.c"
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000018#include "defaults.h"
19#include <stdlib.h>
20#include <cpu/x86/lapic.h>
21#include <cpu/x86/mtrr.h>
22#include <northbridge/amd/amdfam10/amdfam10.h>
23#include <northbridge/amd/amdht/AsPsDefs.h>
24#include <northbridge/amd/amdht/porting.h>
25
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000026#include <northbridge/amd/amdfam10/raminit_amdmct.c>
Kerry She799fed92011-01-01 17:44:07 +000027#include <reset.h>
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000028
Timothy Pearson730a0432015-10-16 13:51:51 -050029#if IS_ENABLED(CONFIG_SET_FIDVID)
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000030static void prep_fid_change(void);
31static void init_fidvid_stage2(u32 apicid, u32 nodeid);
Timothy Pearson730a0432015-10-16 13:51:51 -050032#endif
33
34void cpuSetAMDMSR(uint8_t node_id);
Scott Duplichan1ba2eee2010-10-19 04:58:49 +000035
#if CONFIG_PCI_IO_CFG_EXT
/* Enable extended CF8/CFC config space access (NB_CFG[46] EnableCf8ExtCfg)
 * so the 4K-per-function PCI config range is reachable via I/O ports. */
static void set_EnableCf8ExtCfg(void)
{
	// set the NB_CFG[46]=1;
	msr_t msr;
	msr = rdmsr(NB_CFG_MSR);
	// EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
	msr.hi |= (1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);
}
#else
/* Extended config access not selected: nothing to do. */
static void set_EnableCf8ExtCfg(void) { }
#endif
49
50
51typedef void (*process_ap_t) (u32 apicid, void *gp);
52
uint32_t get_boot_apic_id(uint8_t node, uint32_t core) {
	/* Compute the power-on (boot) APIC ID of a given node/core pair.
	 *
	 * The encoding depends on the CPU family (10h vs. 15h), on
	 * NB_CFG[54] (InitApicIdCpuIdLo), and on whether the package is a
	 * dual-node (MCM) part as reported by F3xE8.
	 */
	uint32_t ap_apicid;

	uint32_t nb_cfg_54;
	uint32_t siblings;	/* highest core number on a node (cores - 1) */
	uint32_t cores_found;

	uint8_t fam15h = 0;
	uint8_t rev_gte_d = 0;
	uint8_t dual_node = 0;
	uint32_t f3xe8;
	uint32_t family;
	uint32_t model;

	uint32_t ApicIdCoreIdSize;

	/* Assume all nodes are the same stepping; otherwise we would need
	   to use nb_cfg_54 from the BSP for all nodes */
	nb_cfg_54 = read_nb_cfg_54();
	f3xe8 = pci_read_config32(NODE_PCI(0, 3), 0xe8);

	family = model = cpuid_eax(0x80000001);
	/* model  = (extended model [19:16] << 4) | base model [7:4] */
	model = ((model & 0xf0000) >> 12) | ((model & 0xf0) >> 4);
	/* family = (extended family [23:20] << 4) | base family [11:8] */
	family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);

	if (family >= 0x6f) {
		/* Family 15h or later */
		fam15h = 1;
		nb_cfg_54 = 1;	/* Fam15h always uses the "core ID in low bits" layout */
	}

	if ((model >= 0x8) || fam15h)
		/* Revision D or later */
		rev_gte_d = 1;

	if (rev_gte_d)
		/* Check for dual node capability (F3xE8[29]) */
		if (f3xe8 & 0x20000000)
			dual_node = 1;

	/* CPUID 8000_0008h ECX[15:12]: width of the core-ID field */
	ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
	if (ApicIdCoreIdSize) {
		siblings = ((1 << ApicIdCoreIdSize) - 1);
	} else {
		siblings = 3;	//quad core
	}

	cores_found = get_core_num_in_bsp(node);
	if (siblings > cores_found)
		siblings = cores_found;

	if (dual_node) {
		/* MCM package: the internal node number sits above the core ID */
		ap_apicid = 0;
		if (fam15h) {
			ap_apicid |= ((node >> 1) & 0x3) << 5;	/* Node ID */
			ap_apicid |= ((node & 0x1) * (siblings + 1)) + core;	/* Core ID */
		} else {
			if (nb_cfg_54) {
				ap_apicid |= ((node >> 1) & 0x3) << 4;	/* Node ID */
				ap_apicid |= ((node & 0x1) * (siblings + 1)) + core;	/* Core ID */
			} else {
				/* Node in low bits, core shifted high */
				ap_apicid |= node & 0x3;	/* Node ID */
				ap_apicid |= (((node & 0x1) * (siblings + 1)) + core) << 4;	/* Core ID */
			}
		}
	} else {
		/* Single-node package */
		if (fam15h) {
			ap_apicid = (node * (siblings + 1)) + core;
		} else {
			ap_apicid = node * (nb_cfg_54 ? (siblings + 1) : 1) +
			    core * (nb_cfg_54 ? 1 : 64);
		}
	}

	return ap_apicid;
}
129
//core_range = 0 : all cores
//core_range = 1 : core 0 only
//core_range = 2 : cores other than core0

/* Invoke process_ap(apicid, gp) for every AP selected by core_range,
 * always skipping the BSP itself. */
static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
			void *gp)
{
	// here assume the OS don't change our apicid
	u32 ap_apicid;

	u32 nodes;
	u32 disable_siblings;
	u32 cores_found;
	int i, j;

	/* get_nodes() is defined in ht_wrapper.c */
	nodes = get_nodes();

	/* Siblings are skipped when logical CPUs are compiled out or the
	   "multi_core" CMOS option disables them (0 means multi core) */
	if (!CONFIG_LOGICAL_CPUS ||
	    read_option(multi_core, 0) != 0) {	// 0 means multi core
		disable_siblings = 1;
	} else {
		disable_siblings = 0;
	}

	for (i = 0; i < nodes; i++) {
		cores_found = get_core_num_in_bsp(i);

		u32 jstart, jend;

		if (core_range == 2) {
			jstart = 1;	/* skip core0 on every node */
		} else {
			jstart = 0;
		}

		if (disable_siblings || (core_range == 1)) {
			jend = 0;
		} else {
			jend = cores_found;
		}

		for (j = jstart; j <= jend; j++) {
			ap_apicid = get_boot_apic_id(i, j);

#if CONFIG_ENABLE_APIC_EXT_ID && (CONFIG_APIC_ID_OFFSET > 0)
#if !CONFIG_LIFT_BSP_APIC_ID
			if ((i != 0) || (j != 0))	/* except bsp */
#endif
				ap_apicid += CONFIG_APIC_ID_OFFSET;
#endif

			if (ap_apicid == bsp_apicid)
				continue;

			process_ap(ap_apicid, gp);

		}
	}
}
190
/* Read a LAPIC register of a remote core using the ICR remote-read
 * protocol.  Returns 0 on success with *pvalue filled in, -1 otherwise. */
static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
{
	int timeout;
	u32 status;
	int result;
	lapic_wait_icr_idle();
	lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));

/* Extra busy check compared to lapic.h */
	timeout = 0;
	do {
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);

	/* Wait for the remote read to leave the in-progress state */
	timeout = 0;
	do {
		status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
	} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

	result = -1;

	/* Remote Read Register only holds valid data on RR_VALID */
	if (status == LAPIC_ICR_RR_VALID) {
		*pvalue = lapic_read(LAPIC_RRR);
		result = 0;
	}
	return result;
}
219
Patrick Georgi76e81522010-11-16 21:25:29 +0000220#if CONFIG_SET_FIDVID
Xavi Drudis Ferran6bdc83b2011-02-28 03:56:52 +0000221static void init_fidvid_ap(u32 apicid, u32 nodeid, u32 coreid);
Scott Duplichan1ba2eee2010-10-19 04:58:49 +0000222#endif
223
/* Debug helper: print a core's APIC/node/core IDs, prefixed by str. */
static inline __attribute__ ((always_inline))
void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
				const char *str)
{
	printk(BIOS_DEBUG,
	       "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
	       apicid, id.nodeid, id.coreid);
}
232
/* Poll a remote core's LAPIC message register until its state field
 * (low 6 bits) equals state, state2, or F10_APSTATE_RESET.
 * Returns 0 on success; on timeout, returns the last raw readback value
 * (or 1 if no remote read ever succeeded). */
uint32_t wait_cpu_state(uint32_t apicid, uint32_t state, uint32_t state2)
{
	u32 readback = 0;
	u32 timeout = 1;	/* doubles as the failure return value */
	int loop = 4000000;
	while (--loop > 0) {
		if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
			continue;
		if ((readback & 0x3f) == state || (readback & 0x3f) == state2 || (readback & 0x3f) == F10_APSTATE_RESET) {
			timeout = 0;
			break;	//target cpu is in stage started
		}
	}
	if (timeout) {
		/* Report the last observed state for diagnostics */
		if (readback) {
			timeout = readback;
		}
	}

	return timeout;
}
254
/* for_each_ap() callback: wait until one AP reports STARTED (or ASLEEP)
 * in its LAPIC message register and log the outcome. */
static void wait_ap_started(u32 ap_apicid, void *gp)
{
	u32 timeout;
	timeout = wait_cpu_state(ap_apicid, F10_APSTATE_STARTED, F10_APSTATE_ASLEEP);
	printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
	if (timeout) {
		printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
	} else {
		printk(BIOS_DEBUG, "started\n");
	}
}
266
267void wait_all_other_cores_started(u32 bsp_apicid)
268{
269 // all aps other than core0
270 printk(BIOS_DEBUG, "started ap apicid: ");
271 for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
272 printk(BIOS_DEBUG, "\n");
273}
274
275void allow_all_aps_stop(u32 bsp_apicid)
276{
277 /* Called by the BSP to indicate AP can stop */
278
279 /* FIXME Do APs use this? */
280
281 // allow aps to stop use 6 bits for state
282 lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | F10_APSTATE_STOPPED);
283}
284
285static void enable_apic_ext_id(u32 node)
286{
287 u32 val;
288
289 val = pci_read_config32(NODE_HT(node), 0x68);
290 val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
291 pci_write_config32(NODE_HT(node), 0x68, val);
292}
293
/* Tear down Cache-As-RAM on an AP, mark it asleep in its LAPIC message
 * register, and halt it.  Does not return. */
static void STOP_CAR_AND_CPU(uint8_t skip_sharedc_config, uint32_t apicid)
{
	msr_t msr;
	uint32_t family;

	family = amd_fam1x_cpu_family();	// inline

	if (family < 0x6f) {
		/* Family 10h or earlier */

		/* Disable L2 IC to L3 connection (Only for CAR) */
		msr = rdmsr(BU_CFG2);
		msr.lo &= ~(1 << ClLinesToNbDis);
		wrmsr(BU_CFG2, msr);
	}

	disable_cache_as_ram(skip_sharedc_config);	// inline

	/* Mark the core as sleeping */
	lapic_write(LAPIC_MSG_REG, (apicid << 24) | F10_APSTATE_ASLEEP);

	/* stop all cores except node0/core0 the bsp .... */
	stop_this_cpu();
}
318
/* Early (romstage) per-core entry point.  Lifts APIC IDs when configured,
 * handles INIT-detected restarts with a soft reset, marks the core STARTED,
 * then — on APs only — programs MSRs/FID-VID/MTRRs and parks the core via
 * STOP_CAR_AND_CPU().  Only the BSP ever returns; the return value is the
 * BSP's APIC ID. */
static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
{
	u32 bsp_apicid = 0;
	u32 apicid;
	uint8_t set_mtrrs;
	struct node_core_id id;

	/* Please refer to the calculations and explanation in cache_as_ram.inc
	   before modifying these values */
	uint32_t max_ap_stack_region_size = CONFIG_MAX_CPUS * CONFIG_DCACHE_AP_STACK_SIZE;
	uint32_t max_bsp_stack_region_size = CONFIG_DCACHE_BSP_STACK_SIZE + CONFIG_DCACHE_BSP_STACK_SLUSH;
	uint32_t bsp_stack_region_upper_boundary = CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE;
	uint32_t bsp_stack_region_lower_boundary = bsp_stack_region_upper_boundary - max_bsp_stack_region_size;
	void * lower_stack_region_boundary = (void*)(bsp_stack_region_lower_boundary - max_ap_stack_region_size);
	/* Sanity check: warn if sysinfo overlaps the CAR stack region */
	if (((void*)(sysinfo + 1)) > lower_stack_region_boundary)
		printk(BIOS_WARNING,
			"sysinfo extends into stack region (sysinfo range: [%p,%p] lower stack region boundary: %p)\n",
			sysinfo, sysinfo + 1, lower_stack_region_boundary);

	/*
	 * already set early mtrr in cache_as_ram.inc
	 */

	/* that is from initial apicid, we need nodeid and coreid
	   later */
	id = get_node_core_id_x();

	/* NB_CFG MSR is shared between cores, so we need make sure
	   core0 is done at first --- use wait_all_core0_started */
	if (id.coreid == 0) {
		set_apicid_cpuid_lo();	/* only set it on core0 */
		set_EnableCf8ExtCfg();	/* only set it on core0 */
#if CONFIG_ENABLE_APIC_EXT_ID
		enable_apic_ext_id(id.nodeid);
#endif
	}

	enable_lapic();

#if CONFIG_ENABLE_APIC_EXT_ID && (CONFIG_APIC_ID_OFFSET > 0)
	u32 initial_apicid = get_initial_apicid();

#if !CONFIG_LIFT_BSP_APIC_ID
	if (initial_apicid != 0)	// other than bsp
#endif
	{
		/* use initial apic id to lift it */
		u32 dword = lapic_read(LAPIC_ID);
		dword &= ~(0xff << 24);
		dword |=
		    (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);

		lapic_write(LAPIC_ID, dword);
	}
#if CONFIG_LIFT_BSP_APIC_ID
	bsp_apicid += CONFIG_APIC_ID_OFFSET;
#endif

#endif

	/* get the apicid, it may be lifted already */
	apicid = lapicid();

	// show our apicid, nodeid, and coreid
	if (id.coreid == 0) {
		if (id.nodeid != 0)	//all core0 except bsp
			print_apicid_nodeid_coreid(apicid, id, " core0: ");
	} else {		//all other cores
		print_apicid_nodeid_coreid(apicid, id, " corex: ");
	}

	/* A pending INIT means we were restarted mid-boot: recover by reset */
	if (cpu_init_detectedx) {
		print_apicid_nodeid_coreid(apicid, id,
					   "\n\n\nINIT detected from ");
		printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
		soft_reset();
	}

	if (id.coreid == 0) {
		if (!(warm_reset_detect(id.nodeid)))	//FIXME: INIT is checked above but check for more resets?
			distinguish_cpu_resets(id.nodeid);	// Also indicates we are started
	}
	// Mark the core as started.
	lapic_write(LAPIC_MSG_REG, (apicid << 24) | F10_APSTATE_STARTED);

	if (apicid != bsp_apicid) {
		/* Setup each AP's cores MSRs.
		 * This happens after HTinit.
		 * The BSP runs this code in it's own path.
		 */
		update_microcode(cpuid_eax(1));

		cpuSetAMDMSR(id.nodeid);

#if CONFIG_SET_FIDVID
#if CONFIG_LOGICAL_CPUS && CONFIG_SET_FIDVID_CORE0_ONLY
		// Run on all AP for proper FID/VID setup.
		if (id.coreid == 0)	// only need set fid for core0
#endif
		{
			// check warm(bios) reset to call stage2 otherwise do stage1
			if (warm_reset_detect(id.nodeid)) {
				printk(BIOS_DEBUG,
				       "init_fidvid_stage2 apicid: %02x\n",
				       apicid);
				init_fidvid_stage2(apicid, id.nodeid);
			} else {
				printk(BIOS_DEBUG,
				       "init_fidvid_ap(stage1) apicid: %02x\n",
				       apicid);
				init_fidvid_ap(apicid, id.nodeid, id.coreid);
			}
		}
#endif

		if (is_fam15h()) {
			/* core 1 on node 0 is special; to avoid corrupting the
			 * BSP do not alter MTRRs on that core */
			if (apicid == 1)
				set_mtrrs = 0;
			else
				set_mtrrs = !!(apicid & 0x1);
		} else {
			set_mtrrs = 1;
		}

		/* AP is ready, configure MTRRs and go to sleep */
		if (set_mtrrs)
			set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);

		printk(BIOS_DEBUG, "Disabling CAR on AP %02x\n", apicid);
		if (is_fam15h()) {
			/* Only modify the MSRs on the odd cores (the last cores to finish booting) */
			STOP_CAR_AND_CPU(!set_mtrrs, apicid);
		} else {
			/* Modify MSRs on all cores */
			STOP_CAR_AND_CPU(0, apicid);
		}

		/* STOP_CAR_AND_CPU() halts the core; reaching here is a bug */
		printk(BIOS_DEBUG,
		       "\nAP %02x should be halted but you are reading this....\n",
		       apicid);
	}

	return bsp_apicid;
}
464
465static u32 is_core0_started(u32 nodeid)
466{
467 u32 htic;
468 device_t device;
469 device = NODE_PCI(nodeid, 0);
470 htic = pci_read_config32(device, HT_INIT_CONTROL);
471 htic &= HTIC_ColdR_Detect;
472 return htic;
473}
474
475void wait_all_core0_started(void)
476{
477 /* When core0 is started, it will distingush_cpu_resets
478 * So wait for that to finish */
479 u32 i;
480 u32 nodes = get_nodes();
481
482 printk(BIOS_DEBUG, "core0 started: ");
483 for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
484 while (!is_core0_started(i)) {
485 }
486 printk(BIOS_DEBUG, " %02x", i);
487 }
488 printk(BIOS_DEBUG, "\n");
489}
490
491#if CONFIG_MAX_PHYSICAL_CPUS > 1
/**
 * void start_node(u32 node)
 *
 * start the core0 in node, so it can generate HT packet to feature code.
 *
 * This function starts the AP nodes core0s. wait_all_core0_started() in
 * romstage.c waits for all the AP to be finished before continuing
 * system init.
 */
static void start_node(u8 node)
{
	u32 val;

	/* Enable routing table */
	printk(BIOS_DEBUG, "Start node %02x", node);

#if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
	/* For FAM10 support, we need to set Dram base/limit for the new node */
	pci_write_config32(NODE_MP(node), 0x44, 0);
	pci_write_config32(NODE_MP(node), 0x40, 3);
#endif

	/* Allow APs to make requests (ROM fetch): clear F0x6C[1] */
	val = pci_read_config32(NODE_HT(node), 0x6c);
	val &= ~(1 << 1);
	pci_write_config32(NODE_HT(node), 0x6c, val);

	printk(BIOS_DEBUG, " done.\n");
}
521
/**
 * static void setup_remote_node(u32 node)
 *
 * Copy the BSP Address Map to each AP.
 */
static void setup_remote_node(u8 node)
{
	/* These registers can be used with the F1x114_x Address Map at the
	   same time, so they must be set even on 32-node systems */
	static const u16 pci_reg[] = {
		/* DRAM Base/Limits Registers */
		0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
		0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
		0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
		0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
		/* MMIO Base/Limits Registers */
		0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
		0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
		/* IO Base/Limits Registers */
		0xc4, 0xcc, 0xd4, 0xdc,
		0xc0, 0xc8, 0xd0, 0xd8,
		/* Configuration Map Registers */
		0xe0, 0xe4, 0xe8, 0xec,
	};
	u16 i;

	printk(BIOS_DEBUG, "setup_remote_node: %02x", node);

	/* copy the default resource map from node 0 */
	for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
		u32 value;
		u16 reg;
		reg = pci_reg[i];
		value = pci_read_config32(NODE_MP(0), reg);
		pci_write_config32(NODE_MP(node), reg, value);

	}
	printk(BIOS_DEBUG, " done\n");
}
561#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
562
static void AMD_Errata281(u8 node, uint64_t revision, u32 platform)
{
	/* Workaround for Transaction Scheduling Conflict in
	 * Northbridge Cross Bar. Implement XCS Token adjustment
	 * for ganged links. Also, perform fix up for the mixed
	 * revision case.
	 */

	u32 reg, val;
	u8 i;
	u8 mixed = 0;
	u8 nodes = get_nodes();

	/* Only server platforms are affected */
	if (platform & AMD_PTYPE_SVR) {
		/* For each node we need to check for a "broken" node:
		 * if this node is not B0/B1 but any other node is, we
		 * have a mixed-revision system */
		if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
			for (i = 0; i < nodes; i++) {
				if (mctGetLogicalCPUID(i) &
				    (AMD_DR_B0 | AMD_DR_B1)) {
					mixed = 1;
					break;
				}
			}
		}

		if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {

			/* F0X68[22:21] DsNpReqLmt0 = 01b */
			val = pci_read_config32(NODE_PCI(node, 0), 0x68);
			val &= ~0x00600000;
			val |= 0x00200000;
			pci_write_config32(NODE_PCI(node, 0), 0x68, val);

			/* F3X6C */
			val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
			val &= ~0x700780F7;
			val |= 0x00010094;
			pci_write_config32(NODE_PCI(node, 3), 0x6C, val);

			/* F3X7C */
			val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
			val &= ~0x707FFF1F;
			val |= 0x00144514;
			pci_write_config32(NODE_PCI(node, 3), 0x7C, val);

			/* F3X144[3:0] RspTok = 0001b */
			val = pci_read_config32(NODE_PCI(node, 3), 0x144);
			val &= ~0x0000000F;
			val |= 0x00000001;
			pci_write_config32(NODE_PCI(node, 3), 0x144, val);

			/* F3X148, F3X14C, F3X150: low byte = 0xDB */
			for (i = 0; i < 3; i++) {
				reg = 0x148 + (i * 4);
				val = pci_read_config32(NODE_PCI(node, 3), reg);
				val &= ~0x000000FF;
				val |= 0x000000DB;
				pci_write_config32(NODE_PCI(node, 3), reg, val);
			}
		}
	}
}
624
static void AMD_Errata298(void)
{
	/* Workaround for L2 Eviction May Occur during operation to
	 * set Accessed or dirty bit.
	 */

	msr_t msr;
	u8 i;
	u8 affectedRev = 0;
	u8 nodes = get_nodes();

	/* For each core we need to check for a "broken" node */
	for (i = 0; i < nodes; i++) {
		if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
			affectedRev = 1;
			break;
		}
	}

	if (affectedRev) {
		msr = rdmsr(HWCR);
		msr.lo |= 0x08;	/* Set TlbCacheDis bit[3] */
		wrmsr(HWCR, msr);

		msr = rdmsr(BU_CFG);
		msr.lo |= 0x02;	/* Set TlbForceMemTypeUc bit[1] */
		wrmsr(BU_CFG, msr);

		msr = rdmsr(OSVW_ID_Length);
		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
		wrmsr(OSVW_ID_Length, msr);

		msr = rdmsr(OSVW_Status);
		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
		wrmsr(OSVW_Status, msr);
	}

	/* B3 parts: only advertise the OSVW ID length; no MSR
	 * workarounds are applied and OSVW_Status is left clear */
	if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
		msr = rdmsr(OSVW_ID_Length);
		msr.lo |= 0x01;	/* OS Visible Workaround - MSR */
		wrmsr(OSVW_ID_Length, msr);

	}
}
669
670static u32 get_platform_type(void)
671{
672 u32 ret = 0;
673
674 switch (SYSTEM_TYPE) {
675 case 1:
676 ret |= AMD_PTYPE_DSK;
677 break;
678 case 2:
679 ret |= AMD_PTYPE_MOB;
680 break;
681 case 0:
682 ret |= AMD_PTYPE_SVR;
683 break;
684 default:
685 break;
686 }
687
688 /* FIXME: add UMA support. */
689
690 /* All Fam10 are multi core */
691 ret |= AMD_PTYPE_MC;
692
693 return ret;
694}
695
/* Program the PSI_L VID threshold (F3xA0[6:0]) from the VID of the lowest
 * enabled P-state, on mobile/desktop platforms only. */
static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
{
	u32 dword;
	int i;
	msr_t msr;

	if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {

		/* The following code sets the PSIVID to the lowest support P state
		 * assuming that the VID for the lowest power state is below
		 * the VDD voltage regulator threshold. (This also assumes that there
		 * is a Pstate lower than P0)
		 */

		for (i = 4; i >= 0; i--) {
			msr = rdmsr(PS_REG_BASE + i);
			/* Pstate valid? */
			if (msr.hi & PS_EN_MASK) {
				/* NOTE(review): NODE_PCI(i, 3) indexes by the
				 * P-state loop counter, while the 'node'
				 * parameter is otherwise unused — this looks
				 * like it should be NODE_PCI(node, 3); verify
				 * against the BKDG before changing. */
				dword = pci_read_config32(NODE_PCI(i, 3), 0xA0);
				dword &= ~0x7F;
				dword |= (msr.lo >> 9) & 0x7F;	/* CpuVid from the P-state MSR */
				pci_write_config32(NODE_PCI(i, 3), 0xA0, dword);
				break;
			}
		}
	}
}
723
724/**
725 * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
726 * HT Phy operations are not valid on links that aren't present, so this
727 * prevents invalid accesses.
728 *
729 * Returns the offset of the link register.
730 */
731static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
732{
733 u32 reg;
734 u32 val;
735
736 /* get start of CPU HT Host Capabilities */
737 val = pci_read_config32(NODE_PCI(node, 0), 0x34);
738 val &= 0xFF; //reg offset of first link
739
740 cap_count++;
741
742 /* Traverse through the capabilities. */
743 do {
744 reg = pci_read_config32(NODE_PCI(node, 0), val);
745 /* Is the capability block a HyperTransport capability block? */
746 if ((reg & 0xFF) == 0x08) {
747 /* Is the HT capability block an HT Host Capability? */
748 if ((reg & 0xE0000000) == (1 << 29))
749 cap_count--;
750 }
751
752 if (cap_count)
753 val = (reg >> 8) & 0xFF; //update reg offset
754 } while (cap_count && val);
755
756 *offset = (u8) val;
757
758 /* If requested capability found val != 0 */
759 if (!cap_count)
760 return TRUE;
761 else
762 return FALSE;
763}
764
765/**
766 * AMD_checkLinkType - Compare desired link characteristics using a logical
767 * link type mask.
768 *
769 * Returns the link characteristic mask.
770 */
771static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
772{
773 u32 val;
774 u32 linktype = 0;
775
776 /* Check connect, init and coherency */
777 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
778 val &= 0x1F;
779
780 if (val == 3)
781 linktype |= HTPHY_LINKTYPE_COHERENT;
782
783 if (val == 7)
784 linktype |= HTPHY_LINKTYPE_NONCOHERENT;
785
786 if (linktype) {
787 /* Check gen3 */
788 val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
789
790 if (((val >> 8) & 0x0F) > 6)
791 linktype |= HTPHY_LINKTYPE_HT3;
792 else
793 linktype |= HTPHY_LINKTYPE_HT1;
794
795 /* Check ganged */
796 val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
797
798 if (val & 1)
799 linktype |= HTPHY_LINKTYPE_GANGED;
800 else
801 linktype |= HTPHY_LINKTYPE_UNGANGED;
802 }
803 return linktype;
804}
805
/**
 * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
 * a phy setting for that link.
 */
static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
{
	u32 phyReg;
	u32 phyBase;
	u32 val;

	/* Determine this link's portal (sublink 1 shares sublink 0's portal) */
	if (link > 3)
		link -= 4;

	phyBase = ((u32) link << 3) | 0x180;

	/* Get the portal control register's initial value
	 * and update it to access the desired phy register
	 */
	phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);

	if (fam10_htphy_default[entry].htreg > 0x1FF) {
		phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
		phyReg |= HTPHY_DIRECT_MAP;
	} else {
		phyReg &= ~HTPHY_OFFSET_MASK;
	}

	/* Now get the current phy register data
	 * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
	 */
	phyReg |= fam10_htphy_default[entry].htreg;
	pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);

	/* Poll until the portal reports the read is complete */
	do {
		val = pci_read_config32(NODE_PCI(node, 4), phyBase);
	} while (!(val & HTPHY_IS_COMPLETE_MASK));

	/* Now we have the phy register data, apply the change */
	val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
	val &= ~fam10_htphy_default[entry].mask;
	val |= fam10_htphy_default[entry].data;
	pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);

	/* write it through the portal to the phy
	 * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
	 */
	phyReg |= HTPHY_WRITE_CMD;
	pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);

	/* Poll until the write has completed */
	do {
		val = pci_read_config32(NODE_PCI(node, 4), phyBase);
	} while (!(val & HTPHY_IS_COMPLETE_MASK));
}
860
void cpuSetAMDMSR(uint8_t node_id)
{
	/* This routine loads the CPU with default settings in fam10_msr_default
	 * table . It must be run after Cache-As-RAM has been enabled, and
	 * Hypertransport initialization has taken place. Also note
	 * that it is run on the current processor only, and only for the current
	 * processor core.
	 */
	msr_t msr;
	u8 i;
	u32 platform;
	uint64_t revision;

	printk(BIOS_DEBUG, "cpuSetAMDMSR ");

	revision = mctGetLogicalCPUID(0xFF);	/* logical revision mask of this CPU */
	platform = get_platform_type();

	/* Apply every table entry that matches this revision and platform */
	for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
		if ((fam10_msr_default[i].revision & revision) &&
		    (fam10_msr_default[i].platform & platform)) {
			msr = rdmsr(fam10_msr_default[i].msr);
			msr.hi &= ~fam10_msr_default[i].mask_hi;
			msr.hi |= fam10_msr_default[i].data_hi;
			msr.lo &= ~fam10_msr_default[i].mask_lo;
			msr.lo |= fam10_msr_default[i].data_lo;
			wrmsr(fam10_msr_default[i].msr, msr);
		}
	}
	AMD_Errata298();

	if (revision & AMD_FAM15_ALL) {
		/* Derive the enabled compute-unit count from F5x80[3:0]
		 * and program BU_CFG2 ThrottleNbInterface[1:0] = count-1 */
		uint32_t f5x80;
		uint8_t enabled;
		uint8_t compute_unit_count = 0;
		f5x80 = pci_read_config32(NODE_PCI(node_id, 5), 0x80);
		enabled = f5x80 & 0xf;
		if (enabled == 0x1)
			compute_unit_count = 1;
		if (enabled == 0x3)
			compute_unit_count = 2;
		if (enabled == 0x7)
			compute_unit_count = 3;
		if (enabled == 0xf)
			compute_unit_count = 4;
		msr = rdmsr(BU_CFG2);
		msr.lo &= ~(0x3 << 6);	/* ThrottleNbInterface[1:0] */
		msr.lo |= (((compute_unit_count - 1) & 0x3) << 6);
		wrmsr(BU_CFG2, msr);
	}

	/* Revision C0 and above: copy the DiDt fields from F3x1FC into FP_CFG */
	if (revision & AMD_OR_C0) {
		uint32_t f3x1fc = pci_read_config32(NODE_PCI(node_id, 3), 0x1fc);
		msr = rdmsr(FP_CFG);
		msr.hi &= ~(0x7 << (42-32));	/* DiDtCfg4 */
		msr.hi |= (((f3x1fc >> 17) & 0x7) << (42-32));
		msr.hi &= ~(0x1 << (41-32));	/* DiDtCfg5 */
		msr.hi |= (((f3x1fc >> 22) & 0x1) << (41-32));
		msr.hi &= ~(0x1 << (40-32));	/* DiDtCfg3 */
		msr.hi |= (((f3x1fc >> 16) & 0x1) << (40-32));
		msr.hi &= ~(0x7 << (32-32));	/* DiDtCfg1 (1) */
		msr.hi |= (((f3x1fc >> 11) & 0x7) << (32-32));
		msr.lo &= ~(0x1f << 27);	/* DiDtCfg1 (2) */
		msr.lo |= (((f3x1fc >> 6) & 0x1f) << 27);
		msr.lo &= ~(0x3 << 25);	/* DiDtCfg2 */
		msr.lo |= (((f3x1fc >> 14) & 0x3) << 25);
		msr.lo &= ~(0x1f << 18);	/* DiDtCfg0 */
		msr.lo |= (((f3x1fc >> 1) & 0x1f) << 18);
		msr.lo &= ~(0x1 << 16);	/* DiDtMode */
		msr.lo |= ((f3x1fc & 0x1) << 16);
		wrmsr(FP_CFG, msr);
	}

	printk(BIOS_DEBUG, " done\n");
}
937
static void cpuSetAMDPCI(u8 node)
{
	/* This routine loads the CPU with default settings in fam10_pci_default
	 * table . It must be run after Cache-As-RAM has been enabled, and
	 * Hypertransport initialization has taken place. Also note
	 * that it is run for the first core on each node
	 */
	u8 i, j;
	u32 platform;
	u32 val;
	u8 offset;
	uint64_t revision;

	printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);

	revision = mctGetLogicalCPUID(node);
	platform = get_platform_type();

	AMD_SetupPSIVID_d(platform, node);	/* Set PSIVID offset which is not table driven */

	/* Apply every PCI-config table entry matching revision and platform */
	for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
		if ((fam10_pci_default[i].revision & revision) &&
		    (fam10_pci_default[i].platform & platform)) {
			val = pci_read_config32(NODE_PCI(node,
							 fam10_pci_default[i].
							 function),
						fam10_pci_default[i].offset);
			val &= ~fam10_pci_default[i].mask;
			val |= fam10_pci_default[i].data;
			pci_write_config32(NODE_PCI(node,
						    fam10_pci_default[i].
						    function),
					   fam10_pci_default[i].offset, val);
		}
	}

	/* Apply HT phy table entries to each present link of matching type */
	for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
		if ((fam10_htphy_default[i].revision & revision) &&
		    (fam10_htphy_default[i].platform & platform)) {
			/* HT Phy settings either apply to both sublinks or have
			 * separate registers for sublink zero and one, so there
			 * will be two table entries. So, here we only loop
			 * through the sublink zeros in function zero.
			 */
			for (j = 0; j < 4; j++) {
				if (AMD_CpuFindCapability(node, j, &offset)) {
					if (AMD_checkLinkType(node, j, offset)
					    & fam10_htphy_default[i].linktype) {
						AMD_SetHtPhyRegister(node, j,
								     i);
					}
				} else {
					/* No more capabilities,
					 * link not present
					 */
					break;
				}
			}
		}
	}

	/* FIXME: add UMA support and programXbarToSriReg(); */

	AMD_Errata281(node, revision, platform);

	/* FIXME: if the dct phy doesn't init correct it needs to reset.
	   if (revision & (AMD_DR_B2 | AMD_DR_B3))
	   dctPhyDiag(); */

	printk(BIOS_DEBUG, " done\n");
}
1009
1010#ifdef UNUSED_CODE
/* Clearing the MCA registers is apparently handled in the ramstage CPU Function 3 driver */
static void cpuInitializeMCA(void)
{
	/* Clears Machine Check Architecture (MCA) registers, which power on
	 * containing unknown data, on currently running processor.
	 * This routine should only be executed on initial power on (cold boot),
	 * not across a warm reset because valid data is present at that time.
	 */

	msr_t msr;
	u32 reg;
	u8 i;

	if (cpuid_edx(1) & 0x4080) {	/* MCE and MCA (edx[7] and edx[14]) */
		msr = rdmsr(MCG_CAP);
		if (msr.lo & MCG_CTL_P) {	/* MCG_CTL_P bit is set? */
			msr.lo &= 0xFF;	/* MCG_CAP[7:0] = bank count */
			msr.lo--;
			msr.lo <<= 2;	/* multiply the count by 4 */
			reg = MC0_STA + msr.lo;	/* status MSR of the last bank */
			msr.lo = msr.hi = 0;
			for (i = 0; i < 4; i++) {
				wrmsr(reg, msr);	/* zero the status register */
				reg -= 4;	/* Touch status regs for each bank */
			}
		}
	}
}
1039#endif
1040
/**
 * finalize_node_setup()
 *
 * Do any additional post HT init: record southbridge link/bus/device info
 * in sysinfo, apply per-node PCI/HT-phy defaults, prep FID/VID, and bring
 * up the remaining AP nodes.
 */
static void finalize_node_setup(struct sys_info *sysinfo)
{
	u8 i;
	u8 nodes = get_nodes();
	u32 reg;

	/* read Node0 F0_0x64 bit [8:10] to find out SbLink # */
	reg = pci_read_config32(NODE_HT(0), 0x64);
	sysinfo->sblk = (reg >> 8) & 7;
	sysinfo->sbbusn = 0;
	sysinfo->nodes = nodes;
	sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);

	/* Apply the fam10 PCI/HT-phy default tables to every node */
	for (i = 0; i < nodes; i++) {
		cpuSetAMDPCI(i);
	}

#if CONFIG_SET_FIDVID
	// Prep each node for FID/VID setup.
	prep_fid_change();
#endif

#if CONFIG_MAX_PHYSICAL_CPUS > 1
	/* Skip the BSP, start at node 1 */
	for (i = 1; i < nodes; i++) {
		setup_remote_node(i);
		start_node(i);
	}
#endif
}
1077
1078#include "fidvid.c"