Patrick Georgi | ac95903 | 2020-05-05 22:49:26 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 2 | |
| 3 | #include <assert.h> |
| 4 | #include <commonlib/sort.h> |
| 5 | #include <console/console.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 6 | #include <delay.h> |
| 7 | #include <device/pci.h> |
| 8 | #include <hob_iiouds.h> |
| 9 | #include <intelblocks/cpulib.h> |
| 10 | #include <intelblocks/pcr.h> |
| 11 | #include <soc/iomap.h> |
| 12 | #include <soc/cpu.h> |
| 13 | #include <soc/msr.h> |
| 14 | #include <soc/pci_devs.h> |
| 15 | #include <soc/pcr_ids.h> |
| 16 | #include <soc/soc_util.h> |
Andrey Petrov | 662da6c | 2020-03-16 22:46:57 -0700 | [diff] [blame] | 17 | #include <soc/util.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 18 | #include <timer.h> |
| 19 | |
| 20 | /* |
| 21 | * Get TOLM CSR B0:D5:F0:Offset_d0h |
| 22 | */ |
| 23 | uintptr_t get_tolm(uint32_t bus) |
| 24 | { |
| 25 | uint32_t w = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC), |
| 26 | VTD_TOLM_CSR); |
| 27 | uintptr_t addr = w & 0xfc000000; |
| 28 | printk(BIOS_DEBUG, "VTD_TOLM_CSR 0x%x, addr: 0x%lx\n", w, addr); |
| 29 | return addr; |
| 30 | } |
| 31 | |
| 32 | void get_tseg_base_lim(uint32_t bus, uint32_t *base, uint32_t *limit) |
| 33 | { |
| 34 | uint32_t w1 = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC), |
| 35 | VTD_TSEG_BASE_CSR); |
| 36 | uint32_t wh = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC), |
| 37 | VTD_TSEG_LIMIT_CSR); |
| 38 | *base = w1 & 0xfff00000; |
| 39 | *limit = wh & 0xfff00000; |
| 40 | } |
| 41 | |
| 42 | /* |
| 43 | * Get MMCFG CSR B1:D29:F1:Offset_C0h |
| 44 | */ |
| 45 | uintptr_t get_cha_mmcfg_base(uint32_t bus) |
| 46 | { |
| 47 | uint32_t wl = pci_io_read_config32(PCI_DEV(bus, CHA_UTIL_ALL_DEV, |
| 48 | CHA_UTIL_ALL_FUNC), CHA_UTIL_ALL_MMCFG_CSR); |
| 49 | uint32_t wh = pci_io_read_config32(PCI_DEV(bus, CHA_UTIL_ALL_DEV, |
| 50 | CHA_UTIL_ALL_FUNC), CHA_UTIL_ALL_MMCFG_CSR + 4); |
| 51 | uintptr_t addr = ((((wh & 0x3fff) << 6) | ((wl >> 26) & 0x3f)) << 26); |
| 52 | printk(BIOS_DEBUG, "CHA_UTIL_ALL_MMCFG_CSR wl: 0x%x, wh: 0x%x, addr: 0x%lx\n", |
| 53 | wl, wh, addr); |
| 54 | return addr; |
| 55 | } |
| 56 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 57 | uint32_t top_of_32bit_ram(void) |
| 58 | { |
| 59 | uintptr_t mmcfg, tolm; |
| 60 | uint32_t bus0 = 0, bus1 = 0; |
| 61 | uint32_t base = 0, limit = 0; |
| 62 | |
| 63 | get_cpubusnos(&bus0, &bus1, NULL, NULL); |
| 64 | |
| 65 | mmcfg = get_cha_mmcfg_base(bus1); |
| 66 | tolm = get_tolm(bus0); |
| 67 | printk(BIOS_DEBUG, "bus0: 0x%x, bus1: 0x%x, mmcfg: 0x%lx, tolm: 0x%lx\n", |
| 68 | bus0, bus1, mmcfg, tolm); |
| 69 | get_tseg_base_lim(bus0, &base, &limit); |
| 70 | printk(BIOS_DEBUG, "tseg base: 0x%x, limit: 0x%x\n", base, limit); |
| 71 | |
| 72 | /* We will use TSEG base as the top of DRAM */ |
| 73 | return base; |
| 74 | } |
| 75 | |
| 76 | /* |
| 77 | * +-------------------------+ TOLM |
| 78 | * | System Management Mode | |
| 79 | * | code and data | |
| 80 | * | (TSEG) | |
| 81 | * +-------------------------+ SMM base (aligned) |
| 82 | * | | |
| 83 | * | Chipset Reserved Memory | |
| 84 | * | | |
| 85 | * +-------------------------+ top_of_ram (aligned) |
| 86 | * | | |
| 87 | * | CBMEM Root | |
| 88 | * | | |
| 89 | * +-------------------------+ |
| 90 | * | | |
| 91 | * | FSP Reserved Memory | |
| 92 | * | | |
| 93 | * +-------------------------+ |
| 94 | * | | |
| 95 | * | Various CBMEM Entries | |
| 96 | * | | |
| 97 | * +-------------------------+ top_of_stack (8 byte aligned) |
| 98 | * | | |
| 99 | * | stack (CBMEM Entry) | |
| 100 | * | | |
| 101 | * +-------------------------+ |
| 102 | */ |
| 103 | |
/* Convenience wrapper: read a 32-bit config register via MMIO (MMCFG) for B/D/F. */
uint32_t pci_read_mmio_reg(int bus, uint32_t dev, uint32_t func, int offset)
{
	return pci_mmio_read_config32(PCI_DEV(bus, dev, func), offset);
}
| 108 | |
| 109 | uint32_t get_socket_stack_busno(uint32_t socket, uint32_t stack) |
| 110 | { |
| 111 | size_t hob_size; |
| 112 | const IIO_UDS *hob; |
| 113 | const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID; |
| 114 | |
| 115 | assert(socket < MAX_SOCKET && stack < MAX_IIO_STACK); |
| 116 | |
| 117 | hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size); |
| 118 | assert(hob != NULL && hob_size != 0); |
| 119 | |
| 120 | return hob->PlatformData.CpuQpiInfo[socket].StackBus[stack]; |
| 121 | } |
| 122 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 123 | /* return 1 if command timed out else 0 */ |
| 124 | static uint32_t wait_for_bios_cmd_cpl(pci_devfn_t dev, uint32_t reg, uint32_t mask, |
| 125 | uint32_t target) |
| 126 | { |
| 127 | uint32_t max_delay = 5000; /* 5 seconds max */ |
| 128 | uint32_t step_delay = 50; /* 50 us */ |
| 129 | struct stopwatch sw; |
| 130 | |
| 131 | stopwatch_init_msecs_expire(&sw, max_delay); |
| 132 | while ((pci_mmio_read_config32(dev, reg) & mask) != target) { |
| 133 | udelay(step_delay); |
| 134 | if (stopwatch_expired(&sw)) { |
| 135 | printk(BIOS_ERR, "%s timed out for dev: 0x%x, reg: 0x%x, " |
| 136 | "mask: 0x%x, target: 0x%x\n", __func__, dev, reg, mask, target); |
| 137 | return 1; /* timedout */ |
| 138 | } |
| 139 | } |
| 140 | return 0; /* successful */ |
| 141 | } |
| 142 | |
| 143 | /* return 1 if command timed out else 0 */ |
| 144 | static int set_bios_reset_cpl_for_package(uint32_t socket, uint32_t rst_cpl_mask, |
| 145 | uint32_t pcode_init_mask, uint32_t val) |
| 146 | { |
| 147 | uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK); |
| 148 | pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN); |
| 149 | |
| 150 | uint32_t reg = pci_mmio_read_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG); |
| 151 | reg &= (uint32_t) ~rst_cpl_mask; |
| 152 | reg |= rst_cpl_mask; |
| 153 | reg |= val; |
| 154 | |
| 155 | /* update BIOS RESET completion bit */ |
| 156 | pci_mmio_write_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG, reg); |
| 157 | |
| 158 | /* wait for PCU ack */ |
| 159 | return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_RESET_CPL_REG, pcode_init_mask, |
| 160 | pcode_init_mask); |
| 161 | } |
| 162 | |
| 163 | /* return 1 if command timed out else 0 */ |
| 164 | static uint32_t write_bios_mailbox_cmd(pci_devfn_t dev, uint32_t command, uint32_t data) |
| 165 | { |
| 166 | /* verify bios is not in busy state */ |
| 167 | if (wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG, BIOS_MB_RUN_BUSY_MASK, 0)) |
| 168 | return 1; /* timed out */ |
| 169 | |
| 170 | /* write data to data register */ |
| 171 | printk(BIOS_SPEW, "%s - pci_mmio_write_config32 reg: 0x%x, data: 0x%x\n", __func__, |
| 172 | PCU_CR1_BIOS_MB_DATA_REG, data); |
| 173 | pci_mmio_write_config32(dev, PCU_CR1_BIOS_MB_DATA_REG, data); |
| 174 | |
| 175 | /* write the command */ |
| 176 | printk(BIOS_SPEW, "%s - pci_mmio_write_config32 reg: 0x%x, data: 0x%x\n", __func__, |
| 177 | PCU_CR1_BIOS_MB_INTERFACE_REG, |
| 178 | (uint32_t) (command | BIOS_MB_RUN_BUSY_MASK)); |
| 179 | pci_mmio_write_config32(dev, PCU_CR1_BIOS_MB_INTERFACE_REG, |
| 180 | (uint32_t) (command | BIOS_MB_RUN_BUSY_MASK)); |
| 181 | |
| 182 | /* wait for completion or time out*/ |
| 183 | return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG, |
| 184 | BIOS_MB_RUN_BUSY_MASK, 0); |
| 185 | } |
| 186 | |
/*
 * Per-socket PCU CSR programming that must happen before BIOS_RESET_CPL3
 * is signaled: lock P-state limits, lock SAPMCTL, set package C-state
 * entry criteria and the PROCHOT response ratio. Writes are performed on
 * every socket. Order of the register writes follows the original FSP/EDS
 * flow and should not be rearranged.
 */
void config_reset_cpl3_csrs(void)
{
	uint32_t data, plat_info, max_min_turbo_limit_ratio;

	for (uint32_t socket = 0; socket < MAX_SOCKET; ++socket) {
		uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);

		/* configure PCU_CR0_FUN csrs */
		pci_devfn_t cr0_dev = PCI_DEV(bus, PCU_DEV, PCU_CR0_FUN);
		/* lock the P-state limits register against later changes */
		data = pci_mmio_read_config32(cr0_dev, PCU_CR0_P_STATE_LIMITS);
		data |= P_STATE_LIMITS_LOCK;
		pci_mmio_write_config32(cr0_dev, PCU_CR0_P_STATE_LIMITS, data);

		/* read max non-turbo ratio, for debug output only */
		plat_info = pci_mmio_read_config32(cr0_dev, PCU_CR0_PLATFORM_INFO);
		dump_csr64("", cr0_dev, PCU_CR0_PLATFORM_INFO);
		max_min_turbo_limit_ratio =
			(plat_info & MAX_NON_TURBO_LIM_RATIO_MASK) >>
			MAX_NON_TURBO_LIM_RATIO_SHIFT;
		printk(BIOS_SPEW, "plat_info: 0x%x, max_min_turbo_limit_ratio: 0x%x\n",
			plat_info, max_min_turbo_limit_ratio);

		/* configure PCU_CR1_FUN csrs */
		pci_devfn_t cr1_dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

		data = pci_mmio_read_config32(cr1_dev, PCU_CR1_SAPMCTL);
		/* clear bits 27:31 - FSP sets this with 0x7 which needs to be cleared */
		data &= 0x0fffffff;
		data |= SAPMCTL_LOCK_MASK;
		pci_mmio_write_config32(cr1_dev, PCU_CR1_SAPMCTL, data);

		/* configure PCU_CR2_FUN csrs */
		pci_devfn_t cr2_dev = PCI_DEV(bus, PCU_DEV, PCU_CR2_FUN);

		/* allow package C-state entry only when PCIe links are in L1 */
		data = PCIE_IN_PKGCSTATE_L1_MASK;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PKG_CST_ENTRY_CRITERIA_MASK, data);

		/* same criterion for the KTI/UPI links */
		data = KTI_IN_PKGCSTATE_L1_MASK;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PKG_CST_ENTRY_CRITERIA_MASK2, data);

		data = PROCHOT_RATIO;
		printk(BIOS_SPEW, "PCU_CR2_PROCHOT_RESPONSE_RATIO_REG data: 0x%x\n", data);
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PROCHOT_RESPONSE_RATIO_REG, data);
		dump_csr("", cr2_dev, PCU_CR2_PROCHOT_RESPONSE_RATIO_REG);

		/*
		 * NOTE(review): a macro named *_SHIFT is OR'd in like a bit
		 * mask here — verify against the register definition that
		 * UNOCRE_PLIMIT_OVERRIDE_SHIFT really is the mask value and
		 * not a bit position.
		 */
		data = pci_mmio_read_config32(cr2_dev, PCU_CR2_DYNAMIC_PERF_POWER_CTL);
		data |= UNOCRE_PLIMIT_OVERRIDE_SHIFT;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_DYNAMIC_PERF_POWER_CTL, data);
	}
}
| 236 | |
/*
 * Signal BIOS initialization completion to the PCU of one socket:
 * exercise the BIOS-to-PCU mailbox, set RST_CPL3 and RST_CPL4 (waiting
 * for the matching PCODE_INIT_DONE acks), then lock DESIRED_CORES_CFG2.
 * Dies on any mailbox/ack timeout.
 */
static void set_bios_init_completion_for_package(uint32_t socket)
{
	uint32_t data;
	uint32_t timedout;
	uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);
	pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

	/* read pcu config */
	timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
	if (timedout) {
		/* 2nd try */
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
		if (timedout)
			die("BIOS PCU Misc Config Read timed out.\n");

		/*
		 * NOTE(review): the read-back and write-back below run only
		 * on this retry path — presumably to put the PCU back into a
		 * stable state after the failed first attempt; confirm this
		 * is intentional and not meant for the success path too.
		 */
		data = pci_mmio_read_config32(dev, PCU_CR1_BIOS_MB_DATA_REG);
		printk(BIOS_SPEW, "%s - pci_mmio_read_config32 reg: 0x%x, data: 0x%x\n",
			__func__, PCU_CR1_BIOS_MB_DATA_REG, data);

		/* write PCU config */
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_WRITE_PCU_MISC_CFG, data);
		if (timedout)
			die("BIOS PCU Misc Config Write timed out.\n");
	}

	/* update RST_CPL3, PCODE_INIT_DONE3 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL3_MASK,
		PCODE_INIT_DONE3_MASK, RST_CPL3_MASK);
	if (timedout)
		die("BIOS RESET CPL3 timed out.\n");

	/* update RST_CPL4, PCODE_INIT_DONE4 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL4_MASK,
		PCODE_INIT_DONE4_MASK, RST_CPL4_MASK);
	if (timedout)
		die("BIOS RESET CPL4 timed out.\n");
	/* set CSR_DESIRED_CORES_CFG2 lock bit */
	data = pci_mmio_read_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG);
	data |= PCU_CR1_DESIRED_CORES_CFG2_REG_LOCK_MASK;
	printk(BIOS_SPEW, "%s - pci_mmio_write_config32 PCU_CR1_DESIRED_CORES_CFG2_REG 0x%x, data: 0x%x\n",
		__func__, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
	pci_mmio_write_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
}
| 280 | |
| 281 | void set_bios_init_completion(void) |
| 282 | { |
| 283 | uint32_t sbsp_socket_id = 0; /* TODO - this needs to be configurable */ |
| 284 | |
| 285 | for (uint32_t socket = 0; socket < MAX_SOCKET; ++socket) { |
| 286 | if (socket == sbsp_socket_id) |
| 287 | continue; |
| 288 | set_bios_init_completion_for_package(socket); |
| 289 | } |
| 290 | set_bios_init_completion_for_package(sbsp_socket_id); |
| 291 | } |
| 292 | |
| 293 | void get_core_thread_bits(uint32_t *core_bits, uint32_t *thread_bits) |
| 294 | { |
| 295 | register int ecx; |
| 296 | struct cpuid_result cpuid_regs; |
| 297 | |
| 298 | /* get max index of CPUID */ |
| 299 | cpuid_regs = cpuid(0); |
| 300 | assert(cpuid_regs.eax >= 0xb); /* cpuid_regs.eax is max input value for cpuid */ |
| 301 | |
| 302 | *thread_bits = *core_bits = 0; |
| 303 | ecx = 0; |
| 304 | while (1) { |
| 305 | cpuid_regs = cpuid_ext(0xb, ecx); |
| 306 | if (ecx == 0) { |
| 307 | *thread_bits = (cpuid_regs.eax & 0x1f); |
| 308 | } else { |
| 309 | *core_bits = (cpuid_regs.eax & 0x1f) - *thread_bits; |
| 310 | break; |
| 311 | } |
| 312 | ecx++; |
| 313 | } |
| 314 | } |
| 315 | |
/*
 * Decompose an APIC ID into package / core / thread numbers using the bit
 * widths reported by get_core_thread_bits(). Any of the output pointers
 * may be NULL to skip that field.
 */
void get_cpu_info_from_apicid(uint32_t apicid, uint32_t core_bits, uint32_t thread_bits,
	uint8_t *package, uint8_t *core, uint8_t *thread)
{
	if (package != NULL)
		*package = (apicid >> (thread_bits + core_bits));
	if (core != NULL)
		/* ~0U, not ~0: left-shifting the negative value ~0 is undefined behavior */
		*core = (uint8_t)((apicid >> thread_bits) & ~((~0U) << core_bits));
	if (thread != NULL)
		*thread = (uint8_t)(apicid & ~((~0U) << thread_bits));
}
| 326 | |
| 327 | int get_cpu_count(void) |
| 328 | { |
| 329 | size_t hob_size; |
| 330 | const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID; |
| 331 | const IIO_UDS *hob; |
| 332 | |
| 333 | /* these fields are incorrect - need debugging */ |
| 334 | hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size); |
| 335 | assert(hob != NULL && hob_size != 0); |
| 336 | return hob->SystemStatus.numCpus; |
| 337 | } |
| 338 | |
/* Threads per package as reported by the CPU topology. */
int get_threads_per_package(void)
{
	unsigned int cores, threads;

	cpu_read_topology(&cores, &threads);
	return threads;
}
| 345 | |
/* Total thread count for the platform: packages reported by FSP times threads per package. */
int get_platform_thread_count(void)
{
	return get_cpu_count() * get_threads_per_package();
}
| 350 | |
| 351 | void get_iiostack_info(struct iiostack_resource *info) |
| 352 | { |
| 353 | size_t hob_size; |
| 354 | const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID; |
| 355 | const IIO_UDS *hob; |
| 356 | |
| 357 | hob = fsp_find_extension_hob_by_guid( |
| 358 | fsp_hob_iio_universal_data_guid, &hob_size); |
| 359 | assert(hob != NULL && hob_size != 0); |
| 360 | |
| 361 | // copy IIO Stack info from FSP HOB |
| 362 | info->no_of_stacks = 0; |
| 363 | for (int s = 0; s < hob->PlatformData.numofIIO; ++s) { |
| 364 | for (int x = 0; x < MAX_IIO_STACK; ++x) { |
| 365 | const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x]; |
| 366 | // TODO: do we have situation with only bux 0 and one stack? |
| 367 | if (ri->BusBase >= ri->BusLimit) |
| 368 | continue; |
| 369 | assert(info->no_of_stacks < (CONFIG_MAX_SOCKET * MAX_IIO_STACK)); |
| 370 | memcpy(&info->res[info->no_of_stacks++], ri, sizeof(STACK_RES)); |
| 371 | } |
| 372 | } |
| 373 | } |
| 374 | |
| 375 | #if ENV_RAMSTAGE |
| 376 | |
/*
 * Renumber the APIC devices in the device tree so that APIC IDs are
 * assigned in thread-major order (all thread-0 cores first, then thread-1
 * siblings) and set each device's node_id to its package number derived
 * from the APIC ID.
 */
void xeonsp_init_cpu_config(void)
{
	struct device *dev;
	int apic_ids[CONFIG_MAX_CPUS] = {0}, apic_ids_by_thread[CONFIG_MAX_CPUS] = {0};
	int num_apics = 0;
	uint32_t core_bits, thread_bits;
	unsigned int core_count, thread_count;
	unsigned int num_cpus;

	/* sort APIC ids in ascending order to identify apicid ranges for
	   each numa domain
	*/
	/* collect the APIC IDs of all enabled APIC devices under CPU clusters */
	for (dev = all_devices; dev; dev = dev->next) {
		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		apic_ids[num_apics++] = dev->path.apic.apic_id;
	}
	if (num_apics > 1)
		bubblesort(apic_ids, num_apics, NUM_ASCENDING);

	/* sanity check: every package must contribute thread_count APICs */
	num_cpus = get_cpu_count();
	cpu_read_topology(&core_count, &thread_count);
	assert(num_apics == (num_cpus * thread_count));

	/* sort them by thread i.e., all cores with thread 0 and then thread 1 */
	/*
	 * NOTE(review): this assumes bit 0 of the APIC ID distinguishes the
	 * two SMT siblings and that even/odd IDs alternate in the sorted
	 * list — the "index + (num_apics/2) - 1" placement relies on the
	 * matching even ID having been placed immediately before; confirm
	 * this holds for all supported topologies.
	 */
	int index = 0;
	for (int id = 0; id < num_apics; ++id) {
		int apic_id = apic_ids[id];
		if (apic_id & 0x1) { /* 2nd thread */
			apic_ids_by_thread[index + (num_apics/2) - 1] = apic_id;
		} else { /* 1st thread */
			apic_ids_by_thread[index++] = apic_id;
		}
	}


	/* update apic_id, node_id in sorted order */
	num_apics = 0;
	get_core_thread_bits(&core_bits, &thread_bits);
	for (dev = all_devices; dev; dev = dev->next) {
		uint8_t package;

		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		/* reassign this device the next thread-major APIC ID */
		dev->path.apic.apic_id = apic_ids_by_thread[num_apics];
		get_cpu_info_from_apicid(dev->path.apic.apic_id, core_bits, thread_bits,
			&package, NULL, NULL);
		dev->path.apic.node_id = package;
		printk(BIOS_DEBUG, "CPU %d apic_id: 0x%x (%d), node_id: 0x%x\n",
			num_apics, dev->path.apic.apic_id,
			dev->path.apic.apic_id, dev->path.apic.node_id);

		++num_apics;
	}
}
| 444 | |
| 445 | unsigned int get_srat_memory_entries(acpi_srat_mem_t *srat_mem) |
| 446 | { |
| 447 | const struct SystemMemoryMapHob *memory_map; |
| 448 | size_t hob_size; |
| 449 | const uint8_t mem_hob_guid[16] = FSP_SYSTEM_MEMORYMAP_HOB_GUID; |
| 450 | unsigned int mmap_index; |
| 451 | |
| 452 | memory_map = fsp_find_extension_hob_by_guid(mem_hob_guid, &hob_size); |
| 453 | assert(memory_map != NULL && hob_size != 0); |
| 454 | printk(BIOS_DEBUG, "FSP_SYSTEM_MEMORYMAP_HOB_GUID hob_size: %ld\n", hob_size); |
| 455 | |
| 456 | mmap_index = 0; |
| 457 | for (int e = 0; e < memory_map->numberEntries; ++e) { |
| 458 | const struct SystemMemoryMapElement *mem_element = &memory_map->Element[e]; |
| 459 | uint64_t addr = |
| 460 | (uint64_t) ((uint64_t)mem_element->BaseAddress << |
| 461 | MEM_ADDR_64MB_SHIFT_BITS); |
| 462 | uint64_t size = |
| 463 | (uint64_t) ((uint64_t)mem_element->ElementSize << |
| 464 | MEM_ADDR_64MB_SHIFT_BITS); |
| 465 | |
| 466 | printk(BIOS_DEBUG, "memory_map %d addr: 0x%llx, BaseAddress: 0x%x, size: 0x%llx, " |
| 467 | "ElementSize: 0x%x, reserved: %d\n", |
| 468 | e, addr, mem_element->BaseAddress, size, |
| 469 | mem_element->ElementSize, (mem_element->Type & MEM_TYPE_RESERVED)); |
| 470 | |
| 471 | assert(mmap_index < MAX_ACPI_MEMORY_AFFINITY_COUNT); |
| 472 | |
| 473 | /* skip reserved memory region */ |
| 474 | if (mem_element->Type & MEM_TYPE_RESERVED) |
| 475 | continue; |
| 476 | |
| 477 | /* skip if this address is already added */ |
| 478 | bool skip = false; |
| 479 | for (int idx = 0; idx < mmap_index; ++idx) { |
| 480 | uint64_t base_addr = ((uint64_t)srat_mem[idx].base_address_high << 32) + |
| 481 | srat_mem[idx].base_address_low; |
| 482 | if (addr == base_addr) { |
| 483 | skip = true; |
| 484 | break; |
| 485 | } |
| 486 | } |
| 487 | if (skip) |
| 488 | continue; |
| 489 | |
| 490 | srat_mem[mmap_index].type = 1; /* Memory affinity structure */ |
| 491 | srat_mem[mmap_index].length = sizeof(acpi_srat_mem_t); |
| 492 | srat_mem[mmap_index].base_address_low = (uint32_t) (addr & 0xffffffff); |
| 493 | srat_mem[mmap_index].base_address_high = (uint32_t) (addr >> 32); |
| 494 | srat_mem[mmap_index].length_low = (uint32_t) (size & 0xffffffff); |
| 495 | srat_mem[mmap_index].length_high = (uint32_t) (size >> 32); |
| 496 | srat_mem[mmap_index].proximity_domain = mem_element->SocketId; |
| 497 | srat_mem[mmap_index].flags = SRAT_ACPI_MEMORY_ENABLED; |
| 498 | if ((mem_element->Type & MEMTYPE_VOLATILE_MASK) == 0) |
| 499 | srat_mem[mmap_index].flags |= SRAT_ACPI_MEMORY_NONVOLATILE; |
| 500 | ++mmap_index; |
| 501 | } |
| 502 | |
| 503 | return mmap_index; |
| 504 | } |
| 505 | |
| 506 | #endif |