/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <commonlib/sort.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/msr.h>
#include <delay.h>
#include <device/pci.h>
#include <hob_iiouds.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/pcr.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/pcr_ids.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <stdlib.h>
#include <string.h>
#include <timer.h>

/*
 * Get TOLM CSR B0:D5:F0:Offset_d0h
 */
uintptr_t get_tolm(uint32_t bus)
{
	uint32_t w = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC),
		VTD_TOLM_CSR);
	uintptr_t addr = w & 0xfc000000;
	printk(BIOS_DEBUG, "VTD_TOLM_CSR 0x%x, addr: 0x%lx\n", w, addr);
	return addr;
}
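
/*
 * Worked example (hypothetical raw value): a VTD_TOLM_CSR reading of
 * 0x8000000f masked with 0xfc000000 yields a TOLM of 0x80000000; the
 * register keeps the limit in bits 31:26, i.e. at 64 MiB granularity.
 */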

void get_tseg_base_lim(uint32_t bus, uint32_t *base, uint32_t *limit)
{
	uint32_t w1 = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC),
		VTD_TSEG_BASE_CSR);
	uint32_t wh = pci_io_read_config32(PCI_DEV(bus, VTD_DEV, VTD_FUNC),
		VTD_TSEG_LIMIT_CSR);
	*base = w1 & 0xfff00000;
	*limit = wh & 0xfff00000;
}

/*
 * Get MMCFG CSR B1:D29:F1:Offset_C0h
 */
uintptr_t get_cha_mmcfg_base(uint32_t bus)
{
	uint32_t wl = pci_io_read_config32(PCI_DEV(bus, CHA_UTIL_ALL_DEV,
		CHA_UTIL_ALL_FUNC), CHA_UTIL_ALL_MMCFG_CSR);
	uint32_t wh = pci_io_read_config32(PCI_DEV(bus, CHA_UTIL_ALL_DEV,
		CHA_UTIL_ALL_FUNC), CHA_UTIL_ALL_MMCFG_CSR + 4);
	uintptr_t addr = ((((wh & 0x3fff) << 6) | ((wl >> 26) & 0x3f)) << 26);
	printk(BIOS_DEBUG, "CHA_UTIL_ALL_MMCFG_CSR wl: 0x%x, wh: 0x%x, addr: 0x%lx\n",
		wl, wh, addr);
	return addr;
}
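
/*
 * Worked example (hypothetical raw values): with wl = 0x80000001 and
 * wh = 0x0, the 20-bit field {wh[13:0], wl[31:26]} is 0x20, so the MMCFG
 * base resolves to 0x20 << 26 = 0x80000000.
 */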

uint32_t top_of_32bit_ram(void)
{
	uintptr_t mmcfg, tolm;
	uint32_t bus0 = 0, bus1 = 0;
	uint32_t base = 0, limit = 0;

	get_cpubusnos(&bus0, &bus1, NULL, NULL);

	mmcfg = get_cha_mmcfg_base(bus1);
	tolm = get_tolm(bus0);
	printk(BIOS_DEBUG, "bus0: 0x%x, bus1: 0x%x, mmcfg: 0x%lx, tolm: 0x%lx\n",
		bus0, bus1, mmcfg, tolm);
	get_tseg_base_lim(bus0, &base, &limit);
	printk(BIOS_DEBUG, "tseg base: 0x%x, limit: 0x%x\n", base, limit);

	/* We will use TSEG base as the top of DRAM */
	return base;
}

/*
 * +-------------------------+ TOLM
 * | System Management Mode  |
 * |      code and data      |
 * |         (TSEG)          |
 * +-------------------------+ SMM base (aligned)
 * |                         |
 * | Chipset Reserved Memory |
 * |                         |
 * +-------------------------+ top_of_ram (aligned)
 * |                         |
 * |       CBMEM Root        |
 * |                         |
 * +-------------------------+
 * |                         |
 * |   FSP Reserved Memory   |
 * |                         |
 * +-------------------------+
 * |                         |
 * |  Various CBMEM Entries  |
 * |                         |
 * +-------------------------+ top_of_stack (8 byte aligned)
 * |                         |
 * |   stack (CBMEM Entry)   |
 * |                         |
 * +-------------------------+
 */
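
/*
 * Example with hypothetical values: if TOLM is 0x80000000 and TSEG spans
 * 0x7b000000 - 0x80000000, top_of_32bit_ram() returns the TSEG base
 * 0x7b000000, and the chipset reserved memory, CBMEM entries, FSP reserved
 * memory and stack shown above all live below that address.
 */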

uint32_t pci_read_mmio_reg(int bus, uint32_t dev, uint32_t func, int offset)
{
	return pci_mmio_read_config32(PCI_DEV(bus, dev, func), offset);
}

uint32_t get_socket_stack_busno(uint32_t socket, uint32_t stack)
{
	size_t hob_size;
	const IIO_UDS *hob;
	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;

	assert(socket < MAX_SOCKET && stack < MAX_IIO_STACK);

	hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size);
	assert(hob != NULL && hob_size != 0);

	return hob->PlatformData.CpuQpiInfo[socket].StackBus[stack];
}
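
/*
 * Usage sketch: get_socket_stack_busno(socket, PCU_IIO_STACK) returns the
 * bus number that hosts the PCU devices of that socket; the reset-CPL and
 * mailbox helpers below use it to build PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN)
 * style accessors.
 */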

/* return 1 if command timed out else 0 */
static uint32_t wait_for_bios_cmd_cpl(pci_devfn_t dev, uint32_t reg, uint32_t mask,
	uint32_t target)
{
	uint32_t max_delay = 5000; /* 5 seconds max */
	uint32_t step_delay = 50; /* 50 us */
	struct stopwatch sw;

	stopwatch_init_msecs_expire(&sw, max_delay);
	while ((pci_mmio_read_config32(dev, reg) & mask) != target) {
		udelay(step_delay);
		if (stopwatch_expired(&sw)) {
			printk(BIOS_ERR, "%s timed out for dev: 0x%x, reg: 0x%x, "
				"mask: 0x%x, target: 0x%x\n", __func__, dev, reg, mask, target);
			return 1; /* timed out */
		}
	}
	return 0; /* successful */
}

/* return 1 if command timed out else 0 */
static int set_bios_reset_cpl_for_package(uint32_t socket, uint32_t rst_cpl_mask,
	uint32_t pcode_init_mask, uint32_t val)
{
	uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);
	pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

	uint32_t reg = pci_mmio_read_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG);
	reg &= (uint32_t) ~rst_cpl_mask;
	reg |= val;

	/* update BIOS RESET completion bit */
	pci_mmio_write_config32(dev, PCU_CR1_BIOS_RESET_CPL_REG, reg);

	/* wait for PCU ack */
	return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_RESET_CPL_REG, pcode_init_mask,
		pcode_init_mask);
}
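
/*
 * Usage sketch: set_bios_init_completion_for_package() below calls this as
 * set_bios_reset_cpl_for_package(socket, RST_CPL3_MASK, PCODE_INIT_DONE3_MASK,
 * RST_CPL3_MASK) to raise RST_CPL3 and then wait for pcode to acknowledge
 * via PCODE_INIT_DONE3.
 */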

/* return 1 if command timed out else 0 */
static uint32_t write_bios_mailbox_cmd(pci_devfn_t dev, uint32_t command, uint32_t data)
{
	/* verify bios is not in busy state */
	if (wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG, BIOS_MB_RUN_BUSY_MASK, 0))
		return 1; /* timed out */

	/* write data to data register */
	printk(BIOS_SPEW, "%s - pci_mmio_write_config32 reg: 0x%x, data: 0x%x\n", __func__,
		PCU_CR1_BIOS_MB_DATA_REG, data);
	pci_mmio_write_config32(dev, PCU_CR1_BIOS_MB_DATA_REG, data);

	/* write the command */
	printk(BIOS_SPEW, "%s - pci_mmio_write_config32 reg: 0x%x, data: 0x%x\n", __func__,
		PCU_CR1_BIOS_MB_INTERFACE_REG,
		(uint32_t) (command | BIOS_MB_RUN_BUSY_MASK));
	pci_mmio_write_config32(dev, PCU_CR1_BIOS_MB_INTERFACE_REG,
		(uint32_t) (command | BIOS_MB_RUN_BUSY_MASK));

	/* wait for completion or time out */
	return wait_for_bios_cmd_cpl(dev, PCU_CR1_BIOS_MB_INTERFACE_REG,
		BIOS_MB_RUN_BUSY_MASK, 0);
}
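
/*
 * Mailbox usage sketch, mirroring set_bios_init_completion_for_package()
 * below: issue write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0)
 * and, once RUN_BUSY clears, read the result from PCU_CR1_BIOS_MB_DATA_REG;
 * a subsequent BIOS_CMD_WRITE_PCU_MISC_CFG passes new data the same way.
 */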

void config_reset_cpl3_csrs(void)
{
	uint32_t data, plat_info, max_min_turbo_limit_ratio;

	for (uint32_t socket = 0; socket < MAX_SOCKET; ++socket) {
		uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);

		/* configure PCU_CR0_FUN csrs */
		pci_devfn_t cr0_dev = PCI_DEV(bus, PCU_DEV, PCU_CR0_FUN);
		data = pci_mmio_read_config32(cr0_dev, PCU_CR0_P_STATE_LIMITS);
		data |= P_STATE_LIMITS_LOCK;
		pci_mmio_write_config32(cr0_dev, PCU_CR0_P_STATE_LIMITS, data);

		plat_info = pci_mmio_read_config32(cr0_dev, PCU_CR0_PLATFORM_INFO);
		dump_csr64("", cr0_dev, PCU_CR0_PLATFORM_INFO);
		max_min_turbo_limit_ratio =
			(plat_info & MAX_NON_TURBO_LIM_RATIO_MASK) >>
				MAX_NON_TURBO_LIM_RATIO_SHIFT;
		printk(BIOS_SPEW, "plat_info: 0x%x, max_min_turbo_limit_ratio: 0x%x\n",
			plat_info, max_min_turbo_limit_ratio);

		/* configure PCU_CR1_FUN csrs */
		pci_devfn_t cr1_dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

		data = pci_mmio_read_config32(cr1_dev, PCU_CR1_SAPMCTL);
		/* clear bits 31:28 - FSP sets these with 0x7 which needs to be cleared */
		data &= 0x0fffffff;
		data |= SAPMCTL_LOCK_MASK;
		pci_mmio_write_config32(cr1_dev, PCU_CR1_SAPMCTL, data);

		/* configure PCU_CR2_FUN csrs */
		pci_devfn_t cr2_dev = PCI_DEV(bus, PCU_DEV, PCU_CR2_FUN);

		data = PCIE_IN_PKGCSTATE_L1_MASK;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PKG_CST_ENTRY_CRITERIA_MASK, data);

		data = KTI_IN_PKGCSTATE_L1_MASK;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PKG_CST_ENTRY_CRITERIA_MASK2, data);

		data = PROCHOT_RATIO;
		printk(BIOS_SPEW, "PCU_CR2_PROCHOT_RESPONSE_RATIO_REG data: 0x%x\n", data);
		pci_mmio_write_config32(cr2_dev, PCU_CR2_PROCHOT_RESPONSE_RATIO_REG, data);
		dump_csr("", cr2_dev, PCU_CR2_PROCHOT_RESPONSE_RATIO_REG);

		data = pci_mmio_read_config32(cr2_dev, PCU_CR2_DYNAMIC_PERF_POWER_CTL);
		data |= UNOCRE_PLIMIT_OVERRIDE_SHIFT;
		pci_mmio_write_config32(cr2_dev, PCU_CR2_DYNAMIC_PERF_POWER_CTL, data);
	}
}

static void set_bios_init_completion_for_package(uint32_t socket)
{
	uint32_t data;
	uint32_t timedout;
	uint32_t bus = get_socket_stack_busno(socket, PCU_IIO_STACK);
	pci_devfn_t dev = PCI_DEV(bus, PCU_DEV, PCU_CR1_FUN);

	/* read pcu config */
	timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
	if (timedout) {
		/* 2nd try */
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_READ_PCU_MISC_CFG, 0);
		if (timedout)
			die("BIOS PCU Misc Config Read timed out.\n");

		data = pci_mmio_read_config32(dev, PCU_CR1_BIOS_MB_DATA_REG);
		printk(BIOS_SPEW, "%s - pci_mmio_read_config32 reg: 0x%x, data: 0x%x\n",
			__func__, PCU_CR1_BIOS_MB_DATA_REG, data);

		/* write PCU config */
		timedout = write_bios_mailbox_cmd(dev, BIOS_CMD_WRITE_PCU_MISC_CFG, data);
		if (timedout)
			die("BIOS PCU Misc Config Write timed out.\n");
	}

	/* update RST_CPL3, PCODE_INIT_DONE3 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL3_MASK,
		PCODE_INIT_DONE3_MASK, RST_CPL3_MASK);
	if (timedout)
		die("BIOS RESET CPL3 timed out.\n");

	/* update RST_CPL4, PCODE_INIT_DONE4 */
	timedout = set_bios_reset_cpl_for_package(socket, RST_CPL4_MASK,
		PCODE_INIT_DONE4_MASK, RST_CPL4_MASK);
	if (timedout)
		die("BIOS RESET CPL4 timed out.\n");

	/* set CSR_DESIRED_CORES_CFG2 lock bit */
	data = pci_mmio_read_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG);
	data |= PCU_CR1_DESIRED_CORES_CFG2_REG_LOCK_MASK;
	printk(BIOS_SPEW, "%s - pci_mmio_write_config32 PCU_CR1_DESIRED_CORES_CFG2_REG 0x%x, data: 0x%x\n",
		__func__, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
	pci_mmio_write_config32(dev, PCU_CR1_DESIRED_CORES_CFG2_REG, data);
}

void set_bios_init_completion(void)
{
	uint32_t sbsp_socket_id = 0; /* TODO - this needs to be configurable */

	for (uint32_t socket = 0; socket < MAX_SOCKET; ++socket) {
		if (socket == sbsp_socket_id)
			continue;
		set_bios_init_completion_for_package(socket);
	}
	set_bios_init_completion_for_package(sbsp_socket_id);
}

void get_core_thread_bits(uint32_t *core_bits, uint32_t *thread_bits)
{
	register int ecx;
	struct cpuid_result cpuid_regs;

	/* get max index of CPUID */
	cpuid_regs = cpuid(0);
	assert(cpuid_regs.eax >= 0xb); /* cpuid_regs.eax is max input value for cpuid */

	*thread_bits = *core_bits = 0;
	ecx = 0;
	while (1) {
		cpuid_regs = cpuid_ext(0xb, ecx);
		if (ecx == 0) {
			*thread_bits = (cpuid_regs.eax & 0x1f);
		} else {
			*core_bits = (cpuid_regs.eax & 0x1f) - *thread_bits;
			break;
		}
		ecx++;
	}
}
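
/*
 * Worked example (hypothetical CPUID values): if leaf 0xb sub-leaf 0
 * reports EAX[4:0] = 1 and sub-leaf 1 reports EAX[4:0] = 6, then
 * thread_bits = 1 and core_bits = 6 - 1 = 5.
 */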

void get_cpu_info_from_apicid(uint32_t apicid, uint32_t core_bits, uint32_t thread_bits,
	uint8_t *package, uint8_t *core, uint8_t *thread)
{
	if (package != NULL)
		*package = (apicid >> (thread_bits + core_bits));
	if (core != NULL)
		*core = (uint32_t)((apicid >> thread_bits) & ~((~0) << core_bits));
	if (thread != NULL)
		*thread = (uint32_t)(apicid & ~((~0) << thread_bits));
}
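
/*
 * Worked example (hypothetical widths): with thread_bits = 1 and
 * core_bits = 5, an APIC ID of 0x43 decodes to thread 1, core 1 and
 * package 1.
 */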

int get_cpu_count(void)
{
	size_t hob_size;
	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;
	const IIO_UDS *hob;

	/* these fields are incorrect - need debugging */
	hob = fsp_find_extension_hob_by_guid(fsp_hob_iio_universal_data_guid, &hob_size);
	assert(hob != NULL && hob_size != 0);
	return hob->SystemStatus.numCpus;
}

int get_threads_per_package(void)
{
	unsigned int core_count, thread_count;
	cpu_read_topology(&core_count, &thread_count);
	return thread_count;
}

int get_platform_thread_count(void)
{
	return get_cpu_count() * get_threads_per_package();
}

void get_iiostack_info(struct iiostack_resource *info)
{
	size_t hob_size;
	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;
	const IIO_UDS *hob;

	hob = fsp_find_extension_hob_by_guid(
		fsp_hob_iio_universal_data_guid, &hob_size);
	assert(hob != NULL && hob_size != 0);

	// copy IIO Stack info from FSP HOB
	info->no_of_stacks = 0;
	for (int s = 0; s < hob->PlatformData.numofIIO; ++s) {
		for (int x = 0; x < MAX_IIO_STACK; ++x) {
			const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x];
			// TODO: do we have a situation with only bus 0 and one stack?
			if (ri->BusBase >= ri->BusLimit)
				continue;
			assert(info->no_of_stacks < (CONFIG_MAX_SOCKET * MAX_IIO_STACK));
			memcpy(&info->res[info->no_of_stacks++], ri, sizeof(STACK_RES));
		}
	}
}

#if ENV_RAMSTAGE

void xeonsp_init_cpu_config(void)
{
	struct device *dev;
	int apic_ids[CONFIG_MAX_CPUS] = {0}, apic_ids_by_thread[CONFIG_MAX_CPUS] = {0};
	int num_apics = 0;
	uint32_t core_bits, thread_bits;
	unsigned int core_count, thread_count;
	unsigned int num_cpus;

	/*
	 * Sort APIC IDs in ascending order to identify the APIC ID ranges
	 * for each NUMA domain.
	 */
	for (dev = all_devices; dev; dev = dev->next) {
		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		apic_ids[num_apics++] = dev->path.apic.apic_id;
	}
	if (num_apics > 1)
		bubblesort(apic_ids, num_apics, NUM_ASCENDING);

	num_cpus = get_cpu_count();
	cpu_read_topology(&core_count, &thread_count);
	assert(num_apics == (num_cpus * thread_count));

	/* sort them by thread, i.e. all cores with thread 0 first, then thread 1 (example below) */
	int index = 0;
	for (int id = 0; id < num_apics; ++id) {
		int apic_id = apic_ids[id];
		if (apic_id & 0x1) { /* 2nd thread */
			apic_ids_by_thread[index + (num_apics / 2) - 1] = apic_id;
		} else { /* 1st thread */
			apic_ids_by_thread[index++] = apic_id;
		}
	}
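
	/*
	 * Worked example (hypothetical APIC IDs): a sorted list
	 * [0, 1, 2, 3, 4, 5, 6, 7] is rearranged by the loop above into
	 * [0, 2, 4, 6, 1, 3, 5, 7], i.e. all thread-0 APIC IDs first,
	 * followed by the thread-1 APIC IDs in the same order.
	 */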

	/* update apic_id, node_id in sorted order */
	num_apics = 0;
	get_core_thread_bits(&core_bits, &thread_bits);
	for (dev = all_devices; dev; dev = dev->next) {
		uint8_t package;

		if ((dev->path.type != DEVICE_PATH_APIC) ||
			(dev->bus->dev->path.type != DEVICE_PATH_CPU_CLUSTER)) {
			continue;
		}
		if (!dev->enabled)
			continue;
		if (num_apics >= ARRAY_SIZE(apic_ids))
			break;
		dev->path.apic.apic_id = apic_ids_by_thread[num_apics];
		get_cpu_info_from_apicid(dev->path.apic.apic_id, core_bits, thread_bits,
			&package, NULL, NULL);
		dev->path.apic.node_id = package;
		printk(BIOS_DEBUG, "CPU %d apic_id: 0x%x (%d), node_id: 0x%x\n",
			num_apics, dev->path.apic.apic_id,
			dev->path.apic.apic_id, dev->path.apic.node_id);

		++num_apics;
	}
}

unsigned int get_srat_memory_entries(acpi_srat_mem_t *srat_mem)
{
	const struct SystemMemoryMapHob *memory_map;
	size_t hob_size;
	const uint8_t mem_hob_guid[16] = FSP_SYSTEM_MEMORYMAP_HOB_GUID;
	unsigned int mmap_index;

	memory_map = fsp_find_extension_hob_by_guid(mem_hob_guid, &hob_size);
	assert(memory_map != NULL && hob_size != 0);
	printk(BIOS_DEBUG, "FSP_SYSTEM_MEMORYMAP_HOB_GUID hob_size: %zu\n", hob_size);

	mmap_index = 0;
	for (int e = 0; e < memory_map->numberEntries; ++e) {
		const struct SystemMemoryMapElement *mem_element = &memory_map->Element[e];
		uint64_t addr =
			(uint64_t) ((uint64_t)mem_element->BaseAddress <<
				MEM_ADDR_64MB_SHIFT_BITS);
		uint64_t size =
			(uint64_t) ((uint64_t)mem_element->ElementSize <<
				MEM_ADDR_64MB_SHIFT_BITS);
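
		/*
		 * Example (hypothetical HOB entry): a BaseAddress of 0x40 in
		 * 64 MiB units corresponds to 0x40 * 64 MiB = 4 GiB
		 * (0x100000000), and an ElementSize of 0x10 to a 1 GiB range.
		 */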

		printk(BIOS_DEBUG, "memory_map %d addr: 0x%llx, BaseAddress: 0x%x, size: 0x%llx, "
			"ElementSize: 0x%x, reserved: %d\n",
			e, addr, mem_element->BaseAddress, size,
			mem_element->ElementSize, (mem_element->Type & MEM_TYPE_RESERVED));

		assert(mmap_index < MAX_ACPI_MEMORY_AFFINITY_COUNT);

		/* skip reserved memory region */
		if (mem_element->Type & MEM_TYPE_RESERVED)
			continue;

		/* skip if this address is already added */
		bool skip = false;
		for (int idx = 0; idx < mmap_index; ++idx) {
			uint64_t base_addr = ((uint64_t)srat_mem[idx].base_address_high << 32) +
				srat_mem[idx].base_address_low;
			if (addr == base_addr) {
				skip = true;
				break;
			}
		}
		if (skip)
			continue;

		srat_mem[mmap_index].type = 1; /* Memory affinity structure */
		srat_mem[mmap_index].length = sizeof(acpi_srat_mem_t);
		srat_mem[mmap_index].base_address_low = (uint32_t) (addr & 0xffffffff);
		srat_mem[mmap_index].base_address_high = (uint32_t) (addr >> 32);
		srat_mem[mmap_index].length_low = (uint32_t) (size & 0xffffffff);
		srat_mem[mmap_index].length_high = (uint32_t) (size >> 32);
		srat_mem[mmap_index].proximity_domain = mem_element->SocketId;
		srat_mem[mmap_index].flags = SRAT_ACPI_MEMORY_ENABLED;
		if ((mem_element->Type & MEMTYPE_VOLATILE_MASK) == 0)
			srat_mem[mmap_index].flags |= SRAT_ACPI_MEMORY_NONVOLATILE;
		++mmap_index;
	}

	return mmap_index;
}

#endif