blob: b78cc53bb8be1cc87f8bc1b37018f07b3605c225 [file] [log] [blame]
Jason Gleneskf934fae2021-07-20 02:19:58 -07001/* SPDX-License-Identifier: GPL-2.0-only */
2
3#include <acpi/acpi_ivrs.h>
4#include <amdblocks/acpi.h>
5#include <amdblocks/cpu.h>
6#include <amdblocks/data_fabric.h>
7#include <amdblocks/ioapic.h>
8#include <arch/mmio.h>
9#include <console/console.h>
10#include <cpu/amd/cpuid.h>
11#include <cpu/amd/msr.h>
12#include <device/device.h>
13#include <device/pci_def.h>
14#include <device/pci_ops.h>
15#include <soc/acpi.h>
16#include <soc/data_fabric.h>
17#include <soc/pci_devs.h>
Jason Gleneskf934fae2021-07-20 02:19:58 -070018
19#define MAX_DEV_ID 0xFFFF
20
21unsigned long acpi_fill_ivrs_ioapic(acpi_ivrs_t *ivrs, unsigned long current)
22{
23 ivrs_ivhd_special_t *ivhd_ioapic = (ivrs_ivhd_special_t *)current;
24 memset(ivhd_ioapic, 0, sizeof(*ivhd_ioapic));
25
26 ivhd_ioapic->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
27 ivhd_ioapic->dte_setting = IVHD_DTE_LINT_1_PASS | IVHD_DTE_LINT_0_PASS |
28 IVHD_DTE_SYS_MGT_NO_TRANS | IVHD_DTE_NMI_PASS |
29 IVHD_DTE_EXT_INT_PASS | IVHD_DTE_INIT_PASS;
30 ivhd_ioapic->handle = FCH_IOAPIC_ID;
31 ivhd_ioapic->source_dev_id = PCI_DEVFN(SMBUS_DEV, SMBUS_FUNC);
32 ivhd_ioapic->variety = IVHD_SPECIAL_DEV_IOAPIC;
33 current += sizeof(ivrs_ivhd_special_t);
34
35 ivhd_ioapic = (ivrs_ivhd_special_t *)current;
36 memset(ivhd_ioapic, 0, sizeof(*ivhd_ioapic));
37
38 ivhd_ioapic->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
39 ivhd_ioapic->handle = GNB_IOAPIC_ID;
40 ivhd_ioapic->source_dev_id = PCI_DEVFN(0, 1);
41 ivhd_ioapic->variety = IVHD_SPECIAL_DEV_IOAPIC;
42 current += sizeof(ivrs_ivhd_special_t);
43
44 return current;
45}
46
47static unsigned long ivhd_describe_hpet(unsigned long current)
48{
49 ivrs_ivhd_special_t *ivhd_hpet = (ivrs_ivhd_special_t *)current;
50
51 ivhd_hpet->type = IVHD_DEV_8_BYTE_EXT_SPECIAL_DEV;
52 ivhd_hpet->reserved = 0x0000;
53 ivhd_hpet->dte_setting = 0x00;
54 ivhd_hpet->handle = 0x00;
55 ivhd_hpet->source_dev_id = PCI_DEVFN(SMBUS_DEV, SMBUS_FUNC);
56 ivhd_hpet->variety = IVHD_SPECIAL_DEV_HPET;
57 current += sizeof(ivrs_ivhd_special_t);
58
59 return current;
60}
61
62static unsigned long ivhd_describe_f0_device(unsigned long current,
63 uint16_t dev_id, uint8_t datasetting)
64{
65 ivrs_ivhd_f0_entry_t *ivhd_f0 = (ivrs_ivhd_f0_entry_t *) current;
66
67 ivhd_f0->type = IVHD_DEV_VARIABLE;
68 ivhd_f0->dev_id = dev_id;
69 ivhd_f0->dte_setting = datasetting;
70 ivhd_f0->hardware_id[0] = 'A';
71 ivhd_f0->hardware_id[1] = 'M';
72 ivhd_f0->hardware_id[2] = 'D';
73 ivhd_f0->hardware_id[3] = 'I';
74 ivhd_f0->hardware_id[4] = '0';
75 ivhd_f0->hardware_id[5] = '0';
76 ivhd_f0->hardware_id[6] = '4';
77 ivhd_f0->hardware_id[7] = '0';
78
79 memset(ivhd_f0->compatible_id, 0, sizeof(ivhd_f0->compatible_id));
80
81 ivhd_f0->uuid_format = 0;
82 ivhd_f0->uuid_length = 0;
83
84 current += sizeof(ivrs_ivhd_f0_entry_t);
85 return current;
86}
87
88static unsigned long ivhd_dev_range(unsigned long current, uint16_t start_devid,
89 uint16_t end_devid, uint8_t setting)
90{
91 /* 4-byte IVHD structures must be aligned to the 4-byte boundary. */
92 current = ALIGN_UP(current, 4);
93 ivrs_ivhd_generic_t *ivhd_range = (ivrs_ivhd_generic_t *)current;
94
95 /* Create the start range IVHD entry */
96 ivhd_range->type = IVHD_DEV_4_BYTE_START_RANGE;
97 ivhd_range->dev_id = start_devid;
98 ivhd_range->dte_setting = setting;
99 current += sizeof(ivrs_ivhd_generic_t);
100
101 /* Create the end range IVHD entry */
102 ivhd_range = (ivrs_ivhd_generic_t *)current;
103 ivhd_range->type = IVHD_DEV_4_BYTE_END_RANGE;
104 ivhd_range->dev_id = end_devid;
105 ivhd_range->dte_setting = setting;
106 current += sizeof(ivrs_ivhd_generic_t);
107
108 return current;
109}
110
111static unsigned long add_ivhd_dev_entry(struct device *parent, struct device *dev,
112 unsigned long *current, uint8_t type, uint8_t data)
113{
114 if (type == IVHD_DEV_4_BYTE_SELECT) {
115 /* 4-byte IVHD structures must be aligned to the 4-byte boundary. */
116 *current = ALIGN_UP(*current, 4);
117 ivrs_ivhd_generic_t *ivhd_entry = (ivrs_ivhd_generic_t *)*current;
118
119 ivhd_entry->type = type;
120 ivhd_entry->dev_id = dev->path.pci.devfn | (dev->bus->secondary << 8);
121 ivhd_entry->dte_setting = data;
122 *current += sizeof(ivrs_ivhd_generic_t);
123 } else if (type == IVHD_DEV_8_BYTE_ALIAS_SELECT) {
124 ivrs_ivhd_alias_t *ivhd_entry = (ivrs_ivhd_alias_t *)*current;
125
126 ivhd_entry->type = type;
127 ivhd_entry->dev_id = dev->path.pci.devfn | (dev->bus->secondary << 8);
128 ivhd_entry->dte_setting = data;
129 ivhd_entry->reserved1 = 0;
130 ivhd_entry->reserved2 = 0;
131 ivhd_entry->source_dev_id = parent->path.pci.devfn |
132 (parent->bus->secondary << 8);
133 *current += sizeof(ivrs_ivhd_alias_t);
134 }
135
136 return *current;
137}
138
139static void ivrs_add_device_or_bridge(struct device *parent, struct device *dev,
140 unsigned long *current, uint16_t *ivhd_length)
141{
142 unsigned int header_type, is_pcie;
143 unsigned long current_backup;
144
145 header_type = dev->hdr_type & 0x7f;
146 is_pcie = pci_find_capability(dev, PCI_CAP_ID_PCIE);
147
148 if (((header_type == PCI_HEADER_TYPE_NORMAL) ||
149 (header_type == PCI_HEADER_TYPE_BRIDGE)) && is_pcie) {
150 /* Device or Bridge is PCIe */
151 current_backup = *current;
152 add_ivhd_dev_entry(parent, dev, current, IVHD_DEV_4_BYTE_SELECT, 0x0);
153 *ivhd_length += (*current - current_backup);
154 } else if ((header_type == PCI_HEADER_TYPE_NORMAL) && !is_pcie) {
155 /* Device is legacy PCI or PCI-X */
156 current_backup = *current;
157 add_ivhd_dev_entry(parent, dev, current, IVHD_DEV_8_BYTE_ALIAS_SELECT, 0x0);
158 *ivhd_length += (*current - current_backup);
159 }
160}
161
/*
 * Recursively walk the device tree below @dev (depth-first through every
 * bus link) and emit an IVHD entry for each enabled PCI device, adding the
 * emitted sizes to *@ivhd_length. The tree depth of the PCI root (00:00.0)
 * is recorded in *@root_level; entries are emitted only for devices deeper
 * than the root itself. @linknum is unused; the recursion passes the
 * current depth through it.
 */
static void add_ivhd_device_entries(struct device *parent, struct device *dev,
		unsigned int depth, int linknum, int8_t *root_level,
		unsigned long *current, uint16_t *ivhd_length)
{
	struct device *sibling;
	struct bus *link;

	if (!root_level)
		return;

	if (dev->path.type == DEVICE_PATH_PCI) {
		/* Remember at which depth the PCI root 00:00.0 sits. */
		if ((dev->bus->secondary == 0x0) &&
		    (dev->path.pci.devfn == 0x0))
			*root_level = depth;

		/* Only emit entries once the root has been found, and never
		   for the root device itself. */
		if ((*root_level != -1) && (dev->enabled)) {
			if (depth != *root_level)
				ivrs_add_device_or_bridge(parent, dev, current, ivhd_length);
		}
	}

	/* Recurse into every child on every downstream link. */
	for (link = dev->link_list; link; link = link->next)
		for (sibling = link->children; sibling; sibling =
		     sibling->sibling)
			add_ivhd_device_entries(dev, sibling, depth + 1, depth, root_level,
						current, ivhd_length);
}
189
/*
 * Emit the type 40h (ACPI-HID capable) IVHD block at @current, followed by
 * the same device entries as the type 10h block plus an F0h entry for the
 * eMMC controller. Header fields are copied from the already-populated
 * legacy IVHD in @ivrs. Returns the cursor past everything written.
 */
static unsigned long acpi_fill_ivrs40(unsigned long current, acpi_ivrs_t *ivrs)
{
	acpi_ivrs_ivhd40_t *ivhd_40;
	unsigned long current_backup;
	int8_t root_level;

	/*
	 * These devices should be already found by previous function.
	 * Do not perform NULL checks.
	 */
	struct device *nb_dev = pcidev_on_root(0, 0);
	struct device *iommu_dev = pcidev_on_root(0, 2);

	memset((void *)current, 0, sizeof(acpi_ivrs_ivhd40_t));
	ivhd_40 = (acpi_ivrs_ivhd40_t *)current;

	/* Enable EFR */
	ivhd_40->type = IVHD_BLOCK_TYPE_FULL__ACPI_HID;
	/* For type 40h bits 6 and 7 are reserved */
	ivhd_40->flags = ivrs->ivhd.flags & 0x3f;
	ivhd_40->length = sizeof(struct acpi_ivrs_ivhd_40);
	/* BDF <bus>:00.2 */
	ivhd_40->device_id = 0x02 | (nb_dev->bus->secondary << 8);
	ivhd_40->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
	ivhd_40->iommu_base_low = ivrs->ivhd.iommu_base_low;
	ivhd_40->iommu_base_high = ivrs->ivhd.iommu_base_high;
	ivhd_40->pci_segment_group = 0x0000;
	ivhd_40->iommu_info = ivrs->ivhd.iommu_info;
	/* For type 40h bits 31:28 and 12:0 are reserved */
	ivhd_40->iommu_attributes = ivrs->ivhd.iommu_feature_info & 0xfffe000;

	/*
	 * Copy the EFR register image from IOMMU MMIO offsets 0x30/0x34 when
	 * the capability header advertises EFR support.
	 * NOTE(review): only iommu_base_low is used to form the MMIO address —
	 * presumably the IOMMU base always sits below 4 GiB; confirm.
	 */
	if (pci_read_config32(iommu_dev, ivhd_40->capability_offset) & EFR_FEATURE_SUP) {
		ivhd_40->efr_reg_image_low = read32p(ivhd_40->iommu_base_low + 0x30);
		ivhd_40->efr_reg_image_high = read32p(ivhd_40->iommu_base_low + 0x34);
	}

	current += sizeof(acpi_ivrs_ivhd40_t);

	/* Now repeat all the device entries from type 10h */
	current_backup = current;
	current = ivhd_dev_range(current, PCI_DEVFN(1, 0), MAX_DEV_ID, 0);
	ivhd_40->length += (current - current_backup);
	root_level = -1;
	add_ivhd_device_entries(NULL, all_devices, 0, -1, &root_level,
				&current, &ivhd_40->length);

	/* Describe HPET */
	current_backup = current;
	current = ivhd_describe_hpet(current);
	ivhd_40->length += (current - current_backup);

	/* Describe IOAPICs */
	current_backup = current;
	current = acpi_fill_ivrs_ioapic(ivrs, current);
	ivhd_40->length += (current - current_backup);

	/* Describe EMMC */
	current_backup = current;
	current = ivhd_describe_f0_device(current, PCI_DEVFN(0x13, 1),
				IVHD_DTE_LINT_1_PASS | IVHD_DTE_LINT_0_PASS |
				IVHD_DTE_SYS_MGT_TRANS | IVHD_DTE_NMI_PASS |
				IVHD_DTE_EXT_INT_PASS | IVHD_DTE_INIT_PASS);
	ivhd_40->length += (current - current_backup);

	return current;
}
256
/*
 * Emit the type 11h IVHD block at @current, mirroring the legacy (type 10h)
 * header from @ivrs and repeating its device entries, HPET and IOAPIC
 * descriptions. Chains into acpi_fill_ivrs40() for the type 40h block and
 * returns that function's final cursor.
 */
static unsigned long acpi_fill_ivrs11(unsigned long current, acpi_ivrs_t *ivrs)
{
	acpi_ivrs_ivhd11_t *ivhd_11;
	ivhd11_iommu_attr_t *ivhd11_attr_ptr;
	unsigned long current_backup;
	int8_t root_level;

	/*
	 * These devices should be already found by previous function.
	 * Do not perform NULL checks.
	 */
	struct device *nb_dev = pcidev_on_root(0, 0);
	struct device *iommu_dev = pcidev_on_root(0, 2);

	/*
	 * In order to utilize all features, firmware should expose type 11h
	 * IVHD which supersedes the type 10h.
	 */
	memset((void *)current, 0, sizeof(acpi_ivrs_ivhd11_t));
	ivhd_11 = (acpi_ivrs_ivhd11_t *)current;

	/* Enable EFR */
	ivhd_11->type = IVHD_BLOCK_TYPE_FULL__FIXED;
	/* For type 11h bits 6 and 7 are reserved */
	ivhd_11->flags = ivrs->ivhd.flags & 0x3f;
	ivhd_11->length = sizeof(struct acpi_ivrs_ivhd_11);
	/* BDF <bus>:00.2 */
	ivhd_11->device_id = 0x02 | (nb_dev->bus->secondary << 8);
	ivhd_11->capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
	ivhd_11->iommu_base_low = ivrs->ivhd.iommu_base_low;
	ivhd_11->iommu_base_high = ivrs->ivhd.iommu_base_high;
	ivhd_11->pci_segment_group = 0x0000;
	ivhd_11->iommu_info = ivrs->ivhd.iommu_info;
	/* Reinterpret the packed type 10h feature word as type 11h attributes. */
	ivhd11_attr_ptr = (ivhd11_iommu_attr_t *) &ivrs->ivhd.iommu_feature_info;
	ivhd_11->iommu_attributes.perf_counters = ivhd11_attr_ptr->perf_counters;
	ivhd_11->iommu_attributes.perf_counter_banks = ivhd11_attr_ptr->perf_counter_banks;
	ivhd_11->iommu_attributes.msi_num_ppr = ivhd11_attr_ptr->msi_num_ppr;

	/*
	 * Copy the EFR register image from IOMMU MMIO offsets 0x30/0x34 when
	 * the capability header advertises EFR support.
	 * NOTE(review): only iommu_base_low forms the MMIO address — presumably
	 * the IOMMU base always sits below 4 GiB; confirm.
	 */
	if (pci_read_config32(iommu_dev, ivhd_11->capability_offset) & EFR_FEATURE_SUP) {
		ivhd_11->efr_reg_image_low = read32p(ivhd_11->iommu_base_low + 0x30);
		ivhd_11->efr_reg_image_high = read32p(ivhd_11->iommu_base_low + 0x34);
	}

	current += sizeof(acpi_ivrs_ivhd11_t);

	/* Now repeat all the device entries from type 10h */
	current_backup = current;
	current = ivhd_dev_range(current, PCI_DEVFN(1, 0), MAX_DEV_ID, 0);
	ivhd_11->length += (current - current_backup);
	root_level = -1;
	add_ivhd_device_entries(NULL, all_devices, 0, -1, &root_level,
				&current, &ivhd_11->length);

	/* Describe HPET */
	current_backup = current;
	current = ivhd_describe_hpet(current);
	ivhd_11->length += (current - current_backup);

	/* Describe IOAPICs */
	current_backup = current;
	current = acpi_fill_ivrs_ioapic(ivrs, current);
	ivhd_11->length += (current - current_backup);

	return acpi_fill_ivrs40(current, ivrs);
}
322
/*
 * Top-level IVRS fill callback: populate the legacy (type 10h) IVHD header
 * in @ivrs from the IOMMU's PCI capability registers and MMIO feature
 * registers, then append device entries, HPET and IOAPIC descriptions at
 * @current. If EFR is supported, chains into acpi_fill_ivrs11() (which in
 * turn emits the type 40h block). Returns the final cursor, or the @ivrs
 * address unchanged when the northbridge/IOMMU is missing or @ivrs is NULL.
 */
unsigned long acpi_fill_ivrs(acpi_ivrs_t *ivrs, unsigned long current)
{
	unsigned long current_backup;
	uint64_t mmio_x30_value;
	uint64_t mmio_x18_value;
	uint64_t mmio_x4000_value;
	uint32_t cap_offset_0;
	uint32_t cap_offset_10;
	int8_t root_level;

	struct device *iommu_dev;
	struct device *nb_dev;

	nb_dev = pcidev_on_root(0, 0);
	if (!nb_dev) {
		printk(BIOS_WARNING, "%s: Northbridge device not present!\n", __func__);
		printk(BIOS_WARNING, "%s: IVRS table not generated...\n", __func__);

		return (unsigned long)ivrs;
	}

	iommu_dev = pcidev_on_root(0, 2);
	if (!iommu_dev) {
		printk(BIOS_WARNING, "%s: IOMMU device not found\n", __func__);

		return (unsigned long)ivrs;
	}

	if (ivrs != NULL) {
		ivrs->ivhd.type = IVHD_BLOCK_TYPE_LEGACY__FIXED;
		ivrs->ivhd.length = sizeof(struct acpi_ivrs_ivhd);

		/* BDF <bus>:00.2 */
		ivrs->ivhd.device_id = 0x02 | (nb_dev->bus->secondary << 8);
		ivrs->ivhd.capability_offset = pci_find_capability(iommu_dev, IOMMU_CAP_ID);
		/* IOMMU MMIO base from capability regs 0x44/0x48; low bits masked off. */
		ivrs->ivhd.iommu_base_low = pci_read_config32(iommu_dev, 0x44) & 0xffffc000;
		ivrs->ivhd.iommu_base_high = pci_read_config32(iommu_dev, 0x48);

		cap_offset_0 = pci_read_config32(iommu_dev, ivrs->ivhd.capability_offset);
		cap_offset_10 = pci_read_config32(iommu_dev,
						ivrs->ivhd.capability_offset + 0x10);
		/* Control (0x18), extended feature (0x30) and counter config
		   (0x4000) MMIO registers.
		   NOTE(review): addresses are built from iommu_base_low only —
		   presumably the base is below 4 GiB; confirm. */
		mmio_x18_value = read64p(ivrs->ivhd.iommu_base_low + 0x18);
		mmio_x30_value = read64p(ivrs->ivhd.iommu_base_low + 0x30);
		mmio_x4000_value = read64p(ivrs->ivhd.iommu_base_low + 0x4000);

		/* Translate hardware feature bits into IVHD flag bits. */
		ivrs->ivhd.flags |= ((mmio_x30_value & MMIO_EXT_FEATURE_PPR_SUP) ?
			IVHD_FLAG_PPE_SUP : 0);
		ivrs->ivhd.flags |= ((mmio_x30_value & MMIO_EXT_FEATURE_PRE_F_SUP) ?
			IVHD_FLAG_PREF_SUP : 0);
		ivrs->ivhd.flags |= ((mmio_x18_value & MMIO_CTRL_COHERENT) ?
			IVHD_FLAG_COHERENT : 0);
		ivrs->ivhd.flags |= ((cap_offset_0 & CAP_OFFSET_0_IOTLB_SP) ?
			IVHD_FLAG_IOTLB_SUP : 0);
		ivrs->ivhd.flags |= ((mmio_x18_value & MMIO_CTRL_ISOC) ?
			IVHD_FLAG_ISOC : 0);
		ivrs->ivhd.flags |= ((mmio_x18_value & MMIO_CTRL_RES_PASS_PW) ?
			IVHD_FLAG_RES_PASS_PW : 0);
		ivrs->ivhd.flags |= ((mmio_x18_value & MMIO_CTRL_PASS_PW) ?
			IVHD_FLAG_PASS_PW : 0);
		ivrs->ivhd.flags |= ((mmio_x18_value & MMIO_CTRL_HT_TUN_EN) ?
			IVHD_FLAG_HT_TUN_EN : 0);

		ivrs->ivhd.pci_segment_group = 0x0000;

		/* MSI number (cap+0x10) and unit ID (cap+0xC) into iommu_info. */
		ivrs->ivhd.iommu_info = pci_read_config16(iommu_dev,
			ivrs->ivhd.capability_offset + 0x10) & 0x1F;
		ivrs->ivhd.iommu_info |= (pci_read_config16(iommu_dev,
			ivrs->ivhd.capability_offset + 0xC) & 0x1F) << IOMMU_INFO_UNIT_ID_SHIFT;

		/* Repack the MMIO feature fields into the IVHD feature word. */
		ivrs->ivhd.iommu_feature_info = 0;
		ivrs->ivhd.iommu_feature_info |= (mmio_x30_value & MMIO_EXT_FEATURE_HATS_MASK)
			<< (IOMMU_FEATURE_HATS_SHIFT - MMIO_EXT_FEATURE_HATS_SHIFT);

		ivrs->ivhd.iommu_feature_info |= (mmio_x30_value & MMIO_EXT_FEATURE_GATS_MASK)
			<< (IOMMU_FEATURE_GATS_SHIFT - MMIO_EXT_FEATURE_GATS_SHIFT);

		ivrs->ivhd.iommu_feature_info |= (cap_offset_10 & CAP_OFFSET_10_MSI_NUM_PPR)
			>> (CAP_OFFSET_10_MSI_NUM_PPR_SHIFT
				- IOMMU_FEATURE_MSI_NUM_PPR_SHIFT);

		ivrs->ivhd.iommu_feature_info |= (mmio_x4000_value &
			MMIO_CNT_CFG_N_COUNTER_BANKS)
			<< (IOMMU_FEATURE_PN_BANKS_SHIFT - MMIO_CNT_CFG_N_CNT_BANKS_SHIFT);

		ivrs->ivhd.iommu_feature_info |= (mmio_x4000_value & MMIO_CNT_CFG_N_COUNTER)
			<< (IOMMU_FEATURE_PN_COUNTERS_SHIFT - MMIO_CNT_CFG_N_COUNTER_SHIFT);
		ivrs->ivhd.iommu_feature_info |= (mmio_x30_value &
			MMIO_EXT_FEATURE_PAS_MAX_MASK)
			>> (MMIO_EXT_FEATURE_PAS_MAX_SHIFT - IOMMU_FEATURE_PA_SMAX_SHIFT);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_HE_SUP)
			? IOMMU_FEATURE_HE_SUP : 0);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_GA_SUP)
			? IOMMU_FEATURE_GA_SUP : 0);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_IA_SUP)
			? IOMMU_FEATURE_IA_SUP : 0);
		ivrs->ivhd.iommu_feature_info |= (mmio_x30_value &
			MMIO_EXT_FEATURE_GLX_SUP_MASK)
			>> (MMIO_EXT_FEATURE_GLX_SHIFT - IOMMU_FEATURE_GLX_SHIFT);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_GT_SUP)
			? IOMMU_FEATURE_GT_SUP : 0);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_NX_SUP)
			? IOMMU_FEATURE_NX_SUP : 0);
		ivrs->ivhd.iommu_feature_info |= ((mmio_x30_value & MMIO_EXT_FEATURE_XT_SUP)
			? IOMMU_FEATURE_XT_SUP : 0);

		/* Enable EFR if supported */
		ivrs->iv_info = pci_read_config32(iommu_dev,
			ivrs->ivhd.capability_offset + 0x10) & 0x007fffe0;
		if (pci_read_config32(iommu_dev,
			ivrs->ivhd.capability_offset) & EFR_FEATURE_SUP)
			ivrs->iv_info |= IVINFO_EFR_SUPPORTED;

	} else {
		printk(BIOS_WARNING, "%s: AGESA returned NULL IVRS\n", __func__);

		return (unsigned long)ivrs;
	}

	/*
	 * Add all possible PCI devices that can generate transactions
	 * processed by IOMMU. Start with device 00:01.0
	 */
	current_backup = current;
	current = ivhd_dev_range(current, PCI_DEVFN(1, 0), MAX_DEV_ID, 0);
	ivrs->ivhd.length += (current - current_backup);
	root_level = -1;
	add_ivhd_device_entries(NULL, all_devices, 0, -1, &root_level,
				&current, &ivrs->ivhd.length);

	/* Describe HPET */
	current_backup = current;
	current = ivhd_describe_hpet(current);
	ivrs->ivhd.length += (current - current_backup);

	/* Describe IOAPICs */
	current_backup = current;
	current = acpi_fill_ivrs_ioapic(ivrs, current);
	ivrs->ivhd.length += (current - current_backup);

	/* If EFR is not supported, IVHD type 11h is reserved */
	if (!(ivrs->iv_info & IVINFO_EFR_SUPPORTED))
		return current;

	return acpi_fill_ivrs11(current, ivrs);
}
467}