/* SPDX-License-Identifier: GPL-2.0-only */

#include <cbmem.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <intelblocks/acpi.h>
#include <intelblocks/cfg.h>
#include <intelblocks/systemagent.h>
#include <smbios.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/systemagent.h>
#include "systemagent_def.h"

/* SoC override function */
__weak void soc_systemagent_init(struct device *dev)
{
	/* no-op */
}

__weak void soc_add_fixed_mmio_resources(struct device *dev,
		int *resource_cnt)
{
	/* no-op */
}

__weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
		uint64_t *mask)
{
	/* return failure for this dummy API */
	return -1;
}

__weak unsigned long sa_write_acpi_tables(const struct device *dev,
		unsigned long current,
		struct acpi_rsdp *rsdp)
{
	return current;
}

/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 */
void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
	const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
{
	int i;
	int index = *resource_cnt;

	for (i = 0; i < count; i++) {
		uintptr_t base;
		size_t size;

		size = sa_fixed_resources[i].size;
		base = sa_fixed_resources[i].base;

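		/* mmio_resource() takes base and size in KiB, hence the conversion from bytes. */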
		mmio_resource(dev, index++, base / KiB, size / KiB);
	}

	*resource_cnt = index;
}

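/*
 * Illustrative SoC-side usage (a sketch only; the descriptor field names and
 * the MCH_BASE_ADDRESS/MCH_BASE_SIZE macros are assumed from the SoC headers):
 *
 *	void soc_add_fixed_mmio_resources(struct device *dev, int *index)
 *	{
 *		static const struct sa_mmio_descriptor soc_fixed_resources[] = {
 *			{ .base = MCH_BASE_ADDRESS, .size = MCH_BASE_SIZE,
 *			  .description = "MCHBAR" },
 *		};
 *
 *		sa_add_fixed_mmio_resources(dev, index, soc_fixed_resources,
 *				ARRAY_SIZE(soc_fixed_resources));
 *	}
 */
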
/*
 * DRAM memory mapped registers
 *
 * TOUUD: This 64-bit register defines the Top of Upper Usable DRAM
 * TOLUD: This 32-bit register defines the Top of Low Usable DRAM
 * BGSM: This register contains the base address of stolen DRAM memory for GTT
 * TSEG: This register contains the base address of TSEG DRAM memory
 */
static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
	{ TOUUD, true, "TOUUD" },
	{ TOLUD, false, "TOLUD" },
	{ BGSM, false, "BGSM" },
	{ TSEG, false, "TSEG" },
};

/* Read DRAM memory map register value through PCI configuration space */
static void sa_read_map_entry(struct device *dev,
		const struct sa_mem_map_descriptor *entry, uint64_t *result)
{
	uint64_t value = 0;

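	/* For 64-bit registers, read the upper dword first; the lower dword is OR'd in below. */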
	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + 4);
		value <<= 32;
	}

	value |= pci_read_config32(dev, entry->reg);
	/* All registers are on a 1MiB granularity. */
	value = ALIGN_DOWN(value, 1 * MiB);

	*result = value;
}

/*
 * This function returns the SoC-specific config that indicates whether
 * MMIO above 4GB is enabled.
 *
 * Return values:
 * 0 = Above 4GB MMIO is not enabled
 * 1 = Above 4GB MMIO is enabled
 */
static int get_enable_above_4GB_mmio(void)
{
	const struct soc_intel_common_config *common_config;
	common_config = chip_get_common_soc_structure();

	return common_config->enable_above_4GB_mmio;
}

/* Fill MMIO resource above 4GB into GNVS */
void sa_fill_gnvs(global_nvs_t *gnvs)
{
	if (!get_enable_above_4GB_mmio())
		return;

	struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);

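	/* e4gm = enable flag, a4gb = window base (from TOUUD), a4gs = window size. */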
	gnvs->e4gm = 1;
	sa_read_map_entry(sa_dev, &sa_memory_map[SA_TOUUD_REG], &gnvs->a4gb);
	gnvs->a4gs = ABOVE_4GB_MEM_BASE_SIZE;
	printk(BIOS_DEBUG, "PCI space above 4GB MMIO: base 0x%llx, size 0x%llx\n",
		gnvs->a4gb, gnvs->a4gs);
}

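/* Read all DRAM map registers (TOUUD, TOLUD, BGSM, TSEG) in one pass. */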
static void sa_get_mem_map(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < MAX_MAP_ENTRIES; i++)
		sa_read_map_entry(dev, &sa_memory_map[i], &values[i]);
}

/*
 * These are the host memory ranges that should be added:
 * - 0 -> 0xa0000: cacheable
 * - 0xc0000 -> top_of_ram: cacheable
 * - top_of_ram -> BGSM: cacheable with standard MTRRs and reserved
 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
 * - 4GiB -> TOUUD: cacheable
 *
 * The default SMRAM space is reserved so that the range doesn't
 * have to be saved during S3 Resume. Once marked reserved the OS
 * cannot use the memory. This is a bit of an odd place to reserve
 * the region, but the CPU devices don't have dev_ops->read_resources()
 * called on them.
 *
 * The range 0xa0000 -> 0xc0000 does not have any resources
 * associated with it to handle legacy VGA memory. If this range
 * is not omitted, the MTRR code will set up the area as cacheable,
 * causing VGA access to break.
 *
 * The TSEG region is mapped as cacheable so that SMRAM relocation
 * can be performed faster. Once the SMRR is enabled, the SMRR takes
 * precedence over the existing MTRRs covering this region.
 *
 * Note that cacheable entry types need to be added in order: the
 * current MTRR code assumes this and falls over itself if they aren't.
 *
 * The resource index starts low and should not meet or exceed
 * PCI_BASE_ADDRESS_0.
 */
static void sa_add_dram_resources(struct device *dev, int *resource_count)
{
	uintptr_t base_k, touud_k;
	size_t size_k;
	uint64_t sa_map_values[MAX_MAP_ENTRIES];
	uintptr_t top_of_ram;
	int index = *resource_count;

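	/* cbmem_top() is the top of host-usable low DRAM; the region up to BGSM gets reserved below. */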
	top_of_ram = (uintptr_t)cbmem_top();

	/* 0 -> 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> top_of_ram */
	base_k = 0xc0000 / KiB;
	size_k = (top_of_ram / KiB) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	sa_get_mem_map(dev, &sa_map_values[0]);

	/* top_of_ram -> BGSM */
	base_k = top_of_ram;
	size_k = sa_map_values[SA_BGSM_REG] - base_k;
	reserved_ram_resource(dev, index++, base_k / KiB, size_k / KiB);

	/* BGSM -> TOLUD */
	base_k = sa_map_values[SA_BGSM_REG];
	size_k = sa_map_values[SA_TOLUD_REG] - base_k;
	mmio_resource(dev, index++, base_k / KiB, size_k / KiB);

	/* 4GiB -> TOUUD */
	base_k = 4 * (GiB / KiB); /* 4GiB */
	touud_k = sa_map_values[SA_TOUUD_REG] / KiB;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, 0xa0000 / KiB, (0xc0000 - 0xa0000) / KiB);
	reserved_ram_resource(dev, index++, 0xc0000 / KiB,
			(1*MiB - 0xc0000) / KiB);

	*resource_count = index;
}

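/* Bit 31 of an IMR base register is its enable bit. */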
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	return !!(imr_base_reg & (1 << 31));
}

static void imr_resource(struct device *dev, int idx, uint32_t base,
		uint32_t mask)
{
	uint32_t base_k, size_k;
	/* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
	base_k = (base & 0x0fffffff);
	/* Bits 28:0 encode the AND mask used for comparison, in KiB. */
	size_k = ((~mask & 0x0fffffff) + 1);
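	/* Example: mask 0x0ffffff0 -> (~0x0ffffff0 & 0x0fffffff) + 1 = 0x10, i.e. a 16 KiB IMR. */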
	/*
	 * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
	 * out of MTRRs. Memory reserved by IMRs is not usable by the host,
	 * so mark it reserved.
	 */
	reserved_ram_resource(dev, idx, base_k, size_k);
}

/*
 * Add IMR ranges that hang off the host bridge/memory
 * controller device in case CONFIG_SA_ENABLE_IMR is selected by the SoC.
 */
static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
{
	size_t i, imr_offset;
	uint32_t base, mask;
	int index = *resource_cnt;

	for (i = 0; i < MCH_NUM_IMRS; i++) {
		imr_offset = i * MCH_IMR_PITCH;
		base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
		mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);

		if (is_imr_enabled(base))
			imr_resource(dev, index++, base, mask);
	}

	*resource_cnt = index;
}

static void systemagent_read_resources(struct device *dev)
{
	int index = 0;

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	soc_add_fixed_mmio_resources(dev, &index);
	/* Calculate and add DRAM resources. */
	sa_add_dram_resources(dev, &index);
	if (CONFIG(SA_ENABLE_IMR))
		/* Add the isolated memory ranges (IMRs). */
		sa_add_imr_resources(dev, &index);
}

#if CONFIG(GENERATE_SMBIOS_TABLES)
static int sa_smbios_write_type_16(struct device *dev, int *handle,
		unsigned long *current)
{
	struct smbios_type16 *t = (struct smbios_type16 *)*current;
	int len = sizeof(struct smbios_type16);

	struct memory_info *meminfo;
	meminfo = cbmem_find(CBMEM_ID_MEMINFO);
	if (meminfo == NULL)
		return 0; /* can't find mem info in cbmem */

	memset(t, 0, sizeof(struct smbios_type16));
	t->type = SMBIOS_PHYS_MEMORY_ARRAY;
	t->handle = *handle;
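	/* len - 2 drops the trailing string-terminator bytes (assuming an eos[2] field ends the struct). */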
	t->length = len - 2;
	t->location = MEMORY_ARRAY_LOCATION_SYSTEM_BOARD;
	t->use = MEMORY_ARRAY_USE_SYSTEM;
	/* TBD: the meminfo HOB has information about ECC */
	t->memory_error_correction = MEMORY_ARRAY_ECC_NONE;
	/* no error information handle available */
	t->memory_error_information_handle = 0xFFFE;
	t->maximum_capacity = 32 * (GiB / KiB); /* 32 GiB as default */
	t->number_of_memory_devices = meminfo->dimm_cnt;

	*current += len;
	*handle += 1;
	return len;
}
#endif

void enable_power_aware_intr(void)
{
	uint8_t pair;

	/* Enable Power Aware Interrupt Routing */
	pair = MCHBAR8(MCH_PAIR);
	pair &= ~0x7; /* Clear 2:0 */
	pair |= 0x4; /* Fixed Priority */
	MCHBAR8(MCH_PAIR) = pair;
}

static struct device_operations systemagent_ops = {
	.read_resources = systemagent_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = soc_systemagent_init,
	.ops_pci = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = sa_write_acpi_tables,
#endif
#if CONFIG(GENERATE_SMBIOS_TABLES)
	.get_smbios_data = sa_smbios_write_type_16,
#endif
};

static const unsigned short systemagent_ids[] = {
	PCI_DEVICE_ID_INTEL_GLK_NB,
	PCI_DEVICE_ID_INTEL_APL_NB,
	PCI_DEVICE_ID_INTEL_CNL_ID_U,
	PCI_DEVICE_ID_INTEL_CNL_ID_Y,
	PCI_DEVICE_ID_INTEL_SKL_ID_U,
	PCI_DEVICE_ID_INTEL_SKL_ID_Y,
	PCI_DEVICE_ID_INTEL_SKL_ID_ULX,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_4,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_2,
	PCI_DEVICE_ID_INTEL_SKL_ID_S_4,
	PCI_DEVICE_ID_INTEL_WHL_ID_W_2,
	PCI_DEVICE_ID_INTEL_WHL_ID_W_4,
	PCI_DEVICE_ID_INTEL_KBL_ID_S,
	PCI_DEVICE_ID_INTEL_SKL_ID_H_EM,
	PCI_DEVICE_ID_INTEL_KBL_ID_U,
	PCI_DEVICE_ID_INTEL_KBL_ID_Y,
	PCI_DEVICE_ID_INTEL_KBL_ID_H,
	PCI_DEVICE_ID_INTEL_KBL_U_R,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT,
	PCI_DEVICE_ID_INTEL_KBL_ID_DT_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_U,
	PCI_DEVICE_ID_INTEL_CFL_ID_U_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_H,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_H_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_2,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_8,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_4,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_6,
	PCI_DEVICE_ID_INTEL_CFL_ID_S_S_8,
	PCI_DEVICE_ID_INTEL_ICL_ID_U,
	PCI_DEVICE_ID_INTEL_ICL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y,
	PCI_DEVICE_ID_INTEL_ICL_ID_Y_2,
	PCI_DEVICE_ID_INTEL_CML_ULT,
	PCI_DEVICE_ID_INTEL_CML_ULT_2_2,
	PCI_DEVICE_ID_INTEL_CML_ULT_6_2,
	PCI_DEVICE_ID_INTEL_CML_ULX,
	PCI_DEVICE_ID_INTEL_CML_S,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_P0P1_6_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_8_2,
	PCI_DEVICE_ID_INTEL_CML_S_P0P1_10_2,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_4,
	PCI_DEVICE_ID_INTEL_CML_S_G0G1_2,
	PCI_DEVICE_ID_INTEL_CML_H,
	PCI_DEVICE_ID_INTEL_CML_H_4_2,
	PCI_DEVICE_ID_INTEL_CML_H_8_2,
	PCI_DEVICE_ID_INTEL_TGL_ID_U,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_1,
	PCI_DEVICE_ID_INTEL_TGL_ID_U_2_2,
	PCI_DEVICE_ID_INTEL_TGL_ID_Y,
	PCI_DEVICE_ID_INTEL_JSL_EHL,
	PCI_DEVICE_ID_INTEL_EHL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_1,
	PCI_DEVICE_ID_INTEL_JSL_ID_2,
	PCI_DEVICE_ID_INTEL_JSL_ID_3,
	PCI_DEVICE_ID_INTEL_JSL_ID_4,
	0
};

static const struct pci_driver systemagent_driver __pci_driver = {
	.ops = &systemagent_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};