/* SPDX-License-Identifier: GPL-2.0-only */
/* This file is part of the coreboot project. */

#include <commonlib/helpers.h>
#include <arch/io.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <cbmem.h>
#include <console/console.h>
#include <bootmode.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <drivers/intel/gma/i915_reg.h>
#include <drivers/intel/gma/i915.h>
#include <drivers/intel/gma/libgfxinit.h>
#include <cpu/intel/haswell/haswell.h>
#include <drivers/intel/gma/opregion.h>
#include <southbridge/intel/lynxpoint/nvs.h>
#include <string.h>
#include <types.h>

#include "chip.h"
#include "haswell.h"

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/chromeos.h>
#endif

struct gt_reg {
	u32 reg;
	u32 andmask;
	u32 ormask;
};

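/*
 * The GT register tables below are applied by gtt_write_regs(): an entry with a non-zero
 * andmask is applied as a read-modify-write, an entry with a zero andmask is written
 * directly, and a zero reg terminates the table.
 */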
static const struct gt_reg haswell_gt_setup[] = {
	/* Enable Counters */
	{ 0x0a248, 0x00000000, 0x00000016 },
	{ 0x0a000, 0x00000000, 0x00070020 },
	{ 0x0a180, 0xff3fffff, 0x15000000 },
	/* Enable DOP Clock Gating */
	{ 0x09424, 0x00000000, 0x000003fd },
	/* Enable Unit Level Clock Gating */
	{ 0x09400, 0x00000000, 0x00000080 },
	{ 0x09404, 0x00000000, 0x40401000 },
	{ 0x09408, 0x00000000, 0x00000000 },
	{ 0x0940c, 0x00000000, 0x02000001 },
	{ 0x0a008, 0x00000000, 0x08000000 },
	/* Wake Rate Limits */
	{ 0x0a090, 0xffffffff, 0x00000000 },
	{ 0x0a098, 0xffffffff, 0x03e80000 },
	{ 0x0a09c, 0xffffffff, 0x00280000 },
	{ 0x0a0a8, 0xffffffff, 0x0001e848 },
	{ 0x0a0ac, 0xffffffff, 0x00000019 },
	/* Render/Video/Blitter Idle Max Count */
	{ 0x02054, 0x00000000, 0x0000000a },
	{ 0x12054, 0x00000000, 0x0000000a },
	{ 0x22054, 0x00000000, 0x0000000a },
	/* RC Sleep / RCx Thresholds */
	{ 0x0a0b0, 0xffffffff, 0x00000000 },
	{ 0x0a0b4, 0xffffffff, 0x000003e8 },
	{ 0x0a0b8, 0xffffffff, 0x0000c350 },
	/* RP Settings */
	{ 0x0a010, 0xffffffff, 0x000f4240 },
	{ 0x0a014, 0xffffffff, 0x12060000 },
	{ 0x0a02c, 0xffffffff, 0x0000e808 },
	{ 0x0a030, 0xffffffff, 0x0003bd08 },
	{ 0x0a068, 0xffffffff, 0x000101d0 },
	{ 0x0a06c, 0xffffffff, 0x00055730 },
	{ 0x0a070, 0xffffffff, 0x0000000a },
	/* RP Control */
	{ 0x0a024, 0x00000000, 0x00000b92 },
	/* HW RC6 Control */
	{ 0x0a090, 0x00000000, 0x88040000 },
	/* Video Frequency Request */
	{ 0x0a00c, 0x00000000, 0x08000000 },
	{ 0 },
};

static const struct gt_reg haswell_gt_lock[] = {
	{ 0x0a248, 0xffffffff, 0x80000000 },
	{ 0x0a004, 0xffffffff, 0x00000010 },
	{ 0x0a080, 0xffffffff, 0x00000004 },
	{ 0x0a180, 0xffffffff, 0x80000000 },
	{ 0 },
};

/*
 * Some VGA option roms are used for several chipsets but they only have one PCI ID in their
 * header. If we encounter such an option rom, we need to do the mapping ourselves.
 */

u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x80860402: /* GT1 Desktop */
	case 0x80860406: /* GT1 Mobile */
	case 0x8086040a: /* GT1 Server */
	case 0x80860a06: /* GT1 ULT */

	case 0x80860412: /* GT2 Desktop */
	case 0x80860416: /* GT2 Mobile */
	case 0x8086041a: /* GT2 Server */
	case 0x80860a16: /* GT2 ULT */

	case 0x80860422: /* GT3 Desktop */
	case 0x80860426: /* GT3 Mobile */
	case 0x8086042a: /* GT3 Server */
	case 0x80860a26: /* GT3 ULT */

		new_vendev = 0x80860406; /* GT1 Mobile */
		break;
	}

	return new_vendev;
}

/** FIXME: Seems to be outdated. */
/*
 * GTT is the Global Translation Table for the graphics pipeline. It is used to translate
 * graphics addresses to physical memory addresses. As in the CPU, GTTs map 4K pages.
 *
 * The setgtt function adds a further bit of flexibility: it allows you to set a range (the
 * first two parameters) to point to a physical address (third parameter); the physical
 * address is incremented by a count (fourth parameter) for each GTT entry in the range.
 *
 * Why do it this way? For ultrafast startup, we can point all the GTT entries to a single
 * page and set that page to all zeroes:
 *
 * memset(physbase, 0, 4096);
 * setgtt(0, 4250, physbase, 0);
 *
 * This takes about 2 ms and is a win, because zeroing every mapped page would take up to
 * 200 ms.
 *
 * This call sets the GTT to point to a linear range of pages starting at physbase.
 */

#define GTT_PTE_BASE (2 << 20)

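/*
 * Each 32-bit GTT entry, as built below, holds the low 32 bits of the (4KiB-aligned)
 * physical page address, has bits 39:32 of that address swizzled into bits 11:4, and
 * uses bit 0 as the valid flag.
 */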
void set_translation_table(int start, int end, u64 base, int inc)
{
	int i;

	for (i = start; i < end; i++) {
		u64 physical_address = base + i * inc;

		/* swizzle the 32:39 bits to 4:11 */
		u32 word = physical_address | ((physical_address >> 28) & 0xff0) | 1;

		/*
		 * Note: we've confirmed by checking the values that MRC does no useful
		 * setup before we run this.
		 */
		gtt_write(GTT_PTE_BASE + i * 4, word);
		gtt_read(GTT_PTE_BASE + i * 4);
	}
}

static struct resource *gtt_res = NULL;

u32 gtt_read(u32 reg)
{
	u32 val;
	val = read32(res2mmio(gtt_res, reg, 0));
	return val;
}

void gtt_write(u32 reg, u32 data)
{
	write32(res2mmio(gtt_res, reg, 0), data);
}

static inline void gtt_rmw(u32 reg, u32 andmask, u32 ormask)
{
	u32 val = gtt_read(reg);
	val &= andmask;
	val |= ormask;
	gtt_write(reg, val);
}

static inline void gtt_write_regs(const struct gt_reg *gt)
{
	for (; gt && gt->reg; gt++) {
		if (gt->andmask)
			gtt_rmw(gt->reg, gt->andmask, gt->ormask);
		else
			gtt_write(gt->reg, gt->ormask);
	}
}

#define GTT_RETRY 1000
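/*
 * Poll 'reg' until the masked value matches, retrying up to GTT_RETRY times with a
 * 10us delay (roughly 10 ms in total). Returns 1 on success, 0 on timeout.
 */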
int gtt_poll(u32 reg, u32 mask, u32 value)
{
	unsigned int try = GTT_RETRY;
	u32 data;

	while (try--) {
		data = gtt_read(reg);
		if ((data & mask) == value)
			return 1;

		udelay(10);
	}

	printk(BIOS_ERR, "GT init timeout\n");
	return 0;
}

uintptr_t gma_get_gnvs_aslb(const void *gnvs)
{
	const global_nvs_t *gnvs_ptr = gnvs;
	return (uintptr_t)(gnvs_ptr ? gnvs_ptr->aslb : 0);
}

void gma_set_gnvs_aslb(void *gnvs, uintptr_t aslb)
{
	global_nvs_t *gnvs_ptr = gnvs;
	if (gnvs_ptr)
		gnvs_ptr->aslb = aslb;
}

static void power_well_enable(void)
{
	gtt_write(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_ENABLE);
	gtt_poll(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_STATE, HSW_PWR_WELL_STATE);
}

static void gma_pm_init_pre_vbios(struct device *dev)
{
	printk(BIOS_DEBUG, "GT Power Management Init\n");

	gtt_res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!gtt_res || !gtt_res->base)
		return;

	power_well_enable();

	/*
	 * Enable RC6
	 */

	/* Enable Force Wake */
	gtt_write(0x0a180, 1 << 5);
	gtt_write(0x0a188, 0x00010001);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 1 << 0);

	/* GT Settings */
	gtt_write_regs(haswell_gt_setup);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Mailbox Data - RC6 VIDS */
	gtt_write(0x138128, 0x00000000);

	/* Mailbox Command */
	gtt_write(0x138124, 0x80000004);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Enable PM Interrupts */
	gtt_write(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT |
		  GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD |
		  GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED |
		  GEN6_PM_RP_DOWN_EI_EXPIRED);

	/* Enable RC6 in idle */
	gtt_write(0x0a094, 0x00040000);

	/* PM Lock Settings */
	gtt_write_regs(haswell_gt_lock);
}

static void init_display_planes(void)
{
	int pipe, plane;

	/* Disable cursor mode */
	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		gtt_write(CURCNTR_IVB(pipe), CURSOR_MODE_DISABLE);
		gtt_write(CURBASE_IVB(pipe), 0x00000000);
	}

	/* Disable primary plane and set surface base address */
	for (plane = PLANE_A; plane <= PLANE_C; plane++) {
		gtt_write(DSPCNTR(plane), DISPLAY_PLANE_DISABLE);
		gtt_write(DSPSURF(plane), 0x00000000);
	}

	/* Disable VGA display */
	gtt_write(CPU_VGACNTRL, CPU_VGA_DISABLE);
}

static void gma_setup_panel(struct device *dev)
{
	struct northbridge_intel_haswell_config *conf = dev->chip_info;
	u32 reg32;

	printk(BIOS_DEBUG, "GT Power Management Init (post VBIOS)\n");

	/* Setup Digital Port Hotplug */
	reg32 = gtt_read(PCH_PORT_HOTPLUG);
	if (!reg32) {
		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
		gtt_write(PCH_PORT_HOTPLUG, reg32);
	}

	/* Setup Panel Power On Delays */
	reg32 = gtt_read(PCH_PP_ON_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_port_select & 0x3) << 30;
		reg32 |= (conf->gpu_panel_power_up_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_on_delay & 0x1fff);
		gtt_write(PCH_PP_ON_DELAYS, reg32);
	}

	/* Setup Panel Power Off Delays */
	reg32 = gtt_read(PCH_PP_OFF_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_power_down_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_off_delay & 0x1fff);
		gtt_write(PCH_PP_OFF_DELAYS, reg32);
	}

	/* Setup Panel Power Cycle Delay */
	if (conf->gpu_panel_power_cycle_delay) {
		reg32 = gtt_read(PCH_PP_DIVISOR);
		reg32 &= ~0xff;
		reg32 |= conf->gpu_panel_power_cycle_delay & 0xff;
		gtt_write(PCH_PP_DIVISOR, reg32);
	}

	/* Enforce the PCH PWM function, as Linux does.
	   The CPU PWM controls are disabled after reset. */
	if (conf->gpu_pch_backlight_pwm_hz) {
		/* The reference clock is either 24MHz or 135MHz. We can choose
		   either a 16 or a 128 step increment. Use 16 if we would
		   otherwise have fewer than 100 steps. */
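		/*
		 * Illustrative example (values not from this code): with the 24MHz
		 * LP reference clock, 128 steps cover requests up to
		 * 24MHz / 128 / 100 = 1875Hz, so a 200Hz request keeps the 128-step
		 * granularity and yields a period of 24MHz / 128 / 200Hz = 937.
		 */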
		const unsigned int refclock = CONFIG(INTEL_LYNXPOINT_LP) ? 24*MHz : 135*MHz;
		const unsigned int hz_limit = refclock / 128 / 100;
		unsigned int pwm_increment, pwm_period;
		u32 south_chicken2;

		south_chicken2 = gtt_read(SOUTH_CHICKEN2);
		if (conf->gpu_pch_backlight_pwm_hz > hz_limit) {
			pwm_increment = 16;
			south_chicken2 |= LPT_PWM_GRANULARITY;
		} else {
			pwm_increment = 128;
			south_chicken2 &= ~LPT_PWM_GRANULARITY;
		}
		gtt_write(SOUTH_CHICKEN2, south_chicken2);

		pwm_period = refclock / pwm_increment / conf->gpu_pch_backlight_pwm_hz;
		printk(BIOS_INFO,
		       "GMA: Setting backlight PWM frequency to %uMHz / %u / %u = %uHz\n",
		       refclock / MHz, pwm_increment, pwm_period,
		       DIV_ROUND_CLOSEST(refclock, pwm_increment * pwm_period));

		/* Start with a 50% duty cycle. */
		gtt_write(BLC_PWM_PCH_CTL2, pwm_period << 16 | pwm_period / 2);

		gtt_write(BLC_PWM_PCH_CTL1,
			  (conf->gpu_pch_backlight_polarity == GPU_BACKLIGHT_POLARITY_LOW) << 29 |
			  BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE);
	}

	/* Get display, pipeline, and DDI registers into a basic sane state */
	power_well_enable();

	init_display_planes();

	/*
	 * DDI-A params set:
	 * bit 0: Display detected (RO)
	 * bit 4: DDI A supports 4 lanes and DDI E is not used
	 * bit 7: DDI buffer is idle
	 */
	reg32 = DDI_BUF_IS_IDLE | DDI_INIT_DISPLAY_DETECTED;
	if (!conf->gpu_ddi_e_connected)
		reg32 |= DDI_A_4_LANES;
	gtt_write(DDI_BUF_CTL_A, reg32);

	/* Set FDI registers - is this required? */
	gtt_write(_FDI_RXA_MISC, 0x00200090);
	gtt_write(_FDI_RXA_MISC, 0x0a000000);

	/* Enable the handshake with PCH display when processing reset */
	gtt_write(NDE_RSTWRN_OPT, RST_PCH_HNDSHK_EN);

	/* Undocumented */
	gtt_write(0x42090, 0x04000000);
	gtt_write(0x9840, 0x00000000);
	gtt_write(0x42090, 0xa4000000);

	gtt_write(SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Undocumented */
	gtt_write(0x42080, 0x00004000);

	/* Prepare DDI buffers for DP and FDI */
	intel_prepare_ddi();

	/* Hot plug detect buffer enabled for port A */
	gtt_write(DIGITAL_PORT_HOTPLUG_CNTRL, DIGITAL_PORTA_HOTPLUG_ENABLE);

	/* Enable HPD buffer for digital port D and B */
	gtt_write(PCH_PORT_HOTPLUG, PORTD_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE);

	/*
	 * Bits 4:0 - Power cycle delay (default 0x6 --> 500ms)
	 * Bits 31:8 - Reference divider (0x0004af ----> 24MHz)
	 */
	gtt_write(PCH_PP_DIVISOR, 0x0004af06);
}

static void gma_pm_init_post_vbios(struct device *dev)
{
	int cdclk = 0;
	int devid = pci_read_config16(dev, PCI_DEVICE_ID);
	int gpu_is_ulx = 0;

	if (devid == 0x0a0e || devid == 0x0a1e)
		gpu_is_ulx = 1;

	/* CD Frequency */
	if ((gtt_read(0x42014) & 0x1000000) || gpu_is_ulx || haswell_is_ult())
		cdclk = 0; /* fixed frequency */
	else
		cdclk = 2; /* variable frequency */

	if (gpu_is_ulx || cdclk != 0)
		gtt_rmw(0x130040, 0xf7ffffff, 0x04000000);
	else
		gtt_rmw(0x130040, 0xf3ffffff, 0x00000000);

	/* More magic */
	if (haswell_is_ult() || gpu_is_ulx) {
		if (!gpu_is_ulx)
			gtt_write(0x138128, 0x00000000);
		else
			gtt_write(0x138128, 0x00000001);
		gtt_write(0x13812c, 0x00000000);
		gtt_write(0x138124, 0x80000017);
	}

	/* Disable Force Wake */
	gtt_write(0x0a188, 0x00010000);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 0 << 0);
	gtt_write(0x0a188, 0x00000001);
}

/* Enable SCI to ACPI _GPE._L06 */
static void gma_enable_swsci(void)
{
	u16 reg16;

	/* Clear DMISCI status */
	reg16 = inw(get_pmbase() + TCO1_STS);
	reg16 &= DMISCI_STS;
	outw(reg16, get_pmbase() + TCO1_STS);

	/* Clear and enable ACPI TCO SCI */
	enable_tco_sci();
}

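/*
 * Device init: make the IGD a bus master, run pre-VBIOS GT power management and panel
 * setup, bring up the display through libgfxinit or the option ROM, then finish GT
 * power management and hook up SWSCI and the OpRegion.
 */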
static void gma_func0_init(struct device *dev)
{
	int lightup_ok = 0;
	u32 reg32;

	/* IGD needs to be Bus Master */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Init graphics power management */
	gma_pm_init_pre_vbios(dev);

	/* Pre panel init */
	gma_setup_panel(dev);

	int vga_disable = (pci_read_config16(dev, GGC) & 2) >> 1;

	if (CONFIG(MAINBOARD_USE_LIBGFXINIT)) {
		if (vga_disable) {
			printk(BIOS_INFO,
			       "IGD is not decoding legacy VGA MEM and IO: skipping NATIVE graphic init\n");
		} else {
			printk(BIOS_SPEW, "NATIVE graphics, run native enable\n");
			gma_gfxinit(&lightup_ok);
			gfx_set_init_done(1);
		}
	}

	if (!lightup_ok) {
		printk(BIOS_SPEW, "FUI did not run; using VBIOS\n");
		mdelay(CONFIG_PRE_GRAPHICS_DELAY);
		pci_dev_init(dev);
	}

	/* Post panel init */
	gma_pm_init_post_vbios(dev);

	gma_enable_swsci();
	intel_gma_restore_opregion();
}

static void gma_generate_ssdt(struct device *dev)
{
	const struct northbridge_intel_haswell_config *chip = dev->chip_info;

	drivers_intel_gma_displays_ssdt_generate(&chip->gfx);
}

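/*
 * Write the IGD OpRegion at the current ACPI table position and publish its address
 * through the ASLB field in GNVS so ACPI code can find it.
 */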
static unsigned long gma_write_acpi_tables(struct device *const dev, unsigned long current,
					   struct acpi_rsdp *const rsdp)
{
	igd_opregion_t *opregion = (igd_opregion_t *)current;
	global_nvs_t *gnvs;

	if (intel_gma_init_igd_opregion(opregion) != CB_SUCCESS)
		return current;

	current += sizeof(igd_opregion_t);

	/* GNVS has already been set up */
	gnvs = cbmem_find(CBMEM_ID_ACPI_GNVS);
	if (gnvs) {
		/* IGD OpRegion Base Address */
		gma_set_gnvs_aslb(gnvs, (uintptr_t)opregion);
	} else {
		printk(BIOS_ERR, "Error: GNVS table not found.\n");
	}

	current = acpi_align_current(current);
	return current;
}

static struct pci_operations gma_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};

static struct device_operations gma_func0_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = gma_func0_init,
	.acpi_fill_ssdt = gma_generate_ssdt,
	.ops_pci = &gma_pci_ops,
	.write_acpi_tables = gma_write_acpi_tables,
};

static const unsigned short pci_device_ids[] = {
	0x0402, /* Desktop GT1 */
	0x0412, /* Desktop GT2 */
	0x0422, /* Desktop GT3 */
	0x0406, /* Mobile GT1 */
	0x0416, /* Mobile GT2 */
	0x0426, /* Mobile GT3 */
	0x0d16, /* Mobile 4+3 GT1 */
	0x0d26, /* Mobile 4+3 GT2 */
	0x0d36, /* Mobile 4+3 GT3 */
	0x0a06, /* ULT GT1 */
	0x0a16, /* ULT GT2 */
	0x0a26, /* ULT GT3 */
	0,
};

static const struct pci_driver pch_lpc __pci_driver = {
	.ops = &gma_func0_ops,
	.vendor = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids,
};