/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <arch/io.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <console/console.h>
#include <bootmode.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <drivers/intel/gma/i915_reg.h>
#include <drivers/intel/gma/i915.h>
#include <drivers/intel/gma/libgfxinit.h>
#include <cpu/intel/haswell/haswell.h>
#include <drivers/intel/gma/opregion.h>
#include <string.h>
#include <types.h>

#include "chip.h"
#include "haswell.h"

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/chromeos.h>
#endif

struct gt_reg {
	u32 reg;
	u32 andmask;
	u32 ormask;
};
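
/*
 * Semantics of the tables below (applied by gtt_write_regs()): an entry with
 * andmask == 0 requests a plain write of ormask to the register; a non-zero
 * andmask requests a read-modify-write, i.e. reg = (reg & andmask) | ormask.
 */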

static const struct gt_reg haswell_gt_setup[] = {
	/* Enable Counters */
	{ 0x0a248, 0x00000000, 0x00000016 },
	{ 0x0a000, 0x00000000, 0x00070020 },
	{ 0x0a180, 0xff3fffff, 0x15000000 },
	/* Enable DOP Clock Gating */
	{ 0x09424, 0x00000000, 0x000003fd },
	/* Enable Unit Level Clock Gating */
	{ 0x09400, 0x00000000, 0x00000080 },
	{ 0x09404, 0x00000000, 0x40401000 },
	{ 0x09408, 0x00000000, 0x00000000 },
	{ 0x0940c, 0x00000000, 0x02000001 },
	{ 0x0a008, 0x00000000, 0x08000000 },
	/* Wake Rate Limits */
	{ 0x0a090, 0xffffffff, 0x00000000 },
	{ 0x0a098, 0xffffffff, 0x03e80000 },
	{ 0x0a09c, 0xffffffff, 0x00280000 },
	{ 0x0a0a8, 0xffffffff, 0x0001e848 },
	{ 0x0a0ac, 0xffffffff, 0x00000019 },
	/* Render/Video/Blitter Idle Max Count */
	{ 0x02054, 0x00000000, 0x0000000a },
	{ 0x12054, 0x00000000, 0x0000000a },
	{ 0x22054, 0x00000000, 0x0000000a },
	/* RC Sleep / RCx Thresholds */
	{ 0x0a0b0, 0xffffffff, 0x00000000 },
	{ 0x0a0b4, 0xffffffff, 0x000003e8 },
	{ 0x0a0b8, 0xffffffff, 0x0000c350 },
	/* RP Settings */
	{ 0x0a010, 0xffffffff, 0x000f4240 },
	{ 0x0a014, 0xffffffff, 0x12060000 },
	{ 0x0a02c, 0xffffffff, 0x0000e808 },
	{ 0x0a030, 0xffffffff, 0x0003bd08 },
	{ 0x0a068, 0xffffffff, 0x000101d0 },
	{ 0x0a06c, 0xffffffff, 0x00055730 },
	{ 0x0a070, 0xffffffff, 0x0000000a },
	/* RP Control */
	{ 0x0a024, 0x00000000, 0x00000b92 },
	/* HW RC6 Control */
	{ 0x0a090, 0x00000000, 0x88040000 },
	/* Video Frequency Request */
	{ 0x0a00c, 0x00000000, 0x08000000 },
	{ 0 },
};

static const struct gt_reg haswell_gt_lock[] = {
	{ 0x0a248, 0xffffffff, 0x80000000 },
	{ 0x0a004, 0xffffffff, 0x00000010 },
	{ 0x0a080, 0xffffffff, 0x00000004 },
	{ 0x0a180, 0xffffffff, 0x80000000 },
	{ 0 },
};

/*
 * Some VGA option ROMs are used for several chipsets but only carry one PCI ID in their
 * header. If we encounter such an option ROM, we need to do the mapping ourselves.
 */

u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x80860402: /* GT1 Desktop */
	case 0x80860406: /* GT1 Mobile */
	case 0x8086040a: /* GT1 Server */
	case 0x80860a06: /* GT1 ULT */

	case 0x80860412: /* GT2 Desktop */
	case 0x80860416: /* GT2 Mobile */
	case 0x8086041a: /* GT2 Server */
	case 0x80860a16: /* GT2 ULT */

	case 0x80860422: /* GT3 Desktop */
	case 0x80860426: /* GT3 Mobile */
	case 0x8086042a: /* GT3 Server */
	case 0x80860a26: /* GT3 ULT */

	case 0x80860d22: /* GT3e Desktop */
	case 0x80860d16: /* GT1 Mobile 4+3 */
	case 0x80860d26: /* GT2 Mobile 4+3, GT3e Mobile */
	case 0x80860d36: /* GT3 Mobile 4+3 */

		new_vendev = 0x80860406; /* GT1 Mobile */
		break;
	}

	return new_vendev;
}
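
/*
 * Example: a GT3e desktop IGD reports PCI ID 8086:0d22, which a single-ID
 * option ROM carrying only 8086:0406 in its header would not match; the
 * switch above remaps all listed IDs to 8086:0406 so such a VBIOS is still
 * accepted.
 */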

/** FIXME: Seems to be outdated. */
/*
 * GTT is the Global Translation Table for the graphics pipeline. It is used to translate
 * graphics addresses to physical memory addresses. As in the CPU, GTTs map 4K pages.
 *
 * The set_translation_table() function below adds a further bit of flexibility: it allows
 * you to set a range (the first two parameters) to point to a physical address (third
 * parameter); the physical address is incremented by a count (fourth parameter) for each
 * GTT entry in the range.
 *
 * Why do it this way? For ultrafast startup, we can point all the GTT entries to one page,
 * and set that page to 0s:
 *
 * memset(physbase, 0, 4096);
 * set_translation_table(0, 4250, physbase, 0);
 *
 * This takes about 2 ms, and is a win because zeroing the page takes up to 200 ms.
 *
 * With a non-zero increment, the call instead sets the GTT to point to a linear range of
 * pages starting at physbase.
 */

#define GTT_PTE_BASE (2 << 20)

void set_translation_table(int start, int end, u64 base, int inc)
{
	int i;

	for (i = start; i < end; i++) {
		u64 physical_address = base + i * inc;

		/* Swizzle address bits 39:32 into PTE bits 11:4 */
		u32 word = physical_address | ((physical_address >> 28) & 0xff0) | 1;

		/*
		 * Note: we've confirmed by checking the values that MRC does no useful
		 * setup before we run this.
		 */
		gtt_write(GTT_PTE_BASE + i * 4, word);
		gtt_read(GTT_PTE_BASE + i * 4);
	}
}
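
/*
 * Worked example of the PTE encoding above (illustration only): for a 4 KiB
 * aligned physical_address of 0x1_2345_6000, bits 39:32 are 0x01, so
 * (physical_address >> 28) & 0xff0 yields 0x010, and the resulting PTE is
 * 0x23456011: the low address bits, the high address bits re-encoded at
 * bits 11:4, and bit 0 set as the valid bit.
 */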

static struct resource *gtt_res = NULL;

u32 gtt_read(u32 reg)
{
	return read32(res2mmio(gtt_res, reg, 0));
}

void gtt_write(u32 reg, u32 data)
{
	write32(res2mmio(gtt_res, reg, 0), data);
}

static inline void gtt_rmw(u32 reg, u32 andmask, u32 ormask)
{
	u32 val = gtt_read(reg);
	val &= andmask;
	val |= ormask;
	gtt_write(reg, val);
}

static inline void gtt_write_regs(const struct gt_reg *gt)
{
	for (; gt && gt->reg; gt++) {
		if (gt->andmask)
			gtt_rmw(gt->reg, gt->andmask, gt->ormask);
		else
			gtt_write(gt->reg, gt->ormask);
	}
}
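
/*
 * For example, the haswell_gt_setup entry { 0x0a180, 0xff3fffff, 0x15000000 }
 * is applied as a read-modify-write: bits 23:22 of register 0x0a180 are
 * cleared and bits 28, 26 and 24 are set, while all other bits are preserved.
 */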

#define GTT_RETRY 1000
int gtt_poll(u32 reg, u32 mask, u32 value)
{
	unsigned int try = GTT_RETRY;
	u32 data;

	while (try--) {
		data = gtt_read(reg);
		if ((data & mask) == value)
			return 1;

		udelay(10);
	}

	printk(BIOS_ERR, "GT init timeout\n");
	return 0;
}

static void power_well_enable(void)
{
	gtt_write(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_ENABLE);
	gtt_poll(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_STATE, HSW_PWR_WELL_STATE);
}

static void gma_pm_init_pre_vbios(struct device *dev)
{
	printk(BIOS_DEBUG, "GT Power Management Init\n");

	gtt_res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!gtt_res || !gtt_res->base)
		return;

	power_well_enable();

	/* Enable RC6 */

	/* Enable Force Wake */
	gtt_write(0x0a180, 1 << 5);
	gtt_write(0x0a188, 0x00010001);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 1 << 0);

	/* GT Settings */
	gtt_write_regs(haswell_gt_setup);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Mailbox Data - RC6 VIDS */
	gtt_write(0x138128, 0x00000000);

	/* Mailbox Command */
	gtt_write(0x138124, 0x80000004);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Enable PM Interrupts */
	gtt_write(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT |
		  GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD |
		  GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED |
		  GEN6_PM_RP_DOWN_EI_EXPIRED);

	/* Enable RC6 in idle */
	gtt_write(0x0a094, 0x00040000);

	/* PM Lock Settings */
	gtt_write_regs(haswell_gt_lock);
}

static void init_display_planes(void)
{
	int pipe, plane;

	/* Disable cursor mode */
	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		gtt_write(CURCNTR_IVB(pipe), CURSOR_MODE_DISABLE);
		gtt_write(CURBASE_IVB(pipe), 0x00000000);
	}

	/* Disable primary plane and set surface base address */
	for (plane = PLANE_A; plane <= PLANE_C; plane++) {
		gtt_write(DSPCNTR(plane), DISPLAY_PLANE_DISABLE);
		gtt_write(DSPSURF(plane), 0x00000000);
	}

	/* Disable VGA display */
	gtt_write(CPU_VGACNTRL, CPU_VGA_DISABLE);
}

static void gma_setup_panel(struct device *dev)
{
	struct northbridge_intel_haswell_config *conf = config_of(dev);
	u32 reg32;

	/* Setup Digital Port Hotplug */
	reg32 = gtt_read(PCH_PORT_HOTPLUG);
	if (!reg32) {
		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
		gtt_write(PCH_PORT_HOTPLUG, reg32);
	}

	/* Setup Panel Power On Delays */
	reg32 = gtt_read(PCH_PP_ON_DELAYS);
	if (!reg32) {
		reg32 |= (conf->gpu_panel_power_up_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_on_delay & 0x1fff);
		gtt_write(PCH_PP_ON_DELAYS, reg32);
	}

	/* Setup Panel Power Off Delays */
	reg32 = gtt_read(PCH_PP_OFF_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_power_down_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_off_delay & 0x1fff);
		gtt_write(PCH_PP_OFF_DELAYS, reg32);
	}

	/* Setup Panel Power Cycle Delay */
	if (conf->gpu_panel_power_cycle_delay) {
		reg32 = gtt_read(PCH_PP_DIVISOR);
		reg32 &= ~0xff;
		reg32 |= conf->gpu_panel_power_cycle_delay & 0xff;
		gtt_write(PCH_PP_DIVISOR, reg32);
	}

	/* Enforce use of the PCH PWM function, as Linux does.
	   The CPU PWM controls are disabled after reset. */
	if (conf->gpu_pch_backlight_pwm_hz) {
		/* The reference clock is either 24 MHz or 135 MHz. We can choose
		   either a 16 or a 128 step increment. Use 16 if we would
		   have less than 100 steps otherwise. */
		const unsigned int refclock = CONFIG(INTEL_LYNXPOINT_LP) ? 24*MHz : 135*MHz;
		const unsigned int hz_limit = refclock / 128 / 100;
		unsigned int pwm_increment, pwm_period;
		u32 south_chicken2;

		south_chicken2 = gtt_read(SOUTH_CHICKEN2);
		if (conf->gpu_pch_backlight_pwm_hz > hz_limit) {
			pwm_increment = 16;
			south_chicken2 |= LPT_PWM_GRANULARITY;
		} else {
			pwm_increment = 128;
			south_chicken2 &= ~LPT_PWM_GRANULARITY;
		}
		gtt_write(SOUTH_CHICKEN2, south_chicken2);

		pwm_period = refclock / pwm_increment / conf->gpu_pch_backlight_pwm_hz;
		printk(BIOS_INFO,
		       "GMA: Setting backlight PWM frequency to %uMHz / %u / %u = %uHz\n",
		       refclock / MHz, pwm_increment, pwm_period,
		       DIV_ROUND_CLOSEST(refclock, pwm_increment * pwm_period));

		/* Start with a 50% duty cycle. */
		gtt_write(BLC_PWM_PCH_CTL2, pwm_period << 16 | pwm_period / 2);

		gtt_write(BLC_PWM_PCH_CTL1,
			  (conf->gpu_pch_backlight_polarity == GPU_BACKLIGHT_POLARITY_LOW) << 29 |
			  BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE);
	}
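
	/*
	 * Worked example for the PWM setup above (illustrative numbers only):
	 * with the 24 MHz Lynx Point-LP reference clock and a requested
	 * gpu_pch_backlight_pwm_hz of 200, hz_limit = 24000000 / 128 / 100 =
	 * 1875, so the 128-step increment is chosen and pwm_period =
	 * 24000000 / 128 / 200 = 937, giving an actual frequency of about
	 * 200 Hz and a 50% duty value of 468.
	 */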

	/* Get display, pipeline, and DDI registers into a basic sane state */
	power_well_enable();

	init_display_planes();

	/*
	 * DDI-A params set:
	 * bit 0: Display detected (RO)
	 * bit 4: DDI A supports 4 lanes and DDI E is not used
	 * bit 7: DDI buffer is idle
	 */
	reg32 = DDI_BUF_IS_IDLE | DDI_INIT_DISPLAY_DETECTED;
	if (!conf->gpu_ddi_e_connected)
		reg32 |= DDI_A_4_LANES;
	gtt_write(DDI_BUF_CTL_A, reg32);

	/* Set FDI registers - is this required? */
	gtt_write(_FDI_RXA_MISC, 0x00200090);
	gtt_write(_FDI_RXA_MISC, 0x0a000000);

	/* Enable the handshake with PCH display when processing reset */
	gtt_write(NDE_RSTWRN_OPT, RST_PCH_HNDSHK_EN);

	/* Undocumented */
	gtt_write(0x42090, 0x04000000);
	gtt_write(0x9840, 0x00000000);
	gtt_write(0x42090, 0xa4000000);

	gtt_write(SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Undocumented */
	gtt_write(0x42080, 0x00004000);

	/* Prepare DDI buffers for DP and FDI */
	intel_prepare_ddi();

	/* Hot plug detect buffer enabled for port A */
	gtt_write(DIGITAL_PORT_HOTPLUG_CNTRL, DIGITAL_PORTA_HOTPLUG_ENABLE);

	/* Enable HPD buffer for digital ports D and B */
	gtt_write(PCH_PORT_HOTPLUG, PORTD_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE);

	/*
	 * Bits 4:0  - Power cycle delay (default 0x6 --> 500 ms)
	 * Bits 31:8 - Reference divider (0x0004af --> 24 MHz)
	 */
	gtt_write(PCH_PP_DIVISOR, 0x0004af06);
}

static void gma_pm_init_post_vbios(struct device *dev)
{
	int cdclk = 0;
	int devid = pci_read_config16(dev, PCI_DEVICE_ID);
	int gpu_is_ulx = 0;

	if (devid == 0x0a0e || devid == 0x0a1e)
		gpu_is_ulx = 1;

	/* CD Frequency */
	if ((gtt_read(0x42014) & 0x1000000) || gpu_is_ulx || haswell_is_ult())
		cdclk = 0; /* fixed frequency */
	else
		cdclk = 2; /* variable frequency */

	if (gpu_is_ulx || cdclk != 0)
		gtt_rmw(0x130040, 0xf7ffffff, 0x04000000);
	else
		gtt_rmw(0x130040, 0xf3ffffff, 0x00000000);

	/* More magic */
	if (haswell_is_ult() || gpu_is_ulx) {
		if (!gpu_is_ulx)
			gtt_write(0x138128, 0x00000000);
		else
			gtt_write(0x138128, 0x00000001);
		gtt_write(0x13812c, 0x00000000);
		gtt_write(0x138124, 0x80000017);
	}

	/* Disable Force Wake */
	gtt_write(0x0a188, 0x00010000);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 0 << 0);
	gtt_write(0x0a188, 0x00000001);
}

/* Enable SCI to ACPI _GPE._L06 */
static void gma_enable_swsci(void)
{
	u16 reg16;

	/* Clear DMISCI status */
	reg16 = inw(get_pmbase() + TCO1_STS);
	reg16 &= DMISCI_STS;
	outw(reg16, get_pmbase() + TCO1_STS);

	/* Clear and enable ACPI TCO SCI */
	enable_tco_sci();
}

static void gma_func0_init(struct device *dev)
{
	int lightup_ok = 0;

	intel_gma_init_igd_opregion();

	/* Init graphics power management */
	gma_pm_init_pre_vbios(dev);

	/* Pre panel init */
	gma_setup_panel(dev);

	if (!CONFIG(NO_GFX_INIT))
		pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER);

	int vga_disable = (pci_read_config16(dev, GGC) & 2) >> 1;

	if (CONFIG(MAINBOARD_USE_LIBGFXINIT)) {
		if (vga_disable) {
			printk(BIOS_INFO,
			       "IGD is not decoding legacy VGA MEM and IO: skipping NATIVE graphics init\n");
		} else {
			printk(BIOS_SPEW, "NATIVE graphics, run native enable\n");
			gma_gfxinit(&lightup_ok);
			gfx_set_init_done(1);
		}
	}

	if (!lightup_ok) {
		printk(BIOS_SPEW, "FUI did not run; using VBIOS\n");
		mdelay(CONFIG_PRE_GRAPHICS_DELAY);
		pci_dev_init(dev);
	}

	printk(BIOS_DEBUG, "GT Power Management Init (post VBIOS)\n");

	gma_pm_init_post_vbios(dev);

	gma_enable_swsci();
}

static void gma_generate_ssdt(const struct device *dev)
{
	const struct northbridge_intel_haswell_config *chip = dev->chip_info;

	drivers_intel_gma_displays_ssdt_generate(&chip->gfx);
}

static struct device_operations gma_func0_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= gma_func0_init,
	.acpi_fill_ssdt		= gma_generate_ssdt,
	.ops_pci		= &pci_dev_ops_pci,
};

static const unsigned short pci_device_ids[] = {
	0x0402, /* Desktop GT1 */
	0x0412, /* Desktop GT2 */
	0x0422, /* Desktop GT3 */
	0x0d22, /* Desktop GT3e */
	0x0406, /* Mobile GT1 */
	0x0416, /* Mobile GT2 */
	0x0426, /* Mobile GT3 */
	0x0d16, /* Mobile 4+3 GT1 */
	0x0d26, /* Mobile 4+3 GT2, Mobile GT3e */
	0x0d36, /* Mobile 4+3 GT3 */
	0x0a06, /* ULT GT1 */
	0x0a16, /* ULT GT2 */
	0x0a26, /* ULT GT3 */
	0,
};

static const struct pci_driver gma __pci_driver = {
	.ops		= &gma_func0_ops,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.devices	= pci_device_ids,
};