/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <arch/io.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <console/console.h>
#include <bootmode.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <drivers/intel/gma/i915_reg.h>
#include <drivers/intel/gma/i915.h>
#include <drivers/intel/gma/libgfxinit.h>
#include <cpu/intel/haswell/haswell.h>
#include <drivers/intel/gma/opregion.h>
#include <string.h>
#include <types.h>

#include "chip.h"
#include "haswell.h"

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/chromeos.h>
#endif

struct gt_reg {
	u32 reg;
	u32 andmask;
	u32 ormask;
};

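/*
 * Each entry below is applied by gtt_write_regs(): a non-zero andmask results in a
 * read-modify-write (reg = (reg & andmask) | ormask), while an andmask of zero means
 * ormask is written to the register directly.
 */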
static const struct gt_reg haswell_gt_setup[] = {
	/* Enable Counters */
	{ 0x0a248, 0x00000000, 0x00000016 },
	{ 0x0a000, 0x00000000, 0x00070020 },
	{ 0x0a180, 0xff3fffff, 0x15000000 },
	/* Enable DOP Clock Gating */
	{ 0x09424, 0x00000000, 0x000003fd },
	/* Enable Unit Level Clock Gating */
	{ 0x09400, 0x00000000, 0x00000080 },
	{ 0x09404, 0x00000000, 0x40401000 },
	{ 0x09408, 0x00000000, 0x00000000 },
	{ 0x0940c, 0x00000000, 0x02000001 },
	{ 0x0a008, 0x00000000, 0x08000000 },
	/* Wake Rate Limits */
	{ 0x0a090, 0xffffffff, 0x00000000 },
	{ 0x0a098, 0xffffffff, 0x03e80000 },
	{ 0x0a09c, 0xffffffff, 0x00280000 },
	{ 0x0a0a8, 0xffffffff, 0x0001e848 },
	{ 0x0a0ac, 0xffffffff, 0x00000019 },
	/* Render/Video/Blitter Idle Max Count */
	{ 0x02054, 0x00000000, 0x0000000a },
	{ 0x12054, 0x00000000, 0x0000000a },
	{ 0x22054, 0x00000000, 0x0000000a },
	/* RC Sleep / RCx Thresholds */
	{ 0x0a0b0, 0xffffffff, 0x00000000 },
	{ 0x0a0b4, 0xffffffff, 0x000003e8 },
	{ 0x0a0b8, 0xffffffff, 0x0000c350 },
	/* RP Settings */
	{ 0x0a010, 0xffffffff, 0x000f4240 },
	{ 0x0a014, 0xffffffff, 0x12060000 },
	{ 0x0a02c, 0xffffffff, 0x0000e808 },
	{ 0x0a030, 0xffffffff, 0x0003bd08 },
	{ 0x0a068, 0xffffffff, 0x000101d0 },
	{ 0x0a06c, 0xffffffff, 0x00055730 },
	{ 0x0a070, 0xffffffff, 0x0000000a },
	/* RP Control */
	{ 0x0a024, 0x00000000, 0x00000b92 },
	/* HW RC6 Control */
	{ 0x0a090, 0x00000000, 0x88040000 },
	/* Video Frequency Request */
	{ 0x0a00c, 0x00000000, 0x08000000 },
	{ 0 },
};

static const struct gt_reg haswell_gt_lock[] = {
	{ 0x0a248, 0xffffffff, 0x80000000 },
	{ 0x0a004, 0xffffffff, 0x00000010 },
	{ 0x0a080, 0xffffffff, 0x00000004 },
	{ 0x0a180, 0xffffffff, 0x80000000 },
	{ 0 },
};

/*
 * Some VGA option ROMs are used for several chipsets but only carry one PCI ID in
 * their header. If we encounter such an option ROM, we need to do the mapping
 * ourselves.
 */

u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x80860402: /* GT1 Desktop */
	case 0x80860406: /* GT1 Mobile */
	case 0x8086040a: /* GT1 Server */
	case 0x80860a06: /* GT1 ULT */

	case 0x80860412: /* GT2 Desktop */
	case 0x80860416: /* GT2 Mobile */
	case 0x8086041a: /* GT2 Server */
	case 0x80860a16: /* GT2 ULT */

	case 0x80860422: /* GT3 Desktop */
	case 0x80860426: /* GT3 Mobile */
	case 0x8086042a: /* GT3 Server */
	case 0x80860a26: /* GT3 ULT */

	case 0x80860d22: /* GT3e Desktop */
	case 0x80860d16: /* GT1 Mobile 4+3 */
	case 0x80860d26: /* GT2 Mobile 4+3, GT3e Mobile */
	case 0x80860d36: /* GT3 Mobile 4+3 */

		new_vendev = 0x80860406; /* GT1 Mobile */
		break;
	}

	return new_vendev;
}

/** FIXME: Seems to be outdated. */
/*
 * GTT is the Global Translation Table for the graphics pipeline. It is used to translate
 * graphics addresses to physical memory addresses. As in the CPU, GTTs map 4K pages.
 *
 * set_translation_table() adds a further bit of flexibility: it allows you to set a range
 * (the first two parameters) to point to a physical address (third parameter); the physical
 * address is incremented by a count (fourth parameter) for each entry in the range.
 *
 * Why do it this way? For ultrafast startup, we can point all the GTT entries at a single
 * page and set that page to 0s:
 *
 *   memset(physbase, 0, 4096);
 *   set_translation_table(0, 4250, physbase, 0);
 *
 * This takes about 2 ms, and is a win because zeroing all of the mapped pages would take
 * up to 200 ms.
 *
 * This call sets the GTT to point to a linear range of pages starting at physbase.
 */

#define GTT_PTE_BASE (2 << 20)
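/* The PTEs are written through the GTTMMADR (BAR 0) window, starting at a 2 MiB offset. */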

void set_translation_table(int start, int end, u64 base, int inc)
{
	int i;

	for (i = start; i < end; i++) {
		u64 physical_address = base + i * inc;

		/*
		 * Swizzle physical address bits 39:32 into PTE bits 11:4: shifting
		 * right by 28 moves bit 32 to bit 4, the 0xff0 mask keeps bits 11:4,
		 * and bit 0 marks the entry valid.
		 */
		u32 word = physical_address | ((physical_address >> 28) & 0xff0) | 1;

		/*
		 * Note: we've confirmed by checking the values that MRC does no useful
		 * setup before we run this.
		 */
		gtt_write(GTT_PTE_BASE + i * 4, word);
		gtt_read(GTT_PTE_BASE + i * 4);
	}
}

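/* MMIO resource (GTTMMADR, PCI BAR 0) of the IGD; looked up in gma_pm_init_pre_vbios(). */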
static struct resource *gtt_res = NULL;

u32 gtt_read(u32 reg)
{
	return read32(res2mmio(gtt_res, reg, 0));
}

void gtt_write(u32 reg, u32 data)
{
	write32(res2mmio(gtt_res, reg, 0), data);
}

static inline void gtt_rmw(u32 reg, u32 andmask, u32 ormask)
{
	u32 val = gtt_read(reg);
	val &= andmask;
	val |= ormask;
	gtt_write(reg, val);
}

static inline void gtt_write_regs(const struct gt_reg *gt)
{
	for (; gt && gt->reg; gt++) {
		if (gt->andmask)
			gtt_rmw(gt->reg, gt->andmask, gt->ormask);
		else
			gtt_write(gt->reg, gt->ormask);
	}
}

#define GTT_RETRY 1000
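/* Poll a GT register until (reg & mask) == value, for at most GTT_RETRY * 10 us (~10 ms). */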
int gtt_poll(u32 reg, u32 mask, u32 value)
{
	unsigned int try = GTT_RETRY;
	u32 data;

	while (try--) {
		data = gtt_read(reg);
		if ((data & mask) == value)
			return 1;

		udelay(10);
	}

	printk(BIOS_ERR, "GT init timeout\n");
	return 0;
}

static void power_well_enable(void)
{
	gtt_write(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_ENABLE);
	gtt_poll(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_STATE, HSW_PWR_WELL_STATE);
}

static void gma_pm_init_pre_vbios(struct device *dev)
{
	printk(BIOS_DEBUG, "GT Power Management Init\n");

	gtt_res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!gtt_res || !gtt_res->base)
		return;

	power_well_enable();

	/* Enable RC6 */

	/* Enable Force Wake */
	gtt_write(0x0a180, 1 << 5);
	gtt_write(0x0a188, 0x00010001);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 1 << 0);

	/* GT Settings */
	gtt_write_regs(haswell_gt_setup);

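	/*
	 * GT mailbox handshake (the "PCODE" mailbox, in Linux i915 parlance): the data
	 * register is 0x138128, the command register is 0x138124, and bit 31 of the
	 * command register is the busy flag, which hardware clears once the command has
	 * been consumed.
	 */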
	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Mailbox Data - RC6 VIDS */
	gtt_write(0x138128, 0x00000000);

	/* Mailbox Command */
	gtt_write(0x138124, 0x80000004);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Enable PM Interrupts */
	gtt_write(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT |
		  GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD |
		  GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED |
		  GEN6_PM_RP_DOWN_EI_EXPIRED);

	/* Enable RC6 in idle */
	gtt_write(0x0a094, 0x00040000);

	/* PM Lock Settings */
	gtt_write_regs(haswell_gt_lock);
}

static void init_display_planes(void)
{
	int pipe, plane;

	/* Disable cursor mode */
	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		gtt_write(CURCNTR_IVB(pipe), CURSOR_MODE_DISABLE);
		gtt_write(CURBASE_IVB(pipe), 0x00000000);
	}

	/* Disable primary plane and set surface base address */
	for (plane = PLANE_A; plane <= PLANE_C; plane++) {
		gtt_write(DSPCNTR(plane), DISPLAY_PLANE_DISABLE);
		gtt_write(DSPSURF(plane), 0x00000000);
	}

	/* Disable VGA display */
	gtt_write(CPU_VGACNTRL, CPU_VGA_DISABLE);
}

static void gma_setup_panel(struct device *dev)
{
	struct northbridge_intel_haswell_config *conf = config_of(dev);
	u32 reg32;

	printk(BIOS_DEBUG, "GT Power Management Init (post VBIOS)\n");

	/* Setup Digital Port Hotplug */
	reg32 = gtt_read(PCH_PORT_HOTPLUG);
	if (!reg32) {
		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
		gtt_write(PCH_PORT_HOTPLUG, reg32);
	}

	/* Setup Panel Power On Delays */
	reg32 = gtt_read(PCH_PP_ON_DELAYS);
	if (!reg32) {
		reg32 |= (conf->gpu_panel_power_up_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_on_delay & 0x1fff);
		gtt_write(PCH_PP_ON_DELAYS, reg32);
	}

	/* Setup Panel Power Off Delays */
	reg32 = gtt_read(PCH_PP_OFF_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_power_down_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_off_delay & 0x1fff);
		gtt_write(PCH_PP_OFF_DELAYS, reg32);
	}

	/* Setup Panel Power Cycle Delay */
	if (conf->gpu_panel_power_cycle_delay) {
		reg32 = gtt_read(PCH_PP_DIVISOR);
		reg32 &= ~0xff;
		reg32 |= conf->gpu_panel_power_cycle_delay & 0xff;
		gtt_write(PCH_PP_DIVISOR, reg32);
	}

	/* Enforce the PCH PWM function, as Linux does.
	   The CPU PWM controls are disabled after reset. */
	if (conf->gpu_pch_backlight_pwm_hz) {
		/* Reference clock is either 24MHz or 135MHz. We can choose
		   either a 16 or a 128 step increment. Use 16 if we would
		   otherwise have fewer than 100 steps. */
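		/*
		 * Worked example with illustrative numbers: on the 24MHz (LP) reference
		 * clock, hz_limit = 24,000,000 / 128 / 100 = 1875Hz. A requested 200Hz
		 * backlight frequency is below that, so the 128-step increment is used
		 * and pwm_period = 24,000,000 / 128 / 200 = 937.
		 */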
		const unsigned int refclock = CONFIG(INTEL_LYNXPOINT_LP) ? 24*MHz : 135*MHz;
		const unsigned int hz_limit = refclock / 128 / 100;
		unsigned int pwm_increment, pwm_period;
		u32 south_chicken2;

		south_chicken2 = gtt_read(SOUTH_CHICKEN2);
		if (conf->gpu_pch_backlight_pwm_hz > hz_limit) {
			pwm_increment = 16;
			south_chicken2 |= LPT_PWM_GRANULARITY;
		} else {
			pwm_increment = 128;
			south_chicken2 &= ~LPT_PWM_GRANULARITY;
		}
		gtt_write(SOUTH_CHICKEN2, south_chicken2);

		pwm_period = refclock / pwm_increment / conf->gpu_pch_backlight_pwm_hz;
		printk(BIOS_INFO,
		       "GMA: Setting backlight PWM frequency to %uMHz / %u / %u = %uHz\n",
		       refclock / MHz, pwm_increment, pwm_period,
		       DIV_ROUND_CLOSEST(refclock, pwm_increment * pwm_period));

		/* Start with a 50% duty cycle. */
		gtt_write(BLC_PWM_PCH_CTL2, pwm_period << 16 | pwm_period / 2);

		gtt_write(BLC_PWM_PCH_CTL1,
			  (conf->gpu_pch_backlight_polarity == GPU_BACKLIGHT_POLARITY_LOW) << 29 |
			  BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE);
	}

	/* Get display, pipeline, and DDI registers into a basic sane state */
	power_well_enable();

	init_display_planes();

	/*
	 * DDI-A params set:
	 * bit 0: Display detected (RO)
	 * bit 4: DDI A supports 4 lanes and DDI E is not used
	 * bit 7: DDI buffer is idle
	 */
	reg32 = DDI_BUF_IS_IDLE | DDI_INIT_DISPLAY_DETECTED;
	if (!conf->gpu_ddi_e_connected)
		reg32 |= DDI_A_4_LANES;
	gtt_write(DDI_BUF_CTL_A, reg32);

	/* Set FDI registers - is this required? */
	gtt_write(_FDI_RXA_MISC, 0x00200090);
	gtt_write(_FDI_RXA_MISC, 0x0a000000);

	/* Enable the handshake with PCH display when processing reset */
	gtt_write(NDE_RSTWRN_OPT, RST_PCH_HNDSHK_EN);

	/* Undocumented */
	gtt_write(0x42090, 0x04000000);
	gtt_write(0x9840, 0x00000000);
	gtt_write(0x42090, 0xa4000000);

	gtt_write(SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Undocumented */
	gtt_write(0x42080, 0x00004000);

	/* Prepare DDI buffers for DP and FDI */
	intel_prepare_ddi();

	/* Hot plug detect buffer enabled for port A */
	gtt_write(DIGITAL_PORT_HOTPLUG_CNTRL, DIGITAL_PORTA_HOTPLUG_ENABLE);

	/* Enable HPD buffer for digital port D and B */
	gtt_write(PCH_PORT_HOTPLUG, PORTD_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE);

	/*
	 * Bits 4:0 - Power cycle delay (default 0x6 --> 500ms)
	 * Bits 31:8 - Reference divider (0x0004af ----> 24MHz)
	 */
	gtt_write(PCH_PP_DIVISOR, 0x0004af06);
}

static void gma_pm_init_post_vbios(struct device *dev)
{
	int cdclk = 0;
	int devid = pci_read_config16(dev, PCI_DEVICE_ID);
	int gpu_is_ulx = 0;

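	/* Device IDs 0x0a0e and 0x0a1e are the Haswell ULX GT1/GT2 variants. */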
	if (devid == 0x0a0e || devid == 0x0a1e)
		gpu_is_ulx = 1;

	/* CD Frequency */
	if ((gtt_read(0x42014) & 0x1000000) || gpu_is_ulx || haswell_is_ult())
		cdclk = 0; /* fixed frequency */
	else
		cdclk = 2; /* variable frequency */

	if (gpu_is_ulx || cdclk != 0)
		gtt_rmw(0x130040, 0xf7ffffff, 0x04000000);
	else
		gtt_rmw(0x130040, 0xf3ffffff, 0x00000000);

	/* More magic */
	if (haswell_is_ult() || gpu_is_ulx) {
		if (!gpu_is_ulx)
			gtt_write(0x138128, 0x00000000);
		else
			gtt_write(0x138128, 0x00000001);
		gtt_write(0x13812c, 0x00000000);
		gtt_write(0x138124, 0x80000017);
	}

	/* Disable Force Wake */
	gtt_write(0x0a188, 0x00010000);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 0 << 0);
	gtt_write(0x0a188, 0x00000001);
}

/* Enable SCI to ACPI _GPE._L06 */
static void gma_enable_swsci(void)
{
	u16 reg16;

	/* Clear DMISCI status */
	reg16 = inw(get_pmbase() + TCO1_STS);
	reg16 &= DMISCI_STS;
	outw(reg16, get_pmbase() + TCO1_STS);

	/* Clear and enable ACPI TCO SCI */
	enable_tco_sci();
}


static void gma_func0_init(struct device *dev)
{
	int lightup_ok = 0;

	intel_gma_init_igd_opregion();

	/* Init graphics power management */
	gma_pm_init_pre_vbios(dev);

	/* Pre panel init */
	gma_setup_panel(dev);

	if (!CONFIG(NO_GFX_INIT))
		pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER);

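	/* GGC bit 1 (IVD) is set when the IGD does not decode legacy VGA cycles. */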
	int vga_disable = (pci_read_config16(dev, GGC) & 2) >> 1;

	if (CONFIG(MAINBOARD_USE_LIBGFXINIT)) {
		if (vga_disable) {
			printk(BIOS_INFO,
			       "IGD is not decoding legacy VGA MEM and IO: skipping NATIVE graphics init\n");
		} else {
			printk(BIOS_SPEW, "NATIVE graphics, run native enable\n");
			gma_gfxinit(&lightup_ok);
			gfx_set_init_done(1);
		}
	}

	if (!lightup_ok) {
		printk(BIOS_SPEW, "Native graphics did not run; falling back to VBIOS\n");
		mdelay(CONFIG_PRE_GRAPHICS_DELAY);
		pci_dev_init(dev);
	}

	/* Post panel init */
	gma_pm_init_post_vbios(dev);

	gma_enable_swsci();
}

static void gma_generate_ssdt(const struct device *dev)
{
	const struct northbridge_intel_haswell_config *chip = dev->chip_info;

	drivers_intel_gma_displays_ssdt_generate(&chip->gfx);
}

static struct device_operations gma_func0_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= gma_func0_init,
	.acpi_fill_ssdt		= gma_generate_ssdt,
	.ops_pci		= &pci_dev_ops_pci,
};

static const unsigned short pci_device_ids[] = {
	0x0402, /* Desktop GT1 */
	0x0412, /* Desktop GT2 */
	0x0422, /* Desktop GT3 */
	0x0d22, /* Desktop GT3e */
	0x0406, /* Mobile GT1 */
	0x0416, /* Mobile GT2 */
	0x0426, /* Mobile GT3 */
	0x0d16, /* Mobile 4+3 GT1 */
	0x0d26, /* Mobile 4+3 GT2, Mobile GT3e */
	0x0d36, /* Mobile 4+3 GT3 */
	0x0a06, /* ULT GT1 */
	0x0a16, /* ULT GT2 */
	0x0a26, /* ULT GT3 */
	0,
};

static const struct pci_driver pch_lpc __pci_driver = {
	.ops		= &gma_func0_ops,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.devices	= pci_device_ids,
};