/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <arch/io.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <cbmem.h>
#include <console/console.h>
#include <bootmode.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <drivers/intel/gma/i915_reg.h>
#include <drivers/intel/gma/i915.h>
#include <drivers/intel/gma/libgfxinit.h>
#include <cpu/intel/haswell/haswell.h>
#include <drivers/intel/gma/opregion.h>
#include <southbridge/intel/lynxpoint/nvs.h>
#include <string.h>
#include <types.h>

#include "chip.h"
#include "haswell.h"

#if CONFIG(CHROMEOS)
#include <vendorcode/google/chromeos/chromeos.h>
#endif

struct gt_reg {
	u32 reg;
	u32 andmask;
	u32 ormask;
};
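
/*
 * Each gt_reg entry is applied by gtt_write_regs() below: a non-zero andmask means
 * "read the register, AND it with andmask, OR in ormask and write it back", while an
 * andmask of 0 means "write ormask to the register directly".
 */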

static const struct gt_reg haswell_gt_setup[] = {
	/* Enable Counters */
	{ 0x0a248, 0x00000000, 0x00000016 },
	{ 0x0a000, 0x00000000, 0x00070020 },
	{ 0x0a180, 0xff3fffff, 0x15000000 },
	/* Enable DOP Clock Gating */
	{ 0x09424, 0x00000000, 0x000003fd },
	/* Enable Unit Level Clock Gating */
	{ 0x09400, 0x00000000, 0x00000080 },
	{ 0x09404, 0x00000000, 0x40401000 },
	{ 0x09408, 0x00000000, 0x00000000 },
	{ 0x0940c, 0x00000000, 0x02000001 },
	{ 0x0a008, 0x00000000, 0x08000000 },
	/* Wake Rate Limits */
	{ 0x0a090, 0xffffffff, 0x00000000 },
	{ 0x0a098, 0xffffffff, 0x03e80000 },
	{ 0x0a09c, 0xffffffff, 0x00280000 },
	{ 0x0a0a8, 0xffffffff, 0x0001e848 },
	{ 0x0a0ac, 0xffffffff, 0x00000019 },
	/* Render/Video/Blitter Idle Max Count */
	{ 0x02054, 0x00000000, 0x0000000a },
	{ 0x12054, 0x00000000, 0x0000000a },
	{ 0x22054, 0x00000000, 0x0000000a },
	/* RC Sleep / RCx Thresholds */
	{ 0x0a0b0, 0xffffffff, 0x00000000 },
	{ 0x0a0b4, 0xffffffff, 0x000003e8 },
	{ 0x0a0b8, 0xffffffff, 0x0000c350 },
	/* RP Settings */
	{ 0x0a010, 0xffffffff, 0x000f4240 },
	{ 0x0a014, 0xffffffff, 0x12060000 },
	{ 0x0a02c, 0xffffffff, 0x0000e808 },
	{ 0x0a030, 0xffffffff, 0x0003bd08 },
	{ 0x0a068, 0xffffffff, 0x000101d0 },
	{ 0x0a06c, 0xffffffff, 0x00055730 },
	{ 0x0a070, 0xffffffff, 0x0000000a },
	/* RP Control */
	{ 0x0a024, 0x00000000, 0x00000b92 },
	/* HW RC6 Control */
	{ 0x0a090, 0x00000000, 0x88040000 },
	/* Video Frequency Request */
	{ 0x0a00c, 0x00000000, 0x08000000 },
	{ 0 },
};

static const struct gt_reg haswell_gt_lock[] = {
	{ 0x0a248, 0xffffffff, 0x80000000 },
	{ 0x0a004, 0xffffffff, 0x00000010 },
	{ 0x0a080, 0xffffffff, 0x00000004 },
	{ 0x0a180, 0xffffffff, 0x80000000 },
	{ 0 },
};

/*
 * Some VGA option roms are used for several chipsets but they only have one PCI ID in their
 * header. If we encounter such an option rom, we need to do the mapping ourselves.
 */

u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev = vendev;

	switch (vendev) {
	case 0x80860402:		/* GT1 Desktop */
	case 0x80860406:		/* GT1 Mobile */
	case 0x8086040a:		/* GT1 Server */
	case 0x80860a06:		/* GT1 ULT */

	case 0x80860412:		/* GT2 Desktop */
	case 0x80860416:		/* GT2 Mobile */
	case 0x8086041a:		/* GT2 Server */
	case 0x80860a16:		/* GT2 ULT */

	case 0x80860422:		/* GT3 Desktop */
	case 0x80860426:		/* GT3 Mobile */
	case 0x8086042a:		/* GT3 Server */
	case 0x80860a26:		/* GT3 ULT */

		new_vendev = 0x80860406;	/* GT1 Mobile */
		break;
	}

	return new_vendev;
}

/** FIXME: Seems to be outdated. */
/*
 * GTT is the Global Translation Table for the graphics pipeline. It is used to translate
 * graphics addresses to physical memory addresses. As in the CPU, GTTs map 4K pages.
 *
 * The setgtt function adds a further bit of flexibility: it allows you to set a range (the
 * first two parameters) to point to a physical address (third parameter); the physical address
 * is incremented by a count (fourth parameter) for each GTT entry in the range.
 *
 * Why do it this way? For ultrafast startup, we can point all the GTT entries at a single
 * page, and set that page to 0s:
 *
 * memset(physbase, 0, 4096);
 * setgtt(0, 4250, physbase, 0);
 *
 * This takes about 2 ms, and is a win because zeroing every page individually takes up to
 * 200 ms.
 *
 * This call sets the GTT to point to a linear range of pages starting at physbase.
 */

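/*
 * On this GPU generation the GTT entries live in the same BAR as the GT MMIO registers:
 * the first 2 MiB of GTTMMADR are register space, and the page-table entries start right
 * after them at offset 2 MiB, one 32-bit entry per 4 KiB page.
 */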
#define GTT_PTE_BASE (2 << 20)

void set_translation_table(int start, int end, u64 base, int inc)
{
	int i;

	for (i = start; i < end; i++) {
		u64 physical_address = base + i * inc;

		/* swizzle the 32:39 bits to 4:11 */
		u32 word = physical_address | ((physical_address >> 28) & 0xff0) | 1;

		/*
		 * Note: we've confirmed by checking the values that MRC does no useful
		 * setup before we run this.
		 */
		gtt_write(GTT_PTE_BASE + i * 4, word);
		gtt_read(GTT_PTE_BASE + i * 4);
	}
}
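
/*
 * Example usage (the fast-startup trick from the comment above, with the current function
 * name; physbase is a hypothetical physical address of a 4 KiB page that has already been
 * zeroed):
 *
 *	set_translation_table(0, 4250, physbase, 0);
 *
 * With inc == 0 every entry in the range maps that same zeroed page.
 */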

static struct resource *gtt_res = NULL;

u32 gtt_read(u32 reg)
{
	return read32(res2mmio(gtt_res, reg, 0));
}

void gtt_write(u32 reg, u32 data)
{
	write32(res2mmio(gtt_res, reg, 0), data);
}

static inline void gtt_rmw(u32 reg, u32 andmask, u32 ormask)
{
	u32 val = gtt_read(reg);
	val &= andmask;
	val |= ormask;
	gtt_write(reg, val);
}

static inline void gtt_write_regs(const struct gt_reg *gt)
{
	for (; gt && gt->reg; gt++) {
		if (gt->andmask)
			gtt_rmw(gt->reg, gt->andmask, gt->ormask);
		else
			gtt_write(gt->reg, gt->ormask);
	}
}

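/*
 * Poll a GT register until the bits selected by mask read back as value, giving up after
 * GTT_RETRY attempts spaced 10 us apart (roughly 10 ms in total). Returns 1 on success,
 * 0 on timeout.
 */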
#define GTT_RETRY 1000
int gtt_poll(u32 reg, u32 mask, u32 value)
{
	unsigned int try = GTT_RETRY;
	u32 data;

	while (try--) {
		data = gtt_read(reg);
		if ((data & mask) == value)
			return 1;

		udelay(10);
	}

	printk(BIOS_ERR, "GT init timeout\n");
	return 0;
}

uintptr_t gma_get_gnvs_aslb(const void *gnvs)
{
	const global_nvs_t *gnvs_ptr = gnvs;
	return (uintptr_t)(gnvs_ptr ? gnvs_ptr->aslb : 0);
}

void gma_set_gnvs_aslb(void *gnvs, uintptr_t aslb)
{
	global_nvs_t *gnvs_ptr = gnvs;
	if (gnvs_ptr)
		gnvs_ptr->aslb = aslb;
}

static void power_well_enable(void)
{
	gtt_write(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_ENABLE);
	gtt_poll(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_STATE, HSW_PWR_WELL_STATE);
}

static void gma_pm_init_pre_vbios(struct device *dev)
{
	printk(BIOS_DEBUG, "GT Power Management Init\n");

	gtt_res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!gtt_res || !gtt_res->base)
		return;

	power_well_enable();

	/* Enable RC6 */

	/* Enable Force Wake */
	gtt_write(0x0a180, 1 << 5);
	gtt_write(0x0a188, 0x00010001);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 1 << 0);

	/* GT Settings */
	gtt_write_regs(haswell_gt_setup);

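	/*
	 * 0x138124 and 0x138128 appear to match the GEN6_PCODE_MAILBOX and GEN6_PCODE_DATA
	 * registers used by the Linux i915 driver; bit 31 of the mailbox is the busy flag,
	 * so polling it back to 0 waits until PCODE has accepted the command.
	 */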
	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Mailbox Data - RC6 VIDS */
	gtt_write(0x138128, 0x00000000);

	/* Mailbox Command */
	gtt_write(0x138124, 0x80000004);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Enable PM Interrupts */
	gtt_write(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT |
		  GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD |
		  GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED |
		  GEN6_PM_RP_DOWN_EI_EXPIRED);

	/* Enable RC6 in idle */
	gtt_write(0x0a094, 0x00040000);

	/* PM Lock Settings */
	gtt_write_regs(haswell_gt_lock);
}

static void init_display_planes(void)
{
	int pipe, plane;

	/* Disable cursor mode */
	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
		gtt_write(CURCNTR_IVB(pipe), CURSOR_MODE_DISABLE);
		gtt_write(CURBASE_IVB(pipe), 0x00000000);
	}

	/* Disable primary plane and set surface base address */
	for (plane = PLANE_A; plane <= PLANE_C; plane++) {
		gtt_write(DSPCNTR(plane), DISPLAY_PLANE_DISABLE);
		gtt_write(DSPSURF(plane), 0x00000000);
	}

	/* Disable VGA display */
	gtt_write(CPU_VGACNTRL, CPU_VGA_DISABLE);
}

static void gma_setup_panel(struct device *dev)
{
	struct northbridge_intel_haswell_config *conf = dev->chip_info;
	u32 reg32;

	printk(BIOS_DEBUG, "GT Power Management Init (post VBIOS)\n");

	/* Setup Digital Port Hotplug */
	reg32 = gtt_read(PCH_PORT_HOTPLUG);
	if (!reg32) {
		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
		gtt_write(PCH_PORT_HOTPLUG, reg32);
	}

	/* Setup Panel Power On Delays */
	reg32 = gtt_read(PCH_PP_ON_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_port_select & 0x3) << 30;
		reg32 |= (conf->gpu_panel_power_up_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_on_delay & 0x1fff);
		gtt_write(PCH_PP_ON_DELAYS, reg32);
	}

	/* Setup Panel Power Off Delays */
	reg32 = gtt_read(PCH_PP_OFF_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_power_down_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_off_delay & 0x1fff);
		gtt_write(PCH_PP_OFF_DELAYS, reg32);
	}

	/* Setup Panel Power Cycle Delay */
	if (conf->gpu_panel_power_cycle_delay) {
		reg32 = gtt_read(PCH_PP_DIVISOR);
		reg32 &= ~0xff;
		reg32 |= conf->gpu_panel_power_cycle_delay & 0xff;
		gtt_write(PCH_PP_DIVISOR, reg32);
	}

	/* Enable Backlight if needed */
	if (conf->gpu_cpu_backlight) {
		gtt_write(BLC_PWM_CPU_CTL2, BLC_PWM2_ENABLE);
		gtt_write(BLC_PWM_CPU_CTL, conf->gpu_cpu_backlight);
	}
	if (conf->gpu_pch_backlight) {
		gtt_write(BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE);
		gtt_write(BLC_PWM_PCH_CTL2, conf->gpu_pch_backlight);
	}

	/* Get display, pipeline, and DDI registers into a basic sane state */
	power_well_enable();

	init_display_planes();

	/*
	 * DDI-A params set:
	 * bit 0: Display detected (RO)
	 * bit 4: DDI A supports 4 lanes and DDI E is not used
	 * bit 7: DDI buffer is idle
	 */
	reg32 = DDI_BUF_IS_IDLE | DDI_INIT_DISPLAY_DETECTED;
	if (!conf->gpu_ddi_e_connected)
		reg32 |= DDI_A_4_LANES;
	gtt_write(DDI_BUF_CTL_A, reg32);

	/* Set FDI registers - is this required? */
	gtt_write(_FDI_RXA_MISC, 0x00200090);
	gtt_write(_FDI_RXA_MISC, 0x0a000000);

	/* Enable the handshake with PCH display when processing reset */
	gtt_write(NDE_RSTWRN_OPT, RST_PCH_HNDSHK_EN);

	/* Undocumented */
	gtt_write(0x42090, 0x04000000);
	gtt_write(0x9840, 0x00000000);
	gtt_write(0x42090, 0xa4000000);

	gtt_write(SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Undocumented */
	gtt_write(0x42080, 0x00004000);

	/* Prepare DDI buffers for DP and FDI */
	intel_prepare_ddi();

	/* Hot plug detect buffer enabled for port A */
	gtt_write(DIGITAL_PORT_HOTPLUG_CNTRL, DIGITAL_PORTA_HOTPLUG_ENABLE);

	/* Enable HPD buffer for digital port D and B */
	gtt_write(PCH_PORT_HOTPLUG, PORTD_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE);

	/*
	 * Bits 4:0  - Power cycle delay (default 0x6 --> 500ms)
	 * Bits 31:8 - Reference divider (0x0004af ----> 24MHz)
	 */
	gtt_write(PCH_PP_DIVISOR, 0x0004af06);
}

static void gma_pm_init_post_vbios(struct device *dev)
{
	int cdclk = 0;
	int devid = pci_read_config16(dev, PCI_DEVICE_ID);
	int gpu_is_ulx = 0;

	if (devid == 0x0a0e || devid == 0x0a1e)
		gpu_is_ulx = 1;

	/* CD Frequency */
	if ((gtt_read(0x42014) & 0x1000000) || gpu_is_ulx || haswell_is_ult())
		cdclk = 0; /* fixed frequency */
	else
		cdclk = 2; /* variable frequency */

	if (gpu_is_ulx || cdclk != 0)
		gtt_rmw(0x130040, 0xf7ffffff, 0x04000000);
	else
		gtt_rmw(0x130040, 0xf3ffffff, 0x00000000);

	/* More magic */
	if (haswell_is_ult() || gpu_is_ulx) {
		if (!gpu_is_ulx)
			gtt_write(0x138128, 0x00000000);
		else
			gtt_write(0x138128, 0x00000001);
		gtt_write(0x13812c, 0x00000000);
		gtt_write(0x138124, 0x80000017);
	}

	/* Disable Force Wake */
	gtt_write(0x0a188, 0x00010000);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 0 << 0);
	gtt_write(0x0a188, 0x00000001);
}

/* Enable SCI to ACPI _GPE._L06 */
static void gma_enable_swsci(void)
{
	u16 reg16;

	/* Clear DMISCI status */
	reg16 = inw(get_pmbase() + TCO1_STS);
	reg16 &= DMISCI_STS;
	outw(get_pmbase() + TCO1_STS, reg16);

	/* Clear and enable ACPI TCO SCI */
	enable_tco_sci();
}

static void gma_func0_init(struct device *dev)
{
	int lightup_ok = 0;
	u32 reg32;

	/* IGD needs to be Bus Master */
	reg32 = pci_read_config32(dev, PCI_COMMAND);
	reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
	pci_write_config32(dev, PCI_COMMAND, reg32);

	/* Init graphics power management */
	gma_pm_init_pre_vbios(dev);

	/* Pre panel init */
	gma_setup_panel(dev);

	int vga_disable = (pci_read_config16(dev, GGC) & 2) >> 1;

	if (CONFIG(MAINBOARD_USE_LIBGFXINIT)) {
		if (vga_disable) {
			printk(BIOS_INFO,
			       "IGD is not decoding legacy VGA MEM and IO: skipping NATIVE graphic init\n");
		} else {
			printk(BIOS_SPEW, "NATIVE graphics, run native enable\n");
			gma_gfxinit(&lightup_ok);
			gfx_set_init_done(1);
		}
	}

	if (!lightup_ok) {
		printk(BIOS_SPEW, "FUI did not run; using VBIOS\n");
		mdelay(CONFIG_PRE_GRAPHICS_DELAY);
		pci_dev_init(dev);
	}

	/* Post panel init */
	gma_pm_init_post_vbios(dev);

	gma_enable_swsci();
	intel_gma_restore_opregion();
}

const struct i915_gpu_controller_info *intel_gma_get_controller_info(void)
{
	struct device *dev = pcidev_on_root(2, 0);
	if (!dev)
		return NULL;

	struct northbridge_intel_haswell_config *chip = dev->chip_info;
	return &chip->gfx;
}

static void gma_ssdt(struct device *device)
{
	const struct i915_gpu_controller_info *gfx = intel_gma_get_controller_info();
	if (!gfx)
		return;

	drivers_intel_gma_displays_ssdt_generate(gfx);
}

static unsigned long gma_write_acpi_tables(struct device *const dev, unsigned long current,
					   struct acpi_rsdp *const rsdp)
{
	igd_opregion_t *opregion = (igd_opregion_t *)current;
	global_nvs_t *gnvs;

	if (intel_gma_init_igd_opregion(opregion) != CB_SUCCESS)
		return current;

	current += sizeof(igd_opregion_t);

	/* GNVS has already been set up */
	gnvs = cbmem_find(CBMEM_ID_ACPI_GNVS);
	if (gnvs) {
		/* IGD OpRegion Base Address */
		gma_set_gnvs_aslb(gnvs, (uintptr_t)opregion);
	} else {
		printk(BIOS_ERR, "Error: GNVS table not found.\n");
	}

	current = acpi_align_current(current);
	return current;
}

static struct pci_operations gma_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};

static struct device_operations gma_func0_ops = {
	.read_resources           = pci_dev_read_resources,
	.set_resources            = pci_dev_set_resources,
	.enable_resources         = pci_dev_enable_resources,
	.init                     = gma_func0_init,
	.acpi_fill_ssdt_generator = gma_ssdt,
	.scan_bus                 = NULL,
	.enable                   = NULL,
	.ops_pci                  = &gma_pci_ops,
	.write_acpi_tables        = gma_write_acpi_tables,
};

static const unsigned short pci_device_ids[] = {
	0x0402, /* Desktop GT1 */
	0x0412, /* Desktop GT2 */
	0x0422, /* Desktop GT3 */
	0x0406, /* Mobile GT1 */
	0x0416, /* Mobile GT2 */
	0x0426, /* Mobile GT3 */
	0x0d16, /* Mobile 4+3 GT1 */
	0x0d26, /* Mobile 4+3 GT2 */
	0x0d36, /* Mobile 4+3 GT3 */
	0x0a06, /* ULT GT1 */
	0x0a16, /* ULT GT2 */
	0x0a26, /* ULT GT3 */
	0,
};

static const struct pci_driver pch_lpc __pci_driver = {
	.ops     = &gma_func0_ops,
	.vendor  = PCI_VENDOR_ID_INTEL,
	.devices = pci_device_ids,
};