blob: 0caa64fe11ee0523d5e770bb52af08594a8ae452 [file] [log] [blame]
Angel Pons4b429832020-04-02 23:48:50 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin76c37002012-10-30 09:03:43 -05002
Nico Huberc2e46422020-03-23 01:22:49 +01003#include <commonlib/helpers.h>
Aaron Durbin76c37002012-10-30 09:03:43 -05004#include <arch/io.h>
Kyösti Mälkki13f66502019-03-03 08:01:05 +02005#include <device/mmio.h>
Kyösti Mälkkif1b58b72019-03-01 13:43:02 +02006#include <device/pci_ops.h>
Matt DeVillier7c789702017-06-16 23:36:46 -05007#include <cbmem.h>
Aaron Durbin76c37002012-10-30 09:03:43 -05008#include <console/console.h>
Kyösti Mälkkiab56b3b2013-11-28 16:44:51 +02009#include <bootmode.h>
Aaron Durbin76c37002012-10-30 09:03:43 -050010#include <delay.h>
11#include <device/device.h>
12#include <device/pci.h>
13#include <device/pci_ids.h>
Ronald G. Minnich5bcca7e2013-06-25 15:56:46 -070014#include <drivers/intel/gma/i915_reg.h>
Furquan Shaikh77f48cd2013-08-19 10:16:50 -070015#include <drivers/intel/gma/i915.h>
Nico Huber18228162017-06-08 16:31:57 +020016#include <drivers/intel/gma/libgfxinit.h>
Duncan Laurie356833d2013-07-09 15:40:27 -070017#include <cpu/intel/haswell/haswell.h>
Matt DeVillierebe08e02017-07-14 13:28:42 -050018#include <drivers/intel/gma/opregion.h>
Matt DeVillier7c789702017-06-16 23:36:46 -050019#include <southbridge/intel/lynxpoint/nvs.h>
Ronald G. Minnich9518b562013-09-19 16:45:22 -070020#include <string.h>
Elyes HAOUAS51401c32019-05-15 21:09:30 +020021#include <types.h>
Aaron Durbin76c37002012-10-30 09:03:43 -050022
23#include "chip.h"
24#include "haswell.h"
25
Julius Wernercd49cce2019-03-05 16:53:33 -080026#if CONFIG(CHROMEOS)
Furquan Shaikhcb61ea72013-08-15 15:23:58 -070027#include <vendorcode/google/chromeos/chromeos.h>
28#endif
29
/* One GT MMIO register write: offset plus AND/OR masks (see gtt_write_regs()). */
struct gt_reg {
	u32 reg;	/* MMIO offset relative to the GTT/MMIO BAR */
	u32 andmask;	/* bits to preserve; 0 selects a plain write instead of RMW */
	u32 ormask;	/* bits to set */
};
35
/*
 * GT power-management setup sequence, applied via gtt_write_regs() before
 * the VBIOS / graphics init runs. Entries with andmask == 0 are written
 * directly, all others are read-modify-write. Terminated by a zero entry.
 * Register offsets/values follow Intel's Haswell BIOS spec; most are not
 * publicly documented.
 */
static const struct gt_reg haswell_gt_setup[] = {
	/* Enable Counters */
	{ 0x0a248, 0x00000000, 0x00000016 },
	{ 0x0a000, 0x00000000, 0x00070020 },
	{ 0x0a180, 0xff3fffff, 0x15000000 },
	/* Enable DOP Clock Gating */
	{ 0x09424, 0x00000000, 0x000003fd },
	/* Enable Unit Level Clock Gating */
	{ 0x09400, 0x00000000, 0x00000080 },
	{ 0x09404, 0x00000000, 0x40401000 },
	{ 0x09408, 0x00000000, 0x00000000 },
	{ 0x0940c, 0x00000000, 0x02000001 },
	{ 0x0a008, 0x00000000, 0x08000000 },
	/* Wake Rate Limits */
	{ 0x0a090, 0xffffffff, 0x00000000 },
	{ 0x0a098, 0xffffffff, 0x03e80000 },
	{ 0x0a09c, 0xffffffff, 0x00280000 },
	{ 0x0a0a8, 0xffffffff, 0x0001e848 },
	{ 0x0a0ac, 0xffffffff, 0x00000019 },
	/* Render/Video/Blitter Idle Max Count */
	{ 0x02054, 0x00000000, 0x0000000a },
	{ 0x12054, 0x00000000, 0x0000000a },
	{ 0x22054, 0x00000000, 0x0000000a },
	/* RC Sleep / RCx Thresholds */
	{ 0x0a0b0, 0xffffffff, 0x00000000 },
	{ 0x0a0b4, 0xffffffff, 0x000003e8 },
	{ 0x0a0b8, 0xffffffff, 0x0000c350 },
	/* RP Settings */
	{ 0x0a010, 0xffffffff, 0x000f4240 },
	{ 0x0a014, 0xffffffff, 0x12060000 },
	{ 0x0a02c, 0xffffffff, 0x0000e808 },
	{ 0x0a030, 0xffffffff, 0x0003bd08 },
	{ 0x0a068, 0xffffffff, 0x000101d0 },
	{ 0x0a06c, 0xffffffff, 0x00055730 },
	{ 0x0a070, 0xffffffff, 0x0000000a },
	/* RP Control */
	{ 0x0a024, 0x00000000, 0x00000b92 },
	/* HW RC6 Control */
	{ 0x0a090, 0x00000000, 0x88040000 },
	/* Video Frequency Request */
	{ 0x0a00c, 0x00000000, 0x08000000 },
	{ 0 },
};
79
/*
 * Final lock writes applied after GT PM setup; presumably these set the
 * "lock" bits of the corresponding PM registers — TODO confirm against the
 * Haswell BIOS spec. Terminated by a zero entry.
 */
static const struct gt_reg haswell_gt_lock[] = {
	{ 0x0a248, 0xffffffff, 0x80000000 },
	{ 0x0a004, 0xffffffff, 0x00000010 },
	{ 0x0a080, 0xffffffff, 0x00000004 },
	{ 0x0a180, 0xffffffff, 0x80000000 },
	{ 0 },
};
87
Angel Pons1db5bc72020-01-15 00:49:03 +010088/*
89 * Some VGA option roms are used for several chipsets but they only have one PCI ID in their
90 * header. If we encounter such an option rom, we need to do the mapping ourselves.
Aaron Durbin76c37002012-10-30 09:03:43 -050091 */
92
93u32 map_oprom_vendev(u32 vendev)
94{
Elyes HAOUAS69d658f2016-09-17 20:32:07 +020095 u32 new_vendev = vendev;
Aaron Durbin76c37002012-10-30 09:03:43 -050096
97 switch (vendev) {
Aaron Durbin71161292012-12-13 16:43:32 -060098 case 0x80860402: /* GT1 Desktop */
99 case 0x80860406: /* GT1 Mobile */
100 case 0x8086040a: /* GT1 Server */
Duncan Laurie26e7dd72012-12-19 09:12:31 -0800101 case 0x80860a06: /* GT1 ULT */
Aaron Durbin71161292012-12-13 16:43:32 -0600102
103 case 0x80860412: /* GT2 Desktop */
104 case 0x80860416: /* GT2 Mobile */
105 case 0x8086041a: /* GT2 Server */
Duncan Laurie26e7dd72012-12-19 09:12:31 -0800106 case 0x80860a16: /* GT2 ULT */
Aaron Durbin71161292012-12-13 16:43:32 -0600107
108 case 0x80860422: /* GT3 Desktop */
109 case 0x80860426: /* GT3 Mobile */
110 case 0x8086042a: /* GT3 Server */
Duncan Laurie26e7dd72012-12-19 09:12:31 -0800111 case 0x80860a26: /* GT3 ULT */
Aaron Durbin71161292012-12-13 16:43:32 -0600112
Elyes HAOUAS69d658f2016-09-17 20:32:07 +0200113 new_vendev = 0x80860406; /* GT1 Mobile */
Aaron Durbin76c37002012-10-30 09:03:43 -0500114 break;
115 }
116
117 return new_vendev;
118}
119
Angel Pons1db5bc72020-01-15 00:49:03 +0100120/** FIXME: Seems to be outdated. */
121/*
122 * GTT is the Global Translation Table for the graphics pipeline. It is used to translate
123 * graphics addresses to physical memory addresses. As in the CPU, GTTs map 4K pages.
124 *
125 * The setgtt function adds a further bit of flexibility: it allows you to set a range (the
126 * first two parameters) to point to a physical address (third parameter); the physical address
127 * is incremented by a count (fourth parameter) for each GTT in the range.
128 *
129 * Why do it this way? For ultrafast startup, we can point all the GTT entries to point to one
130 * page, and set that page to 0s:
131 *
132 * memset(physbase, 0, 4096);
133 * setgtt(0, 4250, physbase, 0);
134 *
135 * this takes about 2 ms, and is a win because zeroing the page takes up to 200 ms.
136 *
137 * This call sets the GTT to point to a linear range of pages starting at physbase.
Ronald G. Minnich4c8465c2013-09-30 15:57:21 -0700138 */
139
140#define GTT_PTE_BASE (2 << 20)
141
Angel Pons1db5bc72020-01-15 00:49:03 +0100142void set_translation_table(int start, int end, u64 base, int inc)
Ronald G. Minnich4c8465c2013-09-30 15:57:21 -0700143{
144 int i;
145
Elyes HAOUAS12df9502016-08-23 21:29:48 +0200146 for (i = start; i < end; i++){
Angel Pons1db5bc72020-01-15 00:49:03 +0100147 u64 physical_address = base + i * inc;
148
Ronald G. Minnich4c8465c2013-09-30 15:57:21 -0700149 /* swizzle the 32:39 bits to 4:11 */
150 u32 word = physical_address | ((physical_address >> 28) & 0xff0) | 1;
Angel Pons1db5bc72020-01-15 00:49:03 +0100151
152 /*
153 * Note: we've confirmed by checking the values that MRC does no useful
154 * setup before we run this.
Ronald G. Minnich4c8465c2013-09-30 15:57:21 -0700155 */
156 gtt_write(GTT_PTE_BASE + i * 4, word);
157 gtt_read(GTT_PTE_BASE + i * 4);
158 }
159}
160
Aaron Durbin76c37002012-10-30 09:03:43 -0500161static struct resource *gtt_res = NULL;
162
Furquan Shaikh77f48cd2013-08-19 10:16:50 -0700163u32 gtt_read(u32 reg)
Aaron Durbin76c37002012-10-30 09:03:43 -0500164{
Ronald G. Minnich9518b562013-09-19 16:45:22 -0700165 u32 val;
Kevin Paul Herbertbde6d302014-12-24 18:43:20 -0800166 val = read32(res2mmio(gtt_res, reg, 0));
Ronald G. Minnich9518b562013-09-19 16:45:22 -0700167 return val;
168
Aaron Durbin76c37002012-10-30 09:03:43 -0500169}
170
/* Write a 32-bit value to the GT register at `reg` within the MMIO BAR (gtt_res). */
void gtt_write(u32 reg, u32 data)
{
	write32(res2mmio(gtt_res, reg, 0), data);
}
175
Duncan Laurie356833d2013-07-09 15:40:27 -0700176static inline void gtt_rmw(u32 reg, u32 andmask, u32 ormask)
177{
178 u32 val = gtt_read(reg);
179 val &= andmask;
180 val |= ormask;
181 gtt_write(reg, val);
182}
183
184static inline void gtt_write_regs(const struct gt_reg *gt)
185{
186 for (; gt && gt->reg; gt++) {
187 if (gt->andmask)
188 gtt_rmw(gt->reg, gt->andmask, gt->ormask);
189 else
190 gtt_write(gt->reg, gt->ormask);
191 }
192}
193
Aaron Durbin76c37002012-10-30 09:03:43 -0500194#define GTT_RETRY 1000
Ronald G. Minnich9518b562013-09-19 16:45:22 -0700195int gtt_poll(u32 reg, u32 mask, u32 value)
Aaron Durbin76c37002012-10-30 09:03:43 -0500196{
Martin Roth468d02c2019-10-23 21:44:42 -0600197 unsigned int try = GTT_RETRY;
Aaron Durbin76c37002012-10-30 09:03:43 -0500198 u32 data;
199
200 while (try--) {
201 data = gtt_read(reg);
202 if ((data & mask) == value)
203 return 1;
Angel Pons1db5bc72020-01-15 00:49:03 +0100204
Aaron Durbin76c37002012-10-30 09:03:43 -0500205 udelay(10);
206 }
207
208 printk(BIOS_ERR, "GT init timeout\n");
209 return 0;
210}
211
Patrick Rudolph19c2ad82017-06-30 14:52:01 +0200212uintptr_t gma_get_gnvs_aslb(const void *gnvs)
213{
214 const global_nvs_t *gnvs_ptr = gnvs;
215 return (uintptr_t)(gnvs_ptr ? gnvs_ptr->aslb : 0);
216}
217
218void gma_set_gnvs_aslb(void *gnvs, uintptr_t aslb)
219{
220 global_nvs_t *gnvs_ptr = gnvs;
221 if (gnvs_ptr)
222 gnvs_ptr->aslb = aslb;
223}
224
/*
 * Request the Haswell display power well and wait (via gtt_poll, ~10ms max)
 * for the state bit to report it enabled.
 */
static void power_well_enable(void)
{
	gtt_write(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_ENABLE);
	gtt_poll(HSW_PWR_WELL_CTL1, HSW_PWR_WELL_STATE, HSW_PWR_WELL_STATE);
}
230
/*
 * GT power-management init that must run before the VBIOS / graphics init:
 * locates the GTT MMIO BAR, powers the display well, and programs the
 * RC6 / RPS setup sequence. The sequence and most register offsets come
 * from Intel reference code and are order-dependent — do not reorder.
 */
static void gma_pm_init_pre_vbios(struct device *dev)
{
	printk(BIOS_DEBUG, "GT Power Management Init\n");

	/* All gtt_read/gtt_write below depend on this BAR; bail out if unset. */
	gtt_res = find_resource(dev, PCI_BASE_ADDRESS_0);
	if (!gtt_res || !gtt_res->base)
		return;

	power_well_enable();

	/*
	 * Enable RC6
	 */

	/* Enable Force Wake */
	gtt_write(0x0a180, 1 << 5);
	gtt_write(0x0a188, 0x00010001);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 1 << 0);

	/* GT Settings */
	gtt_write_regs(haswell_gt_setup);

	/* Wait for Mailbox Ready (bit 31 = busy; presumably the PCODE mailbox) */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Mailbox Data - RC6 VIDS */
	gtt_write(0x138128, 0x00000000);

	/* Mailbox Command */
	gtt_write(0x138124, 0x80000004);

	/* Wait for Mailbox Ready */
	gtt_poll(0x138124, (1UL << 31), (0UL << 31));

	/* Enable PM Interrupts */
	gtt_write(GEN6_PMIER, GEN6_PM_MBOX_EVENT | GEN6_PM_THERMAL_EVENT |
		GEN6_PM_RP_DOWN_TIMEOUT | GEN6_PM_RP_UP_THRESHOLD |
		GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED |
		GEN6_PM_RP_DOWN_EI_EXPIRED);

	/* Enable RC6 in idle */
	gtt_write(0x0a094, 0x00040000);

	/* PM Lock Settings */
	gtt_write_regs(haswell_gt_lock);
}
277
Furquan Shaikh77f48cd2013-08-19 10:16:50 -0700278static void init_display_planes(void)
279{
280 int pipe, plane;
281
282 /* Disable cursor mode */
283 for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
284 gtt_write(CURCNTR_IVB(pipe), CURSOR_MODE_DISABLE);
285 gtt_write(CURBASE_IVB(pipe), 0x00000000);
286 }
287
Angel Pons1db5bc72020-01-15 00:49:03 +0100288 /* Disable primary plane and set surface base address */
Furquan Shaikh77f48cd2013-08-19 10:16:50 -0700289 for (plane = PLANE_A; plane <= PLANE_C; plane++) {
290 gtt_write(DSPCNTR(plane), DISPLAY_PLANE_DISABLE);
291 gtt_write(DSPSURF(plane), 0x00000000);
292 }
293
294 /* Disable VGA display */
295 gtt_write(CPU_VGACNTRL, CPU_VGA_DISABLE);
296}
297
/*
 * Program panel power sequencing, hotplug and backlight PWM from the
 * devicetree config, then bring display/pipeline/DDI registers into a
 * basic sane state. Register programming order follows Intel reference
 * code; several writes are undocumented magic — do not reorder.
 */
static void gma_setup_panel(struct device *dev)
{
	struct northbridge_intel_haswell_config *conf = dev->chip_info;
	u32 reg32;

	printk(BIOS_DEBUG, "GT Power Management Init (post VBIOS)\n");

	/* Setup Digital Port Hotplug (only if the register is still zero) */
	reg32 = gtt_read(PCH_PORT_HOTPLUG);
	if (!reg32) {
		reg32 = (conf->gpu_dp_b_hotplug & 0x7) << 2;
		reg32 |= (conf->gpu_dp_c_hotplug & 0x7) << 10;
		reg32 |= (conf->gpu_dp_d_hotplug & 0x7) << 18;
		gtt_write(PCH_PORT_HOTPLUG, reg32);
	}

	/* Setup Panel Power On Delays (only if still unprogrammed) */
	reg32 = gtt_read(PCH_PP_ON_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_port_select & 0x3) << 30;
		reg32 |= (conf->gpu_panel_power_up_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_on_delay & 0x1fff);
		gtt_write(PCH_PP_ON_DELAYS, reg32);
	}

	/* Setup Panel Power Off Delays (only if still unprogrammed) */
	reg32 = gtt_read(PCH_PP_OFF_DELAYS);
	if (!reg32) {
		reg32 = (conf->gpu_panel_power_down_delay & 0x1fff) << 16;
		reg32 |= (conf->gpu_panel_power_backlight_off_delay & 0x1fff);
		gtt_write(PCH_PP_OFF_DELAYS, reg32);
	}

	/* Setup Panel Power Cycle Delay (low byte of the divisor register) */
	if (conf->gpu_panel_power_cycle_delay) {
		reg32 = gtt_read(PCH_PP_DIVISOR);
		reg32 &= ~0xff;
		reg32 |= conf->gpu_panel_power_cycle_delay & 0xff;
		gtt_write(PCH_PP_DIVISOR, reg32);
	}

	/* Enforce the PCH PWM function, as so does Linux.
	   The CPU PWM controls are disabled after reset. */
	if (conf->gpu_pch_backlight_pwm_hz) {
		/* Reference clock is either 24MHz (LP PCH) or 135MHz. We can
		   choose either a 16 or a 128 step increment. Use 16 if we
		   would have less than 100 steps otherwise. */
		const unsigned int refclock = CONFIG(INTEL_LYNXPOINT_LP) ? 24*MHz : 135*MHz;
		const unsigned int hz_limit = refclock / 128 / 100;
		unsigned int pwm_increment, pwm_period;
		u32 south_chicken2;

		south_chicken2 = gtt_read(SOUTH_CHICKEN2);
		if (conf->gpu_pch_backlight_pwm_hz > hz_limit) {
			pwm_increment = 16;
			south_chicken2 |= LPT_PWM_GRANULARITY;
		} else {
			pwm_increment = 128;
			south_chicken2 &= ~LPT_PWM_GRANULARITY;
		}
		gtt_write(SOUTH_CHICKEN2, south_chicken2);

		pwm_period = refclock / pwm_increment / conf->gpu_pch_backlight_pwm_hz;
		printk(BIOS_INFO,
			"GMA: Setting backlight PWM frequency to %uMHz / %u / %u = %uHz\n",
			refclock / MHz, pwm_increment, pwm_period,
			DIV_ROUND_CLOSEST(refclock, pwm_increment * pwm_period));

		/* Start with a 50% duty cycle. */
		gtt_write(BLC_PWM_PCH_CTL2, pwm_period << 16 | pwm_period / 2);

		gtt_write(BLC_PWM_PCH_CTL1,
			(conf->gpu_pch_backlight_polarity == GPU_BACKLIGHT_POLARITY_LOW) << 29 |
			BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE);
	}

	/* Get display,pipeline,and DDI registers into a basic sane state */
	power_well_enable();

	init_display_planes();

	/*
	 * DDI-A params set:
	 * bit 0: Display detected (RO)
	 * bit 4: DDI A supports 4 lanes and DDI E is not used
	 * bit 7: DDI buffer is idle
	 */
	reg32 = DDI_BUF_IS_IDLE | DDI_INIT_DISPLAY_DETECTED;
	if (!conf->gpu_ddi_e_connected)
		reg32 |= DDI_A_4_LANES;
	gtt_write(DDI_BUF_CTL_A, reg32);

	/* Set FDI registers - is this required? */
	gtt_write(_FDI_RXA_MISC, 0x00200090);
	gtt_write(_FDI_RXA_MISC, 0x0a000000);

	/* Enable the handshake with PCH display when processing reset */
	gtt_write(NDE_RSTWRN_OPT, RST_PCH_HNDSHK_EN);

	/* Undocumented */
	gtt_write(0x42090, 0x04000000);
	gtt_write(0x9840, 0x00000000);
	gtt_write(0x42090, 0xa4000000);

	gtt_write(SOUTH_DSPCLK_GATE_D, PCH_LP_PARTITION_LEVEL_DISABLE);

	/* Undocumented */
	gtt_write(0x42080, 0x00004000);

	/* Prepare DDI buffers for DP and FDI */
	intel_prepare_ddi();

	/* Hot plug detect buffer enabled for port A */
	gtt_write(DIGITAL_PORT_HOTPLUG_CNTRL, DIGITAL_PORTA_HOTPLUG_ENABLE);

	/* Enable HPD buffer for digital port D and B */
	gtt_write(PCH_PORT_HOTPLUG, PORTD_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE);

	/*
	 * Bits 4:0 - Power cycle delay (default 0x6 --> 500ms)
	 * Bits 31:8 - Reference divider (0x0004af ----> 24MHz)
	 */
	gtt_write(PCH_PP_DIVISOR, 0x0004af06);
}
422
/*
 * GT power-management init that runs after VBIOS / native graphics init:
 * CD clock selection, an extra PCODE mailbox sequence for ULT/ULX parts,
 * and release of the force-wake taken in gma_pm_init_pre_vbios().
 * Offsets 0x42014/0x130040/0x1381xx are undocumented magic from Intel
 * reference code — do not reorder.
 */
static void gma_pm_init_post_vbios(struct device *dev)
{
	int cdclk = 0;
	int devid = pci_read_config16(dev, PCI_DEVICE_ID);
	int gpu_is_ulx = 0;

	/* 0x0a0e / 0x0a1e are the ULX GT1/GT2 device IDs */
	if (devid == 0x0a0e || devid == 0x0a1e)
		gpu_is_ulx = 1;

	/* CD Frequency */
	if ((gtt_read(0x42014) & 0x1000000) || gpu_is_ulx || haswell_is_ult())
		cdclk = 0; /* fixed frequency */
	else
		cdclk = 2; /* variable frequency */

	if (gpu_is_ulx || cdclk != 0)
		gtt_rmw(0x130040, 0xf7ffffff, 0x04000000);
	else
		gtt_rmw(0x130040, 0xf3ffffff, 0x00000000);

	/* More magic */
	if (haswell_is_ult() || gpu_is_ulx) {
		if (!gpu_is_ulx)
			gtt_write(0x138128, 0x00000000);
		else
			gtt_write(0x138128, 0x00000001);
		gtt_write(0x13812c, 0x00000000);
		gtt_write(0x138124, 0x80000017);
	}

	/* Disable Force Wake */
	gtt_write(0x0a188, 0x00010000);
	gtt_poll(FORCEWAKE_ACK_HSW, 1 << 0, 0 << 0);
	gtt_write(0x0a188, 0x00000001);
}
458
Patrick Rudolph89f3a602017-06-20 18:25:22 +0200459/* Enable SCI to ACPI _GPE._L06 */
460static void gma_enable_swsci(void)
461{
462 u16 reg16;
463
Angel Pons1db5bc72020-01-15 00:49:03 +0100464 /* Clear DMISCI status */
Patrick Rudolph89f3a602017-06-20 18:25:22 +0200465 reg16 = inw(get_pmbase() + TCO1_STS);
466 reg16 &= DMISCI_STS;
467 outw(get_pmbase() + TCO1_STS, reg16);
468
Angel Pons1db5bc72020-01-15 00:49:03 +0100469 /* Clear and enable ACPI TCO SCI */
Patrick Rudolph89f3a602017-06-20 18:25:22 +0200470 enable_tco_sci();
471}
472
Aaron Durbin76c37002012-10-30 09:03:43 -0500473static void gma_func0_init(struct device *dev)
474{
Ronald G. Minnich4f78b182013-04-17 16:57:30 -0700475 int lightup_ok = 0;
Aaron Durbin76c37002012-10-30 09:03:43 -0500476 u32 reg32;
Matt DeVillier6955b9c2017-04-16 01:42:44 -0500477
Aaron Durbin76c37002012-10-30 09:03:43 -0500478 /* IGD needs to be Bus Master */
479 reg32 = pci_read_config32(dev, PCI_COMMAND);
480 reg32 |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
481 pci_write_config32(dev, PCI_COMMAND, reg32);
482
483 /* Init graphics power management */
484 gma_pm_init_pre_vbios(dev);
485
Matt DeVillier6955b9c2017-04-16 01:42:44 -0500486 /* Pre panel init */
Duncan Lauriec7f2ab72013-05-28 07:49:09 -0700487 gma_setup_panel(dev);
488
Arthur Heymanse6c8f7e2018-08-09 11:31:51 +0200489 int vga_disable = (pci_read_config16(dev, GGC) & 2) >> 1;
490
Julius Wernercd49cce2019-03-05 16:53:33 -0800491 if (CONFIG(MAINBOARD_USE_LIBGFXINIT)) {
Arthur Heymanse6c8f7e2018-08-09 11:31:51 +0200492 if (vga_disable) {
493 printk(BIOS_INFO,
494 "IGD is not decoding legacy VGA MEM and IO: skipping NATIVE graphic init\n");
495 } else {
496 printk(BIOS_SPEW, "NATIVE graphics, run native enable\n");
497 gma_gfxinit(&lightup_ok);
498 gfx_set_init_done(1);
499 }
Arthur Heymans23cda3472016-12-18 16:03:52 +0100500 }
501
Ronald G. Minnich4f78b182013-04-17 16:57:30 -0700502 if (! lightup_ok) {
503 printk(BIOS_SPEW, "FUI did not run; using VBIOS\n");
Stefan Reinauerf1aabec2014-01-22 15:16:30 -0800504 mdelay(CONFIG_PRE_GRAPHICS_DELAY);
Ronald G. Minnich4f78b182013-04-17 16:57:30 -0700505 pci_dev_init(dev);
506 }
507
Matt DeVillier6955b9c2017-04-16 01:42:44 -0500508 /* Post panel init */
Ronald G. Minnich4f78b182013-04-17 16:57:30 -0700509 gma_pm_init_post_vbios(dev);
Patrick Rudolph89f3a602017-06-20 18:25:22 +0200510
511 gma_enable_swsci();
512 intel_gma_restore_opregion();
Aaron Durbin76c37002012-10-30 09:03:43 -0500513}
514
/* Emit the IGD display SSDT entries from the devicetree `gfx` config. */
static void gma_generate_ssdt(const struct device *dev)
{
	const struct northbridge_intel_haswell_config *chip = dev->chip_info;

	drivers_intel_gma_displays_ssdt_generate(&chip->gfx);
}
521
Furquan Shaikh0f007d82020-04-24 06:41:18 -0700522static unsigned long gma_write_acpi_tables(const struct device *const dev,
523 unsigned long current,
Angel Pons1db5bc72020-01-15 00:49:03 +0100524 struct acpi_rsdp *const rsdp)
Patrick Rudolphee14ccc2017-05-20 11:46:06 +0200525{
526 igd_opregion_t *opregion = (igd_opregion_t *)current;
Matt DeVillier7c789702017-06-16 23:36:46 -0500527 global_nvs_t *gnvs;
Patrick Rudolphee14ccc2017-05-20 11:46:06 +0200528
Matt DeVillierebe08e02017-07-14 13:28:42 -0500529 if (intel_gma_init_igd_opregion(opregion) != CB_SUCCESS)
Patrick Rudolphee14ccc2017-05-20 11:46:06 +0200530 return current;
531
532 current += sizeof(igd_opregion_t);
533
Matt DeVillier7c789702017-06-16 23:36:46 -0500534 /* GNVS has been already set up */
535 gnvs = cbmem_find(CBMEM_ID_ACPI_GNVS);
536 if (gnvs) {
537 /* IGD OpRegion Base Address */
Patrick Rudolph19c2ad82017-06-30 14:52:01 +0200538 gma_set_gnvs_aslb(gnvs, (uintptr_t)opregion);
Matt DeVillier7c789702017-06-16 23:36:46 -0500539 } else {
540 printk(BIOS_ERR, "Error: GNVS table not found.\n");
541 }
542
Patrick Rudolphee14ccc2017-05-20 11:46:06 +0200543 current = acpi_align_current(current);
544 return current;
545}
546
/* PCI-specific ops: default subsystem ID programming. */
static struct pci_operations gma_pci_ops = {
	.set_subsystem = pci_dev_set_subsystem,
};
550
/* Device operations bound to the IGD (function 0) by the driver below. */
static struct device_operations gma_func0_ops = {
	.read_resources = pci_dev_read_resources,
	.set_resources = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = gma_func0_init,
	.acpi_fill_ssdt = gma_generate_ssdt,
	.ops_pci = &gma_pci_ops,
	.write_acpi_tables = gma_write_acpi_tables,
};
560
/* Haswell IGD PCI device IDs this driver binds to; zero-terminated. */
static const unsigned short pci_device_ids[] = {
	0x0402, /* Desktop GT1 */
	0x0412, /* Desktop GT2 */
	0x0422, /* Desktop GT3 */
	0x0406, /* Mobile GT1 */
	0x0416, /* Mobile GT2 */
	0x0426, /* Mobile GT3 */
	0x0d16, /* Mobile 4+3 GT1 */
	0x0d26, /* Mobile 4+3 GT2 */
	0x0d36, /* Mobile 4+3 GT3 */
	0x0a06, /* ULT GT1 */
	0x0a16, /* ULT GT2 */
	0x0a26, /* ULT GT3 */
	0,
};
Aaron Durbin76c37002012-10-30 09:03:43 -0500576
577static const struct pci_driver pch_lpc __pci_driver = {
Angel Pons1db5bc72020-01-15 00:49:03 +0100578 .ops = &gma_func0_ops,
579 .vendor = PCI_VENDOR_ID_INTEL,
Aaron Durbin76c37002012-10-30 09:03:43 -0500580 .devices = pci_device_ids,
581};