/*
 * This file is part of the coreboot project.
 *
 * drivers/video/tegra/dc/dp.c
 *
 * Copyright (c) 2011-2015, NVIDIA Corporation.
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <console/console.h>
#include <device/device.h>
#include <device/i2c_simple.h>
#include <edid.h>
#include <string.h>
#include <delay.h>
#include <soc/addressmap.h>
#include <soc/clock.h>
#include <soc/display.h>
#include <soc/nvidia/tegra/i2c.h>
#include <soc/nvidia/tegra/dc.h>
#include <soc/nvidia/tegra/types.h>
#include <soc/nvidia/tegra/pwm.h>
#include <soc/nvidia/tegra/displayport.h>
#include <soc/sor.h>
#include <types.h>

#include "chip.h"

#define DO_FAST_LINK_TRAINING 0

struct tegra_dc dc_data;

enum {
	DP_LT_SUCCESS = 0,
	DP_LT_FAILED = -1,
};

struct tegra_dc_dp_data dp_data;

static inline u32 tegra_dpaux_readl(struct tegra_dc_dp_data *dp, u32 reg)
{
	void *addr = dp->aux_base + (u32) (reg << 2);
	u32 reg_val = READL(addr);
	return reg_val;
}

static inline void tegra_dpaux_writel(struct tegra_dc_dp_data *dp,
				      u32 reg, u32 val)
{
	void *addr = dp->aux_base + (u32) (reg << 2);
	WRITEL(val, addr);
}

static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dc_dp_data *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	printk(BIOS_ERR,
	       "dpaux_poll_register 0x%x: timeout: "
	       "(reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	       reg, reg_val, mask, exp_val);
	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dc_dp_data *dp)
{
	/* According to DP spec, each aux transaction needs to finish
	   within 40ms. */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		printk(BIOS_INFO, "dp: DPAUX transaction timeout\n");
		return -1;
	}
	return 0;
}

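/*
 * The AUX channel moves at most DP_AUX_MAX_BYTES per transaction, so the
 * chunk helpers below transfer a single chunk and report back the byte count
 * the sink actually acknowledged. Transactions ending in a timeout/RX error
 * are retried up to DP_AUX_TIMEOUT_MAX_TRIES times, and DEFER replies up to
 * DP_AUX_DEFER_MAX_TRIES times, as the DP spec recommends.
 */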
static int tegra_dc_dpaux_write_chunk(struct tegra_dc_dp_data *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is a write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		printk(BIOS_ERR, "dp: aux write cmd 0x%x is invalid\n", cmd);
		return -1;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			printk(BIOS_ERR, "dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux write retry (0x%x)"
				       " -- %d\n",
				       *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_ERR, "dp: aux write got error"
				       " (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux write defer (0x%x)"
				       " -- %d\n", *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_ERR, "dp: aux write defer exceeds"
				       " max retries (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			printk(BIOS_ERR, "dp: aux write failed (0x%x)\n",
			       *aux_stat);
			return -1;
		}
	}
	/* Should never reach here */
	return -1;
}

static int tegra_dc_dpaux_read_chunk(struct tegra_dc_dp_data *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only read one chunk */

	/* Check to make sure the command is a read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		printk(BIOS_ERR, "dp: aux read cmd 0x%x is invalid\n", cmd);
		return -1;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		printk(BIOS_SPEW, "dp: HPD is not detected\n");
		return -1;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			printk(BIOS_INFO, "dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux read retry (0x%x)"
				       " -- %d\n", *aux_stat,
				       timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				printk(BIOS_ERR, "dp: aux read got error"
				       " (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux read defer (0x%x)"
				       " -- %d\n", *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_INFO, "dp: aux read defer exceeds"
				       " max retries (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			printk(BIOS_ERR, "dp: aux read failed (0x%x)\n",
			       *aux_stat);
			return -1;
		}
	}
	/* Should never reach here */
	printk(BIOS_ERR, "%s: should not reach here\n", __func__);
	return -1;
}

#if DO_FAST_LINK_TRAINING
static int tegra_dc_dpaux_read(struct tegra_dc_dp_data *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;

	} while (*size > finished);

	*size = finished;
	return ret;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dc_dp_dpcd_read(struct tegra_dc_dp_data *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret)
		printk(BIOS_ERR,
		       "dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		       cmd, status);

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dc_dp_data *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret)
		printk(BIOS_ERR,
		       "dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		       cmd, status);
	return ret;
}

static int tegra_dc_i2c_aux_read(struct tegra_dc_dp_data *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = MIN(DP_AUX_MAX_BYTES, *size - finished);

		u32 len = 1;
		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			printk(BIOS_ERR, "%s: error sending address to read.\n",
			       __func__);
			break;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			printk(BIOS_ERR, "%s: error reading data.\n", __func__);
			break;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;
	return ret;
}

static void tegra_dc_dpaux_enable(struct tegra_dc_dp_data *dp)
{
	/* clear interrupts */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupts for now; enable them once the ISR is in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

static void tegra_dc_dp_dump_link_cfg(struct tegra_dc_dp_data *dp,
				      const struct tegra_dc_dp_link_config *link_cfg)
{
	printk(BIOS_INFO, "DP config: cfg_name               "
	       "cfg_value\n");
	printk(BIOS_INFO, "           Lane Count             %d\n",
	       link_cfg->max_lane_count);
	printk(BIOS_INFO, "           SupportEnhancedFraming %s\n",
	       link_cfg->support_enhanced_framing ? "Y" : "N");
	printk(BIOS_INFO, "           Bandwidth              %d\n",
	       link_cfg->max_link_bw);
	printk(BIOS_INFO, "           bpp                    %d\n",
	       link_cfg->bits_per_pixel);
	printk(BIOS_INFO, "           EnhancedFraming        %s\n",
	       link_cfg->enhanced_framing ? "Y" : "N");
	printk(BIOS_INFO, "           Scramble_enabled       %s\n",
	       link_cfg->scramble_ena ? "Y" : "N");
	printk(BIOS_INFO, "           LinkBW                 %d\n",
	       link_cfg->link_bw);
	printk(BIOS_INFO, "           lane_count             %d\n",
	       link_cfg->lane_count);
	printk(BIOS_INFO, "           activepolarity         %d\n",
	       link_cfg->activepolarity);
	printk(BIOS_INFO, "           active_count           %d\n",
	       link_cfg->active_count);
	printk(BIOS_INFO, "           tu_size                %d\n",
	       link_cfg->tu_size);
	printk(BIOS_INFO, "           active_frac            %d\n",
	       link_cfg->active_frac);
	printk(BIOS_INFO, "           watermark              %d\n",
	       link_cfg->watermark);
	printk(BIOS_INFO, "           hblank_sym             %d\n",
	       link_cfg->hblank_sym);
	printk(BIOS_INFO, "           vblank_sym             %d\n",
	       link_cfg->vblank_sym);
}

static int _tegra_dp_lower_link_config(struct tegra_dc_dp_data *dp,
				       struct tegra_dc_dp_link_config *link_cfg)
{

	switch (link_cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (link_cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			link_cfg->link_bw = SOR_LINK_SPEED_G2_7;
		link_cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		link_cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (link_cfg->lane_count == 1) {
			link_cfg->link_bw = SOR_LINK_SPEED_G2_7;
			link_cfg->lane_count = link_cfg->max_lane_count;
		} else
			link_cfg->lane_count /= 2;
		break;
	default:
		printk(BIOS_ERR, "dp: Error link rate %d\n", link_cfg->link_bw);
		return DP_LT_FAILED;
	}

	return (link_cfg->lane_count > 0) ? DP_LT_SUCCESS : DP_LT_FAILED;
}

/* Calculate if the given cfg can meet the mode request. */
/* Return true if the mode is possible, false otherwise. */
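/* The search below runs in fixed-point arithmetic (scaled by f = 100000):
   for TU sizes from 64 down to 32 it derives the active symbols per TU from
   the pixel-clock/link-rate ratio and keeps the TU size with the smallest
   accumulated rounding error, from which the watermark and the
   hblank/vblank symbol budgets are then computed. */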
static int tegra_dc_dp_calc_config(struct tegra_dc_dp_data *dp,
				   const struct soc_nvidia_tegra210_config *config,
				   struct tegra_dc_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */

	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */

	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;

	int i;
	int neg;

	printk(BIOS_INFO, "dp: %s\n", __func__);

	if (!link_rate || !link_cfg->lane_count || !config->pixel_clock ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)config->pixel_clock * link_cfg->bits_per_pixel >=
	    (u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)((u64)link_rate * (u64)config->xres /
				 config->pixel_clock);

	ratio_f = (u64)config->pixel_clock * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	ratio_f = (u64)(ratio_f / (link_rate * link_cfg->lane_count));

	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = (u64)(activesym_f / (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)((u64)(activecount_f / (u32)f));

		if (frac_f < (f / 2))	/* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			frac_f = (u64)((f * f) / frac_f);	/* 1/fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)((u64)(frac_f / (u32)f)) + 1 :
					(u32)((u64)(frac_f / (u32)f));
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? (u64)(
				(activecount_f + (activefrac * f - f) * f) /
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + (u64)(f / activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				(u64)((approx_value_f - activesym_f) / i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				(u64)((activesym_f - approx_value_f) / i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = (u64)((ratio_f * link_cfg->tu_size * (f - ratio_f)) / f);
	link_cfg->watermark = (u32)((u64)((watermark_f + lowest_neg_error_f) /
		f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (config->xres * link_cfg->bits_per_pixel) /
		(8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		printk(BIOS_INFO,
		       "dp: sor setting: unable to get a good tusize, "
		       "force watermark to 30.\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		printk(BIOS_INFO,
		       "dp: sor setting: force watermark to the number "
		       "of symbols in the line.\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/* Refer to dev_disp.ref for more information. */
	/* # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - */
	/*                      SetRasterBlankStart.X - 7) * link_clk / pclk) */
	/*                      - 3 * enhanced_framing - Y */
	/* where Y = (# lanes == 4) 3 : (# lanes == 2) ? 6 : 12 */
	link_cfg->hblank_sym = (int)((u64)(((u64)(config->hback_porch +
			config->hfront_porch + config->hsync_width - 7) *
			link_rate) / config->pixel_clock)) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;


	/* Refer to dev_disp.ref for more information. */
	/* # symbols/vblank = ((SetRasterBlankStart.X - */
	/*                      SetRasterBlankEnd.X - 25) * link_clk / pclk) */
	/*                      - Y - 1; */
	/* where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 */
	link_cfg->vblank_sym = (int)((u64)((u64)(config->xres - 25)
			* link_rate / config->pixel_clock)) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);

	return 0;
}

static int tegra_dc_dp_init_max_link_cfg(
			struct soc_nvidia_tegra210_config *config,
			struct tegra_dc_dp_data *dp,
			struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	int ret;

	printk(BIOS_INFO, "dp: %s\n", __func__);

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_LANE_COUNT,
					&dpcd_data));
	link_cfg->max_lane_count = dpcd_data & NV_DPCD_MAX_LANE_COUNT_MASK;
	link_cfg->tps3_supported = (dpcd_data &
		NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;

	link_cfg->support_enhanced_framing =
		(dpcd_data & NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_DOWNSPREAD,
					&dpcd_data));
	link_cfg->downspread = (dpcd_data & NV_DPCD_MAX_DOWNSPREAD_VAL_0_5_PCT)
		? 1 : 0;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
					&link_cfg->aux_rd_interval));

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_LINK_BANDWIDTH,
					&link_cfg->max_link_bw));

	link_cfg->bits_per_pixel = config->panel_bits_per_pixel;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = config->dp.drive_current;
	link_cfg->preemphasis = config->dp.preemphasis;
	link_cfg->postcursor = config->dp.postcursor;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_EDP_CONFIG_CAP,
					&dpcd_data));
	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & NV_DPCD_EDP_CONFIG_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;

	tegra_dc_dp_calc_config(dp, config, link_cfg);
	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dc_dp_data *dp, int ena)
{
	int ret;

	u8 dpcd_data = ena ?
		NV_DPCD_EDP_CONFIG_SET_ASC_RESET_ENABLE :
		NV_DPCD_EDP_CONFIG_SET_ASC_RESET_DISABLE;

	CHECK_RET(tegra_dc_dp_dpcd_write(dp, NV_DPCD_EDP_CONFIG_SET,
					 dpcd_data));

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(&dp->sor, ena);
	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dc_dp_data *dp, u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(&dp->sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, NV_DPCD_LINK_BANDWIDTH_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dc_dp_data *dp,
				   const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced_framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	CHECK_RET(tegra_dc_dp_dpcd_write(dp, NV_DPCD_LANE_COUNT_SET,
					 dpcd_data));

	tegra_dc_sor_set_lane_count(&dp->sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */
	return 0;
}

#if DO_FAST_LINK_TRAINING
static int tegra_dc_dp_link_trained(struct tegra_dc_dp_data *dp,
				    const struct tegra_dc_dp_link_config *link_cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < link_cfg->lane_count; ++lane) {
		CHECK_RET(tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				NV_DPCD_LANE2_3_STATUS : NV_DPCD_LANE0_1_STATUS,
				&data));
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			NV_DPCD_STATUS_LANEX_CR_DONE_YES |
			NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_YES;
		if ((data & mask) != mask)
			return -1;
	}
	return 0;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dp_channel_eq_status(struct tegra_dc_dp_data *dp)
{
	u32 cnt;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 data;
	u8 ce_done = 1;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp, (NV_DPCD_LANE0_1_STATUS + cnt),
				      &data);

		if (n_lanes == 1) {
			ce_done =
			  (data & (0x1 << NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
			  (data & (0x1 << NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else
		  if (!(data & (0x1 << NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
		      !(data & (0x1 << NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
		      !(data & (0x1 << NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
		      !(data & (0x1 << NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
			return 0;
	}

	if (ce_done) {
		tegra_dc_dp_dpcd_read(dp, NV_DPCD_LANE_ALIGN_STATUS_UPDATED,
				      &data);
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done;
}

static u8 tegra_dp_clock_recovery_status(struct tegra_dc_dp_data *dp)
{
	u32 cnt;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 data_ptr;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp,
			(NV_DPCD_LANE0_1_STATUS + cnt), &data_ptr);

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ? 1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}

static void tegra_dp_lt_adjust(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4],
			       u8 pc_supported)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = dp->link_cfg.lane_count;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp,
			(NV_DPCD_LANE0_1_ADJUST_REQ + cnt), &data_ptr);
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}
	if (pc_supported) {
		tegra_dc_dp_dpcd_read(dp,
			NV_DPCD_ADJUST_REQ_POST_CURSOR2, &data_ptr);
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}
}

static inline u32 tegra_dp_wait_aux_training(struct tegra_dc_dp_data *dp,
					     u8 is_clk_recovery)
{
	if (!dp->link_cfg.aux_rd_interval)
		is_clk_recovery ? udelay(200) :
			udelay(500);
	else
		mdelay(dp->link_cfg.aux_rd_interval * 4);

	return dp->link_cfg.aux_rd_interval;
}

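/*
 * Training pattern helper: TPS1 is used for the clock recovery phase and
 * TPS2 (or TPS3 when the sink supports it) for channel equalization.
 * Scrambling is kept disabled while a training pattern is active and only
 * re-enabled when the pattern is set back to "disabled".
 */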
static void tegra_dp_tpg(struct tegra_dc_dp_data *dp, u32 tp, u32 n_lanes)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(&dp->sor, 1, tp, &dp->link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET, data);
}

static int tegra_dp_link_config(struct tegra_dc_dp_data *dp,
				const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;

	if (link_cfg->lane_count == 0) {
		printk(BIOS_ERR, "dp: error: lane count is 0. "
		       "Cannot set link config.\n");
		return DP_LT_FAILED;
	}

	/* Set power state if it is not at the normal level */
	if (tegra_dc_dp_dpcd_read(dp, NV_DPCD_SET_POWER, &dpcd_data))
		return DP_LT_FAILED;

	if (dpcd_data == NV_DPCD_SET_POWER_VAL_D3_PWRDWN) {
		dpcd_data = NV_DPCD_SET_POWER_VAL_D0_NORMAL;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			if (tegra_dc_dp_dpcd_write(dp, NV_DPCD_SET_POWER,
						   dpcd_data))
				break;
			if (retry == 1) {
				printk(BIOS_ERR, "dp: Failed to set DP panel"
				       " power\n");
				return DP_LT_FAILED;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap)
		if (tegra_dc_dp_set_assr(dp, 1))
			return DP_LT_FAILED;

	if (tegra_dp_set_link_bandwidth(dp, link_cfg->link_bw)) {
		printk(BIOS_ERR, "dp: Failed to set link bandwidth\n");
		return DP_LT_FAILED;
	}
	if (tegra_dp_set_lane_count(dp, link_cfg)) {
		printk(BIOS_ERR, "dp: Failed to set lane count\n");
		return DP_LT_FAILED;
	}
	tegra_dc_sor_set_dp_linkctl(&dp->sor, 1, training_pattern_none,
				    link_cfg);
	return DP_LT_SUCCESS;
}

static int tegra_dp_lower_link_config(struct tegra_dc_dp_data *dp,
				      struct tegra_dc_dp_link_config *cfg)
{
	struct tegra_dc_dp_link_config tmp_cfg;

	tmp_cfg = dp->link_cfg;
	cfg->is_valid = 0;

	if (_tegra_dp_lower_link_config(dp, cfg))
		goto fail;

	if (tegra_dc_dp_calc_config(dp, dp->dc->config, cfg))
		goto fail;
	tegra_dp_link_config(dp, cfg);

	return DP_LT_SUCCESS;
fail:
	dp->link_cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);
	return DP_LT_FAILED;
}

static void tegra_dp_lt_config(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4])
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = NV_SOR_PR_LANE2_DP_LANE0_MASK;
			shift = NV_SOR_PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = NV_SOR_PR_LANE1_DP_LANE1_MASK;
			shift = NV_SOR_PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = NV_SOR_PR_LANE0_DP_LANE2_MASK;
			shift = NV_SOR_PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = NV_SOR_PR_LANE3_DP_LANE3_MASK;
			shift = NV_SOR_PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			printk(BIOS_ERR,
			       "dp: incorrect lane cnt\n");
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
			vs_reg << shift, pc_reg << shift, pc_supported);
	}

	tegra_dp_disable_tx_pu(&dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp,
			(NV_DPCD_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
				(NV_DPCD_TRAINING_LANE0_1_SET2 + cnt), val);
		}
	}
}

static int _tegra_dp_channel_eq(struct tegra_dc_dp_data *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		if (retry_cnt) {
			tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported);
			tegra_dp_lt_config(dp, pe, vs, pc);
		}

		tegra_dp_wait_aux_training(dp, 0);

		if (!tegra_dp_clock_recovery_status(dp)) {
			printk(BIOS_ERR, "dp: CR failed in channel EQ"
			       " sequence!\n");
			break;
		}

		if (tegra_dp_channel_eq_status(dp))
			return DP_LT_SUCCESS;
	}

	return DP_LT_FAILED;
}

static int tegra_dp_channel_eq(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4])
{
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	int err;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes);

	err = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes);

	return err;
}

static int _tegra_dp_clk_recovery(struct tegra_dc_dp_data *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc);
		tegra_dp_wait_aux_training(dp, 1);

		if (tegra_dp_clock_recovery_status(dp))
			return DP_LT_SUCCESS;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return DP_LT_FAILED;
}

static int tegra_dp_clk_recovery(struct tegra_dc_dp_data *dp,
				 u32 pe[4], u32 vs[4], u32 pc[4])
{
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes);

	return err;
}

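/*
 * Full link training: run the clock recovery phase first, then channel
 * equalization. If either phase fails, fall back to a lower link
 * configuration (fewer lanes or a lower link rate) and restart from clock
 * recovery.
 */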
static int tegra_dc_dp_full_link_training(struct tegra_dc_dp_data *dp)
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	printk(BIOS_INFO, "dp: %s\n", __func__);
	tegra_sor_precharge_lanes(sor);

retry_cr:
	memset(pe, preEmphasis_Disabled, sizeof(pe));
	memset(vs, driveCurrent_Level0, sizeof(vs));
	memset(pc, postCursor2_Level0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc);
	if (err != DP_LT_SUCCESS) {
		if (!tegra_dp_lower_link_config(dp, &dp->link_cfg))
			goto retry_cr;

		printk(BIOS_ERR, "dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc);
	if (err != DP_LT_SUCCESS) {
		if (!tegra_dp_lower_link_config(dp, &dp->link_cfg))
			goto retry_cr;

		printk(BIOS_ERR,
		       "dp: channel equalization failed\n");
		goto fail;
	}

	tegra_dc_dp_dump_link_cfg(dp, &dp->link_cfg);

	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from the kernel dc driver.
 * See more details at drivers/video/tegra/dc/dp.c
 */
#if DO_FAST_LINK_TRAINING
static int tegra_dc_dp_fast_link_training(struct tegra_dc_dp_data *dp,
			const struct tegra_dc_dp_link_config *link_cfg)
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	printk(BIOS_INFO, "dp: %s\n", __func__);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET,
			       NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET,
			       NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_LANE0_SET + j,
				       0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
		NV_DPCD_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		printk(BIOS_ERR,
		       "dp: Link training error for TP1 (%#x)\n", data16);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_LANE0_SET + j,
				       0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
		NV_DPCD_LANE0_1_STATUS, (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		printk(BIOS_ERR,
		       "dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(&dp->sor, &link_bw,
					      &lane_count);
		printk(BIOS_ERR,
		       "Fast link training failed, link bw %d, lane # %d\n",
		       link_bw, lane_count);
		return -EFAULT;
	}

	printk(BIOS_INFO,
	       "Fast link training succeeded, link bw %d, lane %d\n",
	       link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dp_do_link_training(struct tegra_dc_dp_data *dp,
			const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 link_bw;
	u8 lane_count;
#if DO_FAST_LINK_TRAINING
	int ret;

	/* Now do the fast link training for eDP */
	ret = tegra_dc_dp_fast_link_training(dp, link_cfg);
	if (ret) {
		printk(BIOS_ERR, "dp: fast link training failed\n");

		/* Try full link training then */
		if (tegra_dc_dp_full_link_training(dp)) {
			printk(BIOS_ERR, "dp: full link training failed\n");
			return ret;
		}
	} else {
		/* set to a known-good drive setting if fast link succeeded */
		tegra_dc_sor_set_voltage_swing(&dp->sor);
	}
#else
	if (tegra_dc_dp_full_link_training(dp)) {
		printk(BIOS_ERR, "dp: full link training failed\n");
		return -EFAULT;
	}
#endif

	/* If everything went well, double-check the link config */
	/* TODO: record edc/c2 data for debugging */
	tegra_dc_sor_read_link_config(&dp->sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}

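/*
 * Start from the maximum link configuration reported by the sink. If the
 * mode calculation, link configuration or link training fails, the link
 * training path may lower the configuration, so link_cfg->is_valid is what
 * tells us whether a working configuration was found.
 */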
static int tegra_dc_dp_explore_link_cfg(struct tegra_dc_dp_data *dp,
			struct tegra_dc_dp_link_config *link_cfg,
			const struct soc_nvidia_tegra210_config *config)
{
	struct tegra_dc_dp_link_config temp_cfg;

	if (!config->pixel_clock || !config->xres || !config->yres) {
		printk(BIOS_ERR,
		       "dp: error mode configuration");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		printk(BIOS_ERR,
		       "dp: error link configuration");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, config, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}

static void tegra_dp_update_config(struct tegra_dc_dp_data *dp,
				   struct soc_nvidia_tegra210_config *config)
{
	struct edid edid;
	u8 buf[128] = {0};
	u32 size = sizeof(buf), aux_stat = 0;

	printk(BIOS_ERR, "%s: enable r/w dump.\n",
	       __func__);

	tegra_dc_dpaux_enable(dp);
	if (tegra_dc_i2c_aux_read(dp, TEGRA_EDID_I2C_ADDRESS, 0, buf, &size,
				  &aux_stat)) {
		printk(BIOS_ERR, "%s: Failed to read EDID. Use defaults.\n",
		       __func__);
		return;
	}

	if (decode_edid(buf, sizeof(buf), &edid) != EDID_CONFORMANT) {
		printk(BIOS_ERR, "%s: Failed to decode EDID. Use defaults.\n",
		       __func__);
		return;
	}

	config->xres = config->display_xres = edid.mode.ha;
	config->yres = config->display_yres = edid.mode.va;

	config->pixel_clock = edid.mode.pixel_clock * 1000;

	config->hfront_porch = edid.mode.hso;
	config->hsync_width = edid.mode.hspw;
	config->hback_porch = edid.mode.hbl - edid.mode.hso - edid.mode.hspw;

	config->vfront_porch = edid.mode.vso;
	config->vsync_width = edid.mode.vspw;
	config->vback_porch = edid.mode.vbl - edid.mode.vso - edid.mode.vspw;

	/**
	 * Note edid->framebuffer_bits_per_pixel is currently hard-coded as 32,
	 * so we should keep the default value in device config.
	 *
	 * EDID v1.3 panels may not have color depth info, so we need to check
	 * if these values are zero before updating config.
	 */
	if (edid.panel_bits_per_pixel)
		config->panel_bits_per_pixel = edid.panel_bits_per_pixel;
	if (edid.panel_bits_per_color)
		config->color_depth = edid.panel_bits_per_color;
	printk(BIOS_SPEW, "%s: configuration updated by EDID.\n", __func__);
}

void dp_init(void *_config)
{
	struct soc_nvidia_tegra210_config *config = (void *)_config;
	struct tegra_dc *dc = config->dc_data;
	struct tegra_dc_dp_data *dp = &dp_data;

	/* set up links among config, dc, dp and sor */
	dp->dc = dc;
	dc->out = dp;
	dp->sor.dc = dc;

	dp->sor.power_is_up = 0;
	dp->sor.base = (void *)TEGRA_ARM_SOR;
	dp->sor.pmc_base = (void *)TEGRA_PMC_BASE;
	dp->sor.portnum = 0;
	dp->sor.link_cfg = &dp->link_cfg;
	dp->aux_base = (void *)TEGRA_ARM_DPAUX;
	dp->link_cfg.is_valid = 0;
	dp->enabled = 0;

	tegra_dp_update_config(dp, config);
}

static void tegra_dp_hpd_config(struct tegra_dc_dp_data *dp,
				struct soc_nvidia_tegra210_config *config)
{
	u32 val;

	val = config->dp.hpd_plug_min_us |
		(config->dp.hpd_unplug_min_us <<
		DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME_SHIFT);
	tegra_dpaux_writel(dp, DPAUX_HPD_CONFIG, val);

	tegra_dpaux_writel(dp, DPAUX_HPD_IRQ_CONFIG, config->dp.hpd_irq_min_us);
}

static int tegra_dp_hpd_plug(struct tegra_dc_dp_data *dp, int timeout_ms)
{
	u32 val;
	u32 timeout = timeout_ms * 1000;
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
		timeout -= 100;
	} while (timeout > 0);
	return -1;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dc_dp_data *dp,
					u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;

	mdelay(delay_ms);
	tegra_dc_dp_dpcd_read(dp, NV_DPCD_SINK_STATUS, &dpcd_data);

	out_of_sync = ((dpcd_data & NV_DPCD_SINK_STATUS_PORT0_IN_SYNC) !=
		       NV_DPCD_SINK_STATUS_PORT0_IN_SYNC);

	if (out_of_sync)
		printk(BIOS_ERR,
		       "SINK receive port 0 is out of synchronization\n");
	else
		printk(BIOS_INFO,
		       "SINK is in synchronization\n");

	return out_of_sync;
}

static void tegra_dc_dp_check_sink(struct tegra_dc_dp_data *dp,
				   struct soc_nvidia_tegra210_config *config)
{

	u8 max_retry = 3;
	int delay_frame;

	/* The DP TCON may skip some main stream frames, so we need to wait
	   a while before reading the DPCD SINK STATUS register, starting
	   from 5 frames */
	delay_frame = 5;

	while (tegra_dc_dp_sink_out_of_sync(dp, FRAME_IN_MS * delay_frame) &&
	       max_retry--) {
		tegra_dc_detach(&dp->sor);
		if (tegra_dc_dp_explore_link_cfg(dp, &dp->link_cfg, config)) {
			printk(BIOS_ERR, "dp: %s: failed to configure link\n",
			       __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(&dp->sor, 1);
		tegra_dc_sor_attach(&dp->sor);

		/* Increase delay_frame for the next try in case the sink is
		   skipping more frames */
		delay_frame += 10;
	}
}

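/*
 * Panel bring-up order implemented below: enable the DPAUX pads, configure
 * and wait for HPD, read the sink capabilities from the DPCD, enable the
 * SOR and panel power, set the sink to the D0 power state, train the link,
 * attach the display controller and finally power down any unused lanes.
 */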
void dp_enable(void *_dp)
{
	struct tegra_dc_dp_data *dp = _dp;
	struct tegra_dc *dc = dp->dc;
	struct soc_nvidia_tegra210_config *config = dc->config;

	u8 data;
	u32 retry;
	int ret;

	tegra_dc_dpaux_enable(dp);

	tegra_dp_hpd_config(dp, config);
	if (tegra_dp_hpd_plug(dp, config->dp.vdd_to_hpd_delay_ms) < 0) {
		printk(BIOS_ERR, "dp: hpd plug failed\n");
		goto error_enable;
	}

	if (tegra_dc_dp_init_max_link_cfg(config, dp, &dp->link_cfg)) {
		printk(BIOS_ERR, "dp: failed to init link configuration\n");
		goto error_enable;
	}

	tegra_dc_sor_enable_dp(&dp->sor);

	tegra_dc_sor_set_panel_power(&dp->sor, 1);

	/* Write power on to DPCD */
	data = NV_DPCD_SET_POWER_VAL_D0_NORMAL;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(dp,
					     NV_DPCD_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		printk(BIOS_ERR,
		       "dp: failed to power on panel (0x%x)\n", ret);
		goto error_enable;
	}

	/* Confirm DP plugged status */
	if (!(tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT) &
	      DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		printk(BIOS_ERR, "dp: could not detect HPD\n");
		goto error_enable;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(dp, NV_DPCD_REV, &dp->revision))
		printk(BIOS_ERR,
		       "dp: failed to read the revision number from sink\n");

	if (tegra_dc_dp_explore_link_cfg(dp, &dp->link_cfg, config)) {
		printk(BIOS_ERR, "dp: failed to configure link\n");
		goto error_enable;
	}

	tegra_dc_sor_set_power_state(&dp->sor, 1);
	tegra_dc_sor_attach(&dp->sor);

	tegra_dc_dp_check_sink(dp, config);

	/*
	 * Power down the unused lanes to save power
	 * (a few hundred milliwatts, varies between boards).
	 */
	tegra_dc_sor_power_down_unused_lanes(&dp->sor);

	dp->enabled = 1;
error_enable:
	return;
}

void dp_display_startup(struct device *dev)
{
	struct soc_nvidia_tegra210_config *config = dev->chip_info;
	struct display_controller *disp_ctrl =
			(void *)config->display_controller;

	u32 framebuffer_size_mb = config->framebuffer_size / MiB;
	u32 framebuffer_base_mb = config->framebuffer_base / MiB;

	struct pwm_controller *pwm = (void *)TEGRA_PWM_BASE;
	struct tegra_dc *dc = &dc_data;
	u32 plld_rate;

	printk(BIOS_INFO, "%s: entry: disp_ctrl: %p.\n",
	       __func__, disp_ctrl);

	if (disp_ctrl == NULL) {
		printk(BIOS_ERR, "Error: No dc is assigned by dt.\n");
		return;
	}

	dc->base = (void *)disp_ctrl;
	dc->config = config;
	config->dc_data = dc;

	/* Note dp_init may read EDID and change some config values. */
	dp_init(config);

	if (framebuffer_size_mb == 0) {
		framebuffer_size_mb = ALIGN_UP(config->display_xres *
			config->display_yres *
			(config->framebuffer_bits_per_pixel / 8), MiB)/MiB;
	}

	config->framebuffer_size = framebuffer_size_mb * MiB;
	config->framebuffer_base = framebuffer_base_mb * MiB;

	/* The plld is programmed with the assumption that SHIFT_CLK_DIVIDER
	 * and PIXEL_CLK_DIVIDER are zero (divide by 1). See
	 * update_display_mode() for details.
	 */
	plld_rate = clock_configure_plld(config->pixel_clock * 2);
	if (plld_rate == 0) {
		printk(BIOS_ERR, "dc: clock init failed\n");
		return;
	} else if (plld_rate != config->pixel_clock * 2) {
		printk(BIOS_WARNING, "dc: plld rounded to %u\n", plld_rate);
		config->pixel_clock = plld_rate / 2;
	}

	/* set disp1's clock source to PLLD_OUT0 */
	clock_configure_source(disp1, PLLD, (plld_rate/KHz)/2);

	/* Init dc */
	if (tegra_dc_init(disp_ctrl)) {
		printk(BIOS_ERR, "dc: init failed\n");
		return;
	}

	/* Configure dc mode */
	if (update_display_mode(disp_ctrl, config)) {
		printk(BIOS_ERR, "dc: failed to configure display mode.\n");
		return;
	}

	/* Enable dp */
	dp_enable(dc->out);

	/* Set up Tegra PWM n (where n is specified in config->dp.pwm) to drive
	 * the panel backlight.
	 */
	printk(BIOS_SPEW, "%s: enable panel backlight pwm\n", __func__);
	WRITEL(((1 << NV_PWM_CSR_ENABLE_SHIFT) |
		(220 << NV_PWM_CSR_PULSE_WIDTH_SHIFT) | /* 220/256 */
		0x02e), /* frequency divider */
	       &pwm->pwm[config->dp.pwm].csr);

	/* Set up window */
	update_window(config);
	printk(BIOS_INFO, "%s: display init done.\n", __func__);

	/* Save panel mode to cb tables */
	pass_mode_info_to_payload(config);

	/*
	 * After this point, it is the payload's responsibility to allocate
	 * the framebuffer, set the base address in the dc's
	 * WINBUF_START_ADDR register and enable the window by setting the
	 * dc's DISP_DISP_WIN_OPTIONS register.
	 */
}