/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * drivers/video/tegra/dc/dp.c
 */

#include <console/console.h>
#include <device/device.h>
#include <device/i2c_simple.h>
#include <edid.h>
#include <string.h>
#include <delay.h>
#include <soc/addressmap.h>
#include <soc/clock.h>
#include <soc/display.h>
#include <soc/nvidia/tegra/i2c.h>
#include <soc/nvidia/tegra/dc.h>
#include <soc/nvidia/tegra/types.h>
#include <soc/nvidia/tegra/pwm.h>
#include <soc/nvidia/tegra/displayport.h>
#include <soc/sor.h>
#include <types.h>

#include "chip.h"

#define DO_FAST_LINK_TRAINING 0

struct tegra_dc dc_data;

enum {
	DP_LT_SUCCESS = 0,
	DP_LT_FAILED = -1,
};

struct tegra_dc_dp_data dp_data;
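
/* DPAUX registers are indexed by word: shifting the register index left by 2
   converts it into a byte offset from aux_base. */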
static inline u32 tegra_dpaux_readl(struct tegra_dc_dp_data *dp, u32 reg)
{
	void *addr = dp->aux_base + (u32)(reg << 2);
	u32 reg_val = READL(addr);
	return reg_val;
}

static inline void tegra_dpaux_writel(struct tegra_dc_dp_data *dp,
				      u32 reg, u32 val)
{
	void *addr = dp->aux_base + (u32)(reg << 2);
	WRITEL(val, addr);
}

static inline u32 tegra_dc_dpaux_poll_register(struct tegra_dc_dp_data *dp,
					       u32 reg, u32 mask, u32 exp_val,
					       u32 poll_interval_us,
					       u32 timeout_us)
{
	u32 reg_val = 0;
	u32 temp = timeout_us;

	do {
		udelay(poll_interval_us);
		reg_val = tegra_dpaux_readl(dp, reg);
		if (timeout_us > poll_interval_us)
			timeout_us -= poll_interval_us;
		else
			break;
	} while ((reg_val & mask) != exp_val);

	if ((reg_val & mask) == exp_val)
		return 0;	/* success */
	printk(BIOS_ERR,
	       "dpaux_poll_register 0x%x: timeout: "
	       "(reg_val)0x%08x & (mask)0x%08x != (exp_val)0x%08x\n",
	       reg, reg_val, mask, exp_val);
	return temp;
}

static inline int tegra_dpaux_wait_transaction(struct tegra_dc_dp_data *dp)
{
	/* According to DP spec, each aux transaction needs to finish
	   within 40ms. */
	if (tegra_dc_dpaux_poll_register(dp, DPAUX_DP_AUXCTL,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_MASK,
					 DPAUX_DP_AUXCTL_TRANSACTREQ_DONE,
					 100, DP_AUX_TIMEOUT_MS * 1000) != 0) {
		printk(BIOS_INFO, "dp: DPAUX transaction timeout\n");
		return -1;
	}
	return 0;
}
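
/* Write one AUX chunk (up to DP_AUX_MAX_BYTES): load the data FIFO, program
   the command and transfer length, kick off the transaction, and retry on
   timeout or DEFER replies until the sink ACKs. */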
static int tegra_dc_dpaux_write_chunk(struct tegra_dc_dp_data *dp, u32 cmd,
				      u32 addr, u8 *data, u32 *size,
				      u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only write one chunk of data */

	/* Make sure the command is a write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		printk(BIOS_ERR, "dp: aux write cmd 0x%x is invalid\n", cmd);
		return -1;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i), temp_data);
		data += 4;
	}

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			printk(BIOS_ERR, "dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux write retry (0x%x)"
				       " -- %d\n",
				       *aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_ERR, "dp: aux write got error"
				       " (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux write defer (0x%x)"
				       " -- %d\n", *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_ERR, "dp: aux write defer exceeds"
				       " max retries (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
		    DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			printk(BIOS_ERR, "dp: aux write failed (0x%x)\n",
			       *aux_stat);
			return -1;
		}
	}
	/* Should never get here */
	return -1;
}
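
/* Read one AUX chunk (up to DP_AUX_MAX_BYTES): the same request/retry handling
   as the write path, except the data FIFO is drained after an ACK reply. */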
static int tegra_dc_dpaux_read_chunk(struct tegra_dc_dp_data *dp, u32 cmd,
				     u32 addr, u8 *data, u32 *size,
				     u32 *aux_stat)
{
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;

	if (*size > DP_AUX_MAX_BYTES)
		return -1;	/* only read one chunk */

	/* Check to make sure the command is a read command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CRD:
	case DPAUX_DP_AUXCTL_CMD_I2CREQWSTAT:
	case DPAUX_DP_AUXCTL_CMD_MOTRD:
	case DPAUX_DP_AUXCTL_CMD_AUXRD:
		break;
	default:
		printk(BIOS_ERR, "dp: aux read cmd 0x%x is invalid\n", cmd);
		return -1;
	}

	*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
	if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		printk(BIOS_SPEW, "dp: HPD is not detected\n");
		return -1;
	}

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);

	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size - 1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);
	while ((timeout_retries > 0) && (defer_retries > 0)) {
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(DP_DPCP_RETRY_SLEEP_NS * 2);

		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			printk(BIOS_INFO, "dp: aux read transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux read retry (0x%x)"
				       " -- %d\n", *aux_stat,
				       timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;	/* retry */
			} else {
				printk(BIOS_ERR, "dp: aux read got error"
				       " (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				printk(BIOS_INFO, "dp: aux read defer (0x%x)"
				       " -- %d\n", *aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
						   *aux_stat);
				continue;
			} else {
				printk(BIOS_INFO, "dp: aux read defer exceeds"
				       " max retries (0x%x)\n", *aux_stat);
				return -1;
			}
		}

		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
		    DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			int i;
			u32 temp_data[4];

			for (i = 0; i < DP_AUX_MAX_BYTES / 4; ++i)
				temp_data[i] = tegra_dpaux_readl(dp,
						DPAUX_DP_AUXDATA_READ_W(i));

			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			memcpy(data, temp_data, *size);

			return 0;
		} else {
			printk(BIOS_ERR, "dp: aux read failed (0x%x)\n",
			       *aux_stat);
			return -1;
		}
	}
	/* Should never get here */
	printk(BIOS_ERR, "%s: unexpected fall-through\n", __func__);
	return -1;
}

#if DO_FAST_LINK_TRAINING
static int tegra_dc_dpaux_read(struct tegra_dc_dp_data *dp, u32 cmd, u32 addr,
			       u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	u32 cur_size;
	int ret = 0;

	do {
		cur_size = *size - finished;
		if (cur_size > DP_AUX_MAX_BYTES)
			cur_size = DP_AUX_MAX_BYTES;

		ret = tegra_dc_dpaux_read_chunk(dp, cmd, addr,
						data, &cur_size, aux_stat);
		if (ret)
			break;

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;

	} while (*size > finished);

	*size = finished;
	return ret;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dc_dp_dpcd_read(struct tegra_dc_dp_data *dp, u32 cmd,
				 u8 *data_ptr)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_read_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
					cmd, data_ptr, &size, &status);
	if (ret)
		printk(BIOS_ERR,
		       "dp: Failed to read DPCD data. CMD 0x%x, Status 0x%x\n",
		       cmd, status);

	return ret;
}

static int tegra_dc_dp_dpcd_write(struct tegra_dc_dp_data *dp, u32 cmd,
				  u8 data)
{
	u32 size = 1;
	u32 status = 0;
	int ret;

	ret = tegra_dc_dpaux_write_chunk(dp, DPAUX_DP_AUXCTL_CMD_AUXWR,
					 cmd, &data, &size, &status);
	if (ret)
		printk(BIOS_ERR,
		       "dp: Failed to write DPCD data. CMD 0x%x, Status 0x%x\n",
		       cmd, status);
	return ret;
}
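
/* I2C-over-AUX read (used for the EDID at TEGRA_EDID_I2C_ADDRESS): write the
   register address with the MOT bit set, then read the data back in
   DP_AUX_MAX_BYTES chunks. */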
static int tegra_dc_i2c_aux_read(struct tegra_dc_dp_data *dp, u32 i2c_addr,
				 u8 addr, u8 *data, u32 *size, u32 *aux_stat)
{
	u32 finished = 0;
	int ret = 0;

	do {
		u32 cur_size = MIN(DP_AUX_MAX_BYTES, *size - finished);

		u32 len = 1;
		ret = tegra_dc_dpaux_write_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_MOTWR, i2c_addr,
				&addr, &len, aux_stat);
		if (ret) {
			printk(BIOS_ERR, "%s: error sending address to read.\n",
			       __func__);
			break;
		}

		ret = tegra_dc_dpaux_read_chunk(
				dp, DPAUX_DP_AUXCTL_CMD_I2CRD, i2c_addr,
				data, &cur_size, aux_stat);
		if (ret) {
			printk(BIOS_ERR, "%s: error reading data.\n", __func__);
			break;
		}

		/* cur_size should be the real size returned */
		addr += cur_size;
		data += cur_size;
		finished += cur_size;
	} while (*size > finished);

	*size = finished;
	return ret;
}

static void tegra_dc_dpaux_enable(struct tegra_dc_dp_data *dp)
{
	/* clear interrupts */
	tegra_dpaux_writel(dp, DPAUX_INTR_AUX, 0xffffffff);
	/* do not enable interrupts for now; enable them once an ISR is in place */
	tegra_dpaux_writel(dp, DPAUX_INTR_EN_AUX, 0x0);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_PADCTL,
			   DPAUX_HYBRID_PADCTL_AUX_DRVZ_OHM_50 |
			   DPAUX_HYBRID_PADCTL_AUX_CMH_V0_70 |
			   0x18 << DPAUX_HYBRID_PADCTL_AUX_DRVI_SHIFT |
			   DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV_ENABLE);

	tegra_dpaux_writel(dp, DPAUX_HYBRID_SPARE,
			   DPAUX_HYBRID_SPARE_PAD_PWR_POWERUP);
}

static void tegra_dc_dp_dump_link_cfg(struct tegra_dc_dp_data *dp,
				      const struct tegra_dc_dp_link_config *link_cfg)
{
	printk(BIOS_INFO, "DP config: cfg_name               cfg_value\n");
	printk(BIOS_INFO, "           Lane Count             %d\n",
	       link_cfg->max_lane_count);
	printk(BIOS_INFO, "           SupportEnhancedFraming %s\n",
	       link_cfg->support_enhanced_framing ? "Y" : "N");
	printk(BIOS_INFO, "           Bandwidth              %d\n",
	       link_cfg->max_link_bw);
	printk(BIOS_INFO, "           bpp                    %d\n",
	       link_cfg->bits_per_pixel);
	printk(BIOS_INFO, "           EnhancedFraming        %s\n",
	       link_cfg->enhanced_framing ? "Y" : "N");
	printk(BIOS_INFO, "           Scramble_enabled       %s\n",
	       link_cfg->scramble_ena ? "Y" : "N");
	printk(BIOS_INFO, "           LinkBW                 %d\n",
	       link_cfg->link_bw);
	printk(BIOS_INFO, "           lane_count             %d\n",
	       link_cfg->lane_count);
	printk(BIOS_INFO, "           activepolarity         %d\n",
	       link_cfg->activepolarity);
	printk(BIOS_INFO, "           active_count           %d\n",
	       link_cfg->active_count);
	printk(BIOS_INFO, "           tu_size                %d\n",
	       link_cfg->tu_size);
	printk(BIOS_INFO, "           active_frac            %d\n",
	       link_cfg->active_frac);
	printk(BIOS_INFO, "           watermark              %d\n",
	       link_cfg->watermark);
	printk(BIOS_INFO, "           hblank_sym             %d\n",
	       link_cfg->hblank_sym);
	printk(BIOS_INFO, "           vblank_sym             %d\n",
	       link_cfg->vblank_sym);
}

static int _tegra_dp_lower_link_config(struct tegra_dc_dp_data *dp,
				       struct tegra_dc_dp_link_config *link_cfg)
{
	switch (link_cfg->link_bw) {
	case SOR_LINK_SPEED_G1_62:
		if (link_cfg->max_link_bw > SOR_LINK_SPEED_G1_62)
			link_cfg->link_bw = SOR_LINK_SPEED_G2_7;
		link_cfg->lane_count /= 2;
		break;
	case SOR_LINK_SPEED_G2_7:
		link_cfg->link_bw = SOR_LINK_SPEED_G1_62;
		break;
	case SOR_LINK_SPEED_G5_4:
		if (link_cfg->lane_count == 1) {
			link_cfg->link_bw = SOR_LINK_SPEED_G2_7;
			link_cfg->lane_count = link_cfg->max_lane_count;
		} else
			link_cfg->lane_count /= 2;
		break;
	default:
		printk(BIOS_ERR, "dp: Error link rate %d\n", link_cfg->link_bw);
		return DP_LT_FAILED;
	}

	return (link_cfg->lane_count > 0) ? DP_LT_SUCCESS : DP_LT_FAILED;
}

/* Calculate whether the given cfg can meet the mode request. */
/* Returns 0 if the mode is possible, -1 otherwise. */
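/* The calculation below is done in integer fixed point: every *_f value is
   scaled by the precision factor f = 100000 so the TU-size search can run
   without floating point. */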
static int tegra_dc_dp_calc_config(struct tegra_dc_dp_data *dp,
				   const struct soc_nvidia_tegra210_config *config,
				   struct tegra_dc_dp_link_config *link_cfg)
{
	const u32 link_rate = 27 * link_cfg->link_bw * 1000 * 1000;
	const u64 f = 100000;	/* precision factor */

	u32 num_linkclk_line;	/* Number of link clocks per line */
	u64 ratio_f;		/* Ratio of incoming to outgoing data rate */

	u64 frac_f;
	u64 activesym_f;	/* Activesym per TU */
	u64 activecount_f;
	u32 activecount;
	u32 activepolarity;
	u64 approx_value_f;
	u32 activefrac = 0;
	u64 accumulated_error_f = 0;
	u32 lowest_neg_activecount = 0;
	u32 lowest_neg_activepolarity = 0;
	u32 lowest_neg_tusize = 64;
	u32 num_symbols_per_line;
	u64 lowest_neg_activefrac = 0;
	u64 lowest_neg_error_f = 64 * f;
	u64 watermark_f;

	int i;
	int neg;

	printk(BIOS_INFO, "dp: %s\n", __func__);

	if (!link_rate || !link_cfg->lane_count || !config->pixel_clock ||
	    !link_cfg->bits_per_pixel)
		return -1;

	if ((u64)config->pixel_clock * link_cfg->bits_per_pixel >=
	    (u64)link_rate * 8 * link_cfg->lane_count)
		return -1;

	num_linkclk_line = (u32)((u64)link_rate * (u64)config->xres /
				 config->pixel_clock);

	ratio_f = (u64)config->pixel_clock * link_cfg->bits_per_pixel * f;
	ratio_f /= 8;
	ratio_f = (u64)(ratio_f / (link_rate * link_cfg->lane_count));
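
	/* Search TU sizes from 64 down to 32 for the active_count/active_frac
	   pair that minimizes the accumulated timing error per line. */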
	for (i = 64; i >= 32; --i) {
		activesym_f = ratio_f * i;
		activecount_f = (u64)(activesym_f / (u32)f) * f;
		frac_f = activesym_f - activecount_f;
		activecount = (u32)((u64)(activecount_f / (u32)f));

		if (frac_f < (f / 2))	/* fraction < 0.5 */
			activepolarity = 0;
		else {
			activepolarity = 1;
			frac_f = f - frac_f;
		}

		if (frac_f != 0) {
			frac_f = (u64)((f * f) / frac_f);	/* 1/fraction */
			if (frac_f > (15 * f))
				activefrac = activepolarity ? 1 : 15;
			else
				activefrac = activepolarity ?
					(u32)((u64)(frac_f / (u32)f)) + 1 :
					(u32)((u64)(frac_f / (u32)f));
		}

		if (activefrac == 1)
			activepolarity = 0;

		if (activepolarity == 1)
			approx_value_f = activefrac ? (u64)(
				(activecount_f + (activefrac * f - f) * f) /
				(activefrac * f)) :
				activecount_f + f;
		else
			approx_value_f = activefrac ?
				activecount_f + (u64)(f / activefrac) :
				activecount_f;

		if (activesym_f < approx_value_f) {
			accumulated_error_f = num_linkclk_line *
				(u64)((approx_value_f - activesym_f) / i);
			neg = 1;
		} else {
			accumulated_error_f = num_linkclk_line *
				(u64)((activesym_f - approx_value_f) / i);
			neg = 0;
		}

		if ((neg && (lowest_neg_error_f > accumulated_error_f)) ||
		    (accumulated_error_f == 0)) {
			lowest_neg_error_f = accumulated_error_f;
			lowest_neg_tusize = i;
			lowest_neg_activecount = activecount;
			lowest_neg_activepolarity = activepolarity;
			lowest_neg_activefrac = activefrac;

			if (accumulated_error_f == 0)
				break;
		}
	}

	if (lowest_neg_activefrac == 0) {
		link_cfg->activepolarity = 0;
		link_cfg->active_count = lowest_neg_activepolarity ?
			lowest_neg_activecount : lowest_neg_activecount - 1;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = 1;
	} else {
		link_cfg->activepolarity = lowest_neg_activepolarity;
		link_cfg->active_count = (u32)lowest_neg_activecount;
		link_cfg->tu_size = lowest_neg_tusize;
		link_cfg->active_frac = (u32)lowest_neg_activefrac;
	}

	watermark_f = (u64)((ratio_f * link_cfg->tu_size * (f - ratio_f)) / f);
	link_cfg->watermark = (u32)((u64)((watermark_f + lowest_neg_error_f) /
		f)) + link_cfg->bits_per_pixel / 4 - 1;
	num_symbols_per_line = (config->xres * link_cfg->bits_per_pixel) /
		(8 * link_cfg->lane_count);

	if (link_cfg->watermark > 30) {
		printk(BIOS_INFO,
		       "dp: sor setting: unable to get a good tusize, "
		       "force watermark to 30.\n");
		link_cfg->watermark = 30;
		return -1;
	} else if (link_cfg->watermark > num_symbols_per_line) {
		printk(BIOS_INFO,
		       "dp: sor setting: force watermark to the number "
		       "of symbols in the line.\n");
		link_cfg->watermark = num_symbols_per_line;
		return -1;
	}

	/* Refer to dev_disp.ref for more information. */
	/* # symbols/hblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - */
	/*                      SetRasterBlankStart.X - 7) * link_clk / pclk) */
	/*                    - 3 * enhanced_framing - Y */
	/* where Y = (# lanes == 4) ? 3 : (# lanes == 2) ? 6 : 12 */
	link_cfg->hblank_sym = (int)((u64)(((u64)(config->hback_porch +
			config->hfront_porch + config->hsync_width - 7) *
			link_rate) / config->pixel_clock)) -
			3 * link_cfg->enhanced_framing -
			(12 / link_cfg->lane_count);

	if (link_cfg->hblank_sym < 0)
		link_cfg->hblank_sym = 0;

	/* Refer to dev_disp.ref for more information. */
	/* # symbols/vblank = ((SetRasterBlankStart.X - */
	/*                      SetRasterBlankEnd.X - 25) * link_clk / pclk) */
	/*                    - Y - 1; */
	/* where Y = (# lanes == 4) ? 12 : (# lanes == 2) ? 21 : 39 */
	link_cfg->vblank_sym = (int)((u64)((u64)(config->xres - 25)
			* link_rate / config->pixel_clock)) - (36 /
			link_cfg->lane_count) - 4;

	if (link_cfg->vblank_sym < 0)
		link_cfg->vblank_sym = 0;

	link_cfg->is_valid = 1;
	tegra_dc_dp_dump_link_cfg(dp, link_cfg);

	return 0;
}
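
/* Query the sink's DPCD capability registers and seed the link configuration
   with the advertised maximum lane count and link bandwidth. */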
static int tegra_dc_dp_init_max_link_cfg(
			struct soc_nvidia_tegra210_config *config,
			struct tegra_dc_dp_data *dp,
			struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	int ret;

	printk(BIOS_INFO, "dp: %s\n", __func__);

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_LANE_COUNT,
					&dpcd_data));
	link_cfg->max_lane_count = dpcd_data & NV_DPCD_MAX_LANE_COUNT_MASK;
	link_cfg->tps3_supported = (dpcd_data &
		NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_YES) ? 1 : 0;

	link_cfg->support_enhanced_framing =
		(dpcd_data & NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_YES) ?
		1 : 0;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_DOWNSPREAD,
					&dpcd_data));
	link_cfg->downspread = (dpcd_data & NV_DPCD_MAX_DOWNSPREAD_VAL_0_5_PCT)
		? 1 : 0;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_TRAINING_AUX_RD_INTERVAL,
					&link_cfg->aux_rd_interval));

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_MAX_LINK_BANDWIDTH,
					&link_cfg->max_link_bw));

	link_cfg->bits_per_pixel = config->panel_bits_per_pixel;

	/*
	 * Set to a high value for link training and attach.
	 * Will be re-programmed when dp is enabled.
	 */
	link_cfg->drive_current = config->dp.drive_current;
	link_cfg->preemphasis = config->dp.preemphasis;
	link_cfg->postcursor = config->dp.postcursor;

	CHECK_RET(tegra_dc_dp_dpcd_read(dp, NV_DPCD_EDP_CONFIG_CAP,
					&dpcd_data));
	link_cfg->alt_scramber_reset_cap =
		(dpcd_data & NV_DPCD_EDP_CONFIG_CAP_ASC_RESET_YES) ?
		1 : 0;
	link_cfg->only_enhanced_framing =
		(dpcd_data & NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_YES) ?
		1 : 0;

	link_cfg->lane_count = link_cfg->max_lane_count;
	link_cfg->link_bw = link_cfg->max_link_bw;
	link_cfg->enhanced_framing = link_cfg->support_enhanced_framing;

	tegra_dc_dp_calc_config(dp, config, link_cfg);
	return 0;
}

static int tegra_dc_dp_set_assr(struct tegra_dc_dp_data *dp, int ena)
{
	int ret;

	u8 dpcd_data = ena ?
		NV_DPCD_EDP_CONFIG_SET_ASC_RESET_ENABLE :
		NV_DPCD_EDP_CONFIG_SET_ASC_RESET_DISABLE;

	CHECK_RET(tegra_dc_dp_dpcd_write(dp, NV_DPCD_EDP_CONFIG_SET,
					 dpcd_data));

	/* Also reset the scrambler to 0xfffe */
	tegra_dc_sor_set_internal_panel(&dp->sor, ena);
	return 0;
}

static int tegra_dp_set_link_bandwidth(struct tegra_dc_dp_data *dp, u8 link_bw)
{
	tegra_dc_sor_set_link_bandwidth(&dp->sor, link_bw);

	/* Sink side */
	return tegra_dc_dp_dpcd_write(dp, NV_DPCD_LINK_BANDWIDTH_SET, link_bw);
}

static int tegra_dp_set_lane_count(struct tegra_dc_dp_data *dp,
				   const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	int ret;

	/* check if the panel supports enhanced framing */
	dpcd_data = link_cfg->lane_count;
	if (link_cfg->enhanced_framing)
		dpcd_data |= NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_T;
	CHECK_RET(tegra_dc_dp_dpcd_write(dp, NV_DPCD_LANE_COUNT_SET,
					 dpcd_data));

	tegra_dc_sor_set_lane_count(&dp->sor, link_cfg->lane_count);

	/* Also power down lanes that will not be used */
	return 0;
}
#if DO_FAST_LINK_TRAINING
static int tegra_dc_dp_link_trained(struct tegra_dc_dp_data *dp,
				    const struct tegra_dc_dp_link_config *link_cfg)
{
	u32 lane;
	u8 mask;
	u8 data;
	int ret;

	for (lane = 0; lane < link_cfg->lane_count; ++lane) {
		CHECK_RET(tegra_dc_dp_dpcd_read(dp, (lane / 2) ?
				NV_DPCD_LANE2_3_STATUS : NV_DPCD_LANE0_1_STATUS,
				&data));
		mask = (lane & 1) ?
			NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES :
			NV_DPCD_STATUS_LANEX_CR_DONE_YES |
			NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_YES |
			NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_YES;
		if ((data & mask) != mask)
			return -1;
	}
	return 0;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dp_channel_eq_status(struct tegra_dc_dp_data *dp)
{
	u32 cnt;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 data;
	u8 ce_done = 1;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp, (NV_DPCD_LANE0_1_STATUS + cnt),
				      &data);

		if (n_lanes == 1) {
			ce_done =
			    (data & (0x1 << NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) &&
			    (data & (0x1 << NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT));
			break;
		} else
			if (!(data & (0x1 << NV_DPCD_STATUS_LANEX_CHN_EQ_DONE_SHIFT)) ||
			    !(data & (0x1 << NV_DPCD_STATUS_LANEX_SYMBOL_LOCKED_SHFIT)) ||
			    !(data & (0x1 << NV_DPCD_STATUS_LANEXPLUS1_CHN_EQ_DONE_SHIFT)) ||
			    !(data & (0x1 << NV_DPCD_STATUS_LANEXPLUS1_SYMBOL_LOCKED_SHIFT)))
				return 0;
	}

	if (ce_done) {
		tegra_dc_dp_dpcd_read(dp, NV_DPCD_LANE_ALIGN_STATUS_UPDATED,
				      &data);
		if (!(data & NV_DPCD_LANE_ALIGN_STATUS_UPDATED_DONE_YES))
			ce_done = 0;
	}

	return ce_done;
}

static u8 tegra_dp_clock_recovery_status(struct tegra_dc_dp_data *dp)
{
	u32 cnt;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 data_ptr;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp,
			(NV_DPCD_LANE0_1_STATUS + cnt), &data_ptr);

		if (n_lanes == 1)
			return (data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ? 1 : 0;
		else if (!(data_ptr & NV_DPCD_STATUS_LANEX_CR_DONE_YES) ||
			 !(data_ptr & (NV_DPCD_STATUS_LANEXPLUS1_CR_DONE_YES)))
			return 0;
	}

	return 1;
}

static void tegra_dp_lt_adjust(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4],
			       u8 pc_supported)
{
	size_t cnt;
	u8 data_ptr;
	u32 n_lanes = dp->link_cfg.lane_count;

	for (cnt = 0; cnt < n_lanes / 2; cnt++) {
		tegra_dc_dp_dpcd_read(dp,
			(NV_DPCD_LANE0_1_ADJUST_REQ + cnt), &data_ptr);
		pe[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_PE_SHIFT;
		vs[2 * cnt] = (data_ptr & NV_DPCD_ADJUST_REQ_LANEX_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEX_DC_SHIFT;
		pe[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_PE_SHIFT;
		vs[1 + 2 * cnt] =
			(data_ptr & NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_MASK) >>
					NV_DPCD_ADJUST_REQ_LANEXPLUS1_DC_SHIFT;
	}
	if (pc_supported) {
		tegra_dc_dp_dpcd_read(dp,
			NV_DPCD_ADJUST_REQ_POST_CURSOR2, &data_ptr);
		for (cnt = 0; cnt < n_lanes; cnt++) {
			pc[cnt] = (data_ptr >>
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_SHIFT(cnt)) &
				NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE_MASK;
		}
	}
}
static inline u32 tegra_dp_wait_aux_training(struct tegra_dc_dp_data *dp,
					     u8 is_clk_recovery)
{
	if (!dp->link_cfg.aux_rd_interval)
		is_clk_recovery ? udelay(200) :
					udelay(500);
	else
		mdelay(dp->link_cfg.aux_rd_interval * 4);

	return dp->link_cfg.aux_rd_interval;
}

static void tegra_dp_tpg(struct tegra_dc_dp_data *dp, u32 tp, u32 n_lanes)
{
	u8 data = (tp == training_pattern_disabled)
		? (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_F)
		: (tp | NV_DPCD_TRAINING_PATTERN_SET_SC_DISABLED_T);

	tegra_dc_sor_set_dp_linkctl(&dp->sor, 1, tp, &dp->link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET, data);
}

static int tegra_dp_link_config(struct tegra_dc_dp_data *dp,
				const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 dpcd_data;
	u32 retry;

	if (link_cfg->lane_count == 0) {
		printk(BIOS_ERR, "dp: error: lane count is 0. "
		       "Cannot set link config.\n");
		return DP_LT_FAILED;
	}

	/* Set power state if it is not in normal level */
	if (tegra_dc_dp_dpcd_read(dp, NV_DPCD_SET_POWER, &dpcd_data))
		return DP_LT_FAILED;

	if (dpcd_data == NV_DPCD_SET_POWER_VAL_D3_PWRDWN) {
		dpcd_data = NV_DPCD_SET_POWER_VAL_D0_NORMAL;

		/* DP spec requires 3 retries */
		for (retry = 3; retry > 0; --retry) {
			if (tegra_dc_dp_dpcd_write(dp, NV_DPCD_SET_POWER,
						   dpcd_data))
				break;
			if (retry == 1) {
				printk(BIOS_ERR, "dp: Failed to set DP panel"
				       " power\n");
				return DP_LT_FAILED;
			}
		}
	}

	/* Enable ASSR if possible */
	if (link_cfg->alt_scramber_reset_cap)
		if (tegra_dc_dp_set_assr(dp, 1))
			return DP_LT_FAILED;

	if (tegra_dp_set_link_bandwidth(dp, link_cfg->link_bw)) {
		printk(BIOS_ERR, "dp: Failed to set link bandwidth\n");
		return DP_LT_FAILED;
	}
	if (tegra_dp_set_lane_count(dp, link_cfg)) {
		printk(BIOS_ERR, "dp: Failed to set lane count\n");
		return DP_LT_FAILED;
	}
	tegra_dc_sor_set_dp_linkctl(&dp->sor, 1, training_pattern_none,
				    link_cfg);
	return DP_LT_SUCCESS;
}

static int tegra_dp_lower_link_config(struct tegra_dc_dp_data *dp,
				      struct tegra_dc_dp_link_config *cfg)
{
	struct tegra_dc_dp_link_config tmp_cfg;

	tmp_cfg = dp->link_cfg;
	cfg->is_valid = 0;

	if (_tegra_dp_lower_link_config(dp, cfg))
		goto fail;

	if (tegra_dc_dp_calc_config(dp, dp->dc->config, cfg))
		goto fail;
	tegra_dp_link_config(dp, cfg);

	return DP_LT_SUCCESS;
fail:
	dp->link_cfg = tmp_cfg;
	tegra_dp_link_config(dp, &tmp_cfg);
	return DP_LT_FAILED;
}
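
/* Program the requested pre-emphasis/voltage-swing/post-cursor levels into the
   SOR lane registers and mirror them to the sink's TRAINING_LANEx_SET DPCD
   registers. */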
static void tegra_dp_lt_config(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4])
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	u32 cnt;
	u32 val;

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 mask = 0;
		u32 pe_reg, vs_reg, pc_reg;
		u32 shift = 0;

		switch (cnt) {
		case 0:
			mask = NV_SOR_PR_LANE2_DP_LANE0_MASK;
			shift = NV_SOR_PR_LANE2_DP_LANE0_SHIFT;
			break;
		case 1:
			mask = NV_SOR_PR_LANE1_DP_LANE1_MASK;
			shift = NV_SOR_PR_LANE1_DP_LANE1_SHIFT;
			break;
		case 2:
			mask = NV_SOR_PR_LANE0_DP_LANE2_MASK;
			shift = NV_SOR_PR_LANE0_DP_LANE2_SHIFT;
			break;
		case 3:
			mask = NV_SOR_PR_LANE3_DP_LANE3_MASK;
			shift = NV_SOR_PR_LANE3_DP_LANE3_SHIFT;
			break;
		default:
			printk(BIOS_ERR,
			       "dp: incorrect lane cnt\n");
		}

		pe_reg = tegra_dp_pe_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		vs_reg = tegra_dp_vs_regs[pc[cnt]][vs[cnt]][pe[cnt]];
		pc_reg = tegra_dp_pc_regs[pc[cnt]][vs[cnt]][pe[cnt]];

		tegra_dp_set_pe_vs_pc(sor, mask, pe_reg << shift,
			vs_reg << shift, pc_reg << shift, pc_supported);
	}

	tegra_dp_disable_tx_pu(&dp->sor);
	udelay(20);

	for (cnt = 0; cnt < n_lanes; cnt++) {
		u32 max_vs_flag = tegra_dp_is_max_vs(pe[cnt], vs[cnt]);
		u32 max_pe_flag = tegra_dp_is_max_pe(pe[cnt], vs[cnt]);

		val = (vs[cnt] << NV_DPCD_TRAINING_LANEX_SET_DC_SHIFT) |
			(max_vs_flag ?
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_DC_MAX_REACHED_F) |
			(pe[cnt] << NV_DPCD_TRAINING_LANEX_SET_PE_SHIFT) |
			(max_pe_flag ?
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_T :
			NV_DPCD_TRAINING_LANEX_SET_PE_MAX_REACHED_F);
		tegra_dc_dp_dpcd_write(dp,
			(NV_DPCD_TRAINING_LANE0_SET + cnt), val);
	}

	if (pc_supported) {
		for (cnt = 0; cnt < n_lanes / 2; cnt++) {
			u32 max_pc_flag0 = tegra_dp_is_max_pc(pc[cnt]);
			u32 max_pc_flag1 = tegra_dp_is_max_pc(pc[cnt + 1]);
			val = (pc[cnt] << NV_DPCD_LANEX_SET2_PC2_SHIFT) |
				(max_pc_flag0 ?
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEX_SET2_PC2_MAX_REACHED_F) |
				(pc[cnt + 1] <<
				NV_DPCD_LANEXPLUS1_SET2_PC2_SHIFT) |
				(max_pc_flag1 ?
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_T :
				NV_DPCD_LANEXPLUS1_SET2_PC2_MAX_REACHED_F);
			tegra_dc_dp_dpcd_write(dp,
				(NV_DPCD_TRAINING_LANE0_1_SET2 + cnt), val);
		}
	}
}

static int _tegra_dp_channel_eq(struct tegra_dc_dp_data *dp, u32 pe[4],
				u32 vs[4], u32 pc[4], u8 pc_supported,
				u32 n_lanes)
{
	u32 retry_cnt;

	for (retry_cnt = 0; retry_cnt < 4; retry_cnt++) {
		if (retry_cnt) {
			tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported);
			tegra_dp_lt_config(dp, pe, vs, pc);
		}

		tegra_dp_wait_aux_training(dp, 0);

		if (!tegra_dp_clock_recovery_status(dp)) {
			printk(BIOS_ERR, "dp: CR failed in channel EQ"
			       " sequence!\n");
			break;
		}

		if (tegra_dp_channel_eq_status(dp))
			return DP_LT_SUCCESS;
	}

	return DP_LT_FAILED;
}

static int tegra_dp_channel_eq(struct tegra_dc_dp_data *dp,
			       u32 pe[4], u32 vs[4], u32 pc[4])
{
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	int err;
	u32 tp_src = training_pattern_2;

	if (pc_supported)
		tp_src = training_pattern_3;

	tegra_dp_tpg(dp, tp_src, n_lanes);

	err = _tegra_dp_channel_eq(dp, pe, vs, pc, pc_supported, n_lanes);

	tegra_dp_tpg(dp, training_pattern_disabled, n_lanes);

	return err;
}

static int _tegra_dp_clk_recovery(struct tegra_dc_dp_data *dp, u32 pe[4],
				  u32 vs[4], u32 pc[4], u8 pc_supported,
				  u32 n_lanes)
{
	u32 vs_temp[4];
	u32 retry_cnt = 0;

	do {
		tegra_dp_lt_config(dp, pe, vs, pc);
		tegra_dp_wait_aux_training(dp, 1);

		if (tegra_dp_clock_recovery_status(dp))
			return DP_LT_SUCCESS;

		memcpy(vs_temp, vs, sizeof(vs_temp));
		tegra_dp_lt_adjust(dp, pe, vs, pc, pc_supported);

		if (memcmp(vs_temp, vs, sizeof(vs_temp)))
			retry_cnt = 0;
		else
			++retry_cnt;
	} while (retry_cnt < 5);

	return DP_LT_FAILED;
}

static int tegra_dp_clk_recovery(struct tegra_dc_dp_data *dp,
				 u32 pe[4], u32 vs[4], u32 pc[4])
{
	u32 n_lanes = dp->link_cfg.lane_count;
	u8 pc_supported = dp->link_cfg.tps3_supported;
	int err;

	tegra_dp_tpg(dp, training_pattern_1, n_lanes);

	err = _tegra_dp_clk_recovery(dp, pe, vs, pc, pc_supported, n_lanes);
	if (err < 0)
		tegra_dp_tpg(dp, training_pattern_disabled, n_lanes);

	return err;
}
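
/* Full link training: run clock recovery (TPS1) and then channel equalization
   (TPS2/TPS3), dropping to a lower link configuration and retrying whenever
   either phase fails. */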
static int tegra_dc_dp_full_link_training(struct tegra_dc_dp_data *dp)
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	int err;
	u32 pe[4], vs[4], pc[4];

	printk(BIOS_INFO, "dp: %s\n", __func__);
	tegra_sor_precharge_lanes(sor);

retry_cr:
	memset(pe, preEmphasis_Disabled, sizeof(pe));
	memset(vs, driveCurrent_Level0, sizeof(vs));
	memset(pc, postCursor2_Level0, sizeof(pc));

	err = tegra_dp_clk_recovery(dp, pe, vs, pc);
	if (err != DP_LT_SUCCESS) {
		if (!tegra_dp_lower_link_config(dp, &dp->link_cfg))
			goto retry_cr;

		printk(BIOS_ERR, "dp: clk recovery failed\n");
		goto fail;
	}

	err = tegra_dp_channel_eq(dp, pe, vs, pc);
	if (err != DP_LT_SUCCESS) {
		if (!tegra_dp_lower_link_config(dp, &dp->link_cfg))
			goto retry_cr;

		printk(BIOS_ERR,
		       "dp: channel equalization failed\n");
		goto fail;
	}

	tegra_dc_dp_dump_link_cfg(dp, &dp->link_cfg);

	return 0;

fail:
	return err;
}

/*
 * All link training functions are ported from the kernel dc driver.
 * See drivers/video/tegra/dc/dp.c for more details.
 */
#if DO_FAST_LINK_TRAINING
static int tegra_dc_dp_fast_link_training(struct tegra_dc_dp_data *dp,
					  const struct tegra_dc_dp_link_config *link_cfg)
{
	struct tegra_dc_sor_data *sor = &dp->sor;
	u8 link_bw;
	u8 lane_count;
	u16 data16;
	u32 data32;
	u32 size;
	u32 status;
	int j;
	u32 mask = 0xffff >> ((4 - link_cfg->lane_count) * 4);

	printk(BIOS_INFO, "dp: %s\n", __func__);

	tegra_dc_sor_set_lane_parm(sor, link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET,
			       NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET_ANSI_8B10B);

	/* Send TP1 */
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_1, link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET,
			       NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1);

	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_LANE0_SET + j,
				       0x24);
	udelay(520);

	size = sizeof(data16);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
		NV_DPCD_LANE0_1_STATUS, (u8 *)&data16, &size, &status);
	status = mask & 0x1111;
	if ((data16 & status) != status) {
		printk(BIOS_ERR,
		       "dp: Link training error for TP1 (%#x)\n", data16);
		return -EFAULT;
	}

	/* enable ASSR */
	tegra_dc_dp_set_assr(dp, link_cfg->scramble_ena);
	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_3, link_cfg);

	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET,
			       link_cfg->link_bw == 20 ? 0x23 : 0x22);
	for (j = 0; j < link_cfg->lane_count; ++j)
		tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_LANE0_SET + j,
				       0x24);
	udelay(520);

	size = sizeof(data32);
	tegra_dc_dpaux_read(dp, DPAUX_DP_AUXCTL_CMD_AUXRD,
		NV_DPCD_LANE0_1_STATUS, (u8 *)&data32, &size, &status);
	if ((data32 & mask) != (0x7777 & mask)) {
		printk(BIOS_ERR,
		       "dp: Link training error for TP2/3 (0x%x)\n", data32);
		return -EFAULT;
	}

	tegra_dc_sor_set_dp_linkctl(sor, 1, training_pattern_disabled,
				    link_cfg);
	tegra_dc_dp_dpcd_write(dp, NV_DPCD_TRAINING_PATTERN_SET, 0);

	if (tegra_dc_dp_link_trained(dp, link_cfg)) {
		tegra_dc_sor_read_link_config(&dp->sor, &link_bw,
					      &lane_count);
		printk(BIOS_ERR,
		       "Fast link training failed, link bw %d, lane # %d\n",
		       link_bw, lane_count);
		return -EFAULT;
	}

	printk(BIOS_INFO,
	       "Fast link training succeeded, link bw %d, lane %d\n",
	       link_cfg->link_bw, link_cfg->lane_count);

	return 0;
}
#endif /* DO_FAST_LINK_TRAINING */

static int tegra_dp_do_link_training(struct tegra_dc_dp_data *dp,
				     const struct tegra_dc_dp_link_config *link_cfg)
{
	u8 link_bw;
	u8 lane_count;
#if DO_FAST_LINK_TRAINING
	int ret;

	/* Now do the fast link training for eDP */
	ret = tegra_dc_dp_fast_link_training(dp, link_cfg);
	if (ret) {
		printk(BIOS_ERR, "dp: fast link training failed\n");

		/* Try full link training then */
		if (tegra_dc_dp_full_link_training(dp)) {
			printk(BIOS_ERR, "dp: full link training failed\n");
			return ret;
		}
	} else {
		/* set to a known-good drive setting if fast link succeeded */
		tegra_dc_sor_set_voltage_swing(&dp->sor);
	}
#else
	if (tegra_dc_dp_full_link_training(dp)) {
		printk(BIOS_ERR, "dp: full link training failed\n");
		return -EFAULT;
	}
#endif

	/* Everything went well; double check the link config */
	/* TODO: record edc/c2 data for debugging */
	tegra_dc_sor_read_link_config(&dp->sor, &link_bw, &lane_count);

	if ((link_cfg->link_bw == link_bw) &&
	    (link_cfg->lane_count == lane_count))
		return 0;
	else
		return -EFAULT;
}
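
/* Try the maximum advertised link configuration first; link_cfg is only
   updated (and marked valid) if config calculation, link programming and
   link training all succeed. */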
static int tegra_dc_dp_explore_link_cfg(struct tegra_dc_dp_data *dp,
					struct tegra_dc_dp_link_config *link_cfg,
					const struct soc_nvidia_tegra210_config *config)
{
	struct tegra_dc_dp_link_config temp_cfg;

	if (!config->pixel_clock || !config->xres || !config->yres) {
		printk(BIOS_ERR,
		       "dp: invalid mode configuration\n");
		return -EINVAL;
	}
	if (!link_cfg->max_link_bw || !link_cfg->max_lane_count) {
		printk(BIOS_ERR,
		       "dp: invalid link configuration\n");
		return -EINVAL;
	}

	link_cfg->is_valid = 0;

	memcpy(&temp_cfg, link_cfg, sizeof(temp_cfg));

	temp_cfg.link_bw = temp_cfg.max_link_bw;
	temp_cfg.lane_count = temp_cfg.max_lane_count;

	/*
	 * set to max link config
	 */
	if ((!tegra_dc_dp_calc_config(dp, config, &temp_cfg)) &&
	    (!tegra_dp_link_config(dp, &temp_cfg)) &&
	    (!tegra_dp_do_link_training(dp, &temp_cfg)))
		/* the max link cfg is doable */
		memcpy(link_cfg, &temp_cfg, sizeof(temp_cfg));

	return link_cfg->is_valid ? 0 : -EFAULT;
}
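
/* Read the sink's EDID over I2C-over-AUX and, if it decodes cleanly, override
   the devicetree mode, timing and color-depth defaults with the EDID values. */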
static void tegra_dp_update_config(struct tegra_dc_dp_data *dp,
				   struct soc_nvidia_tegra210_config *config)
{
	struct edid edid;
	u8 buf[128] = {0};
	u32 size = sizeof(buf), aux_stat = 0;

	printk(BIOS_ERR, "%s: enable r/w dump.\n",
	       __func__);

	tegra_dc_dpaux_enable(dp);
	if (tegra_dc_i2c_aux_read(dp, TEGRA_EDID_I2C_ADDRESS, 0, buf, &size,
				  &aux_stat)) {
		printk(BIOS_ERR, "%s: Failed to read EDID. Use defaults.\n",
		       __func__);
		return;
	}

	if (decode_edid(buf, sizeof(buf), &edid) != EDID_CONFORMANT) {
		printk(BIOS_ERR, "%s: Failed to decode EDID. Use defaults.\n",
		       __func__);
		return;
	}

	config->xres = config->display_xres = edid.mode.ha;
	config->yres = config->display_yres = edid.mode.va;

	config->pixel_clock = edid.mode.pixel_clock * 1000;

	config->hfront_porch = edid.mode.hso;
	config->hsync_width = edid.mode.hspw;
	config->hback_porch = edid.mode.hbl - edid.mode.hso - edid.mode.hspw;

	config->vfront_porch = edid.mode.vso;
	config->vsync_width = edid.mode.vspw;
	config->vback_porch = edid.mode.vbl - edid.mode.vso - edid.mode.vspw;

	/**
	 * Note edid->framebuffer_bits_per_pixel is currently hard-coded as 32,
	 * so we should keep the default value in device config.
	 *
	 * EDID v1.3 panels may not have color depth info, so we need to check
	 * if these values are zero before updating config.
	 */
	if (edid.panel_bits_per_pixel)
		config->panel_bits_per_pixel = edid.panel_bits_per_pixel;
	if (edid.panel_bits_per_color)
		config->color_depth = edid.panel_bits_per_color;
	printk(BIOS_SPEW, "%s: configuration updated by EDID.\n", __func__);
}

void dp_init(void *_config)
{
	struct soc_nvidia_tegra210_config *config = (void *)_config;
	struct tegra_dc *dc = config->dc_data;
	struct tegra_dc_dp_data *dp = &dp_data;

	/* set up links among config, dc, dp and sor */
	dp->dc = dc;
	dc->out = dp;
	dp->sor.dc = dc;

	dp->sor.power_is_up = 0;
	dp->sor.base = (void *)TEGRA_ARM_SOR;
	dp->sor.pmc_base = (void *)TEGRA_PMC_BASE;
	dp->sor.portnum = 0;
	dp->sor.link_cfg = &dp->link_cfg;
	dp->aux_base = (void *)TEGRA_ARM_DPAUX;
	dp->link_cfg.is_valid = 0;
	dp->enabled = 0;

	tegra_dp_update_config(dp, config);
}

static void tegra_dp_hpd_config(struct tegra_dc_dp_data *dp,
				struct soc_nvidia_tegra210_config *config)
{
	u32 val;

	val = config->dp.hpd_plug_min_us |
		(config->dp.hpd_unplug_min_us <<
		 DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME_SHIFT);
	tegra_dpaux_writel(dp, DPAUX_HPD_CONFIG, val);

	tegra_dpaux_writel(dp, DPAUX_HPD_IRQ_CONFIG, config->dp.hpd_irq_min_us);
}
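
/* Poll AUXSTAT until HPD reports the panel as plugged, or the timeout
   (given in milliseconds) expires. */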
static int tegra_dp_hpd_plug(struct tegra_dc_dp_data *dp, int timeout_ms)
{
	u32 val;
	u32 timeout = timeout_ms * 1000;
	do {
		val = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (val & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)
			return 0;
		udelay(100);
		timeout -= 100;
	} while (timeout > 0);
	return -1;
}

static int tegra_dc_dp_sink_out_of_sync(struct tegra_dc_dp_data *dp,
					u32 delay_ms)
{
	u8 dpcd_data;
	int out_of_sync;

	mdelay(delay_ms);
	tegra_dc_dp_dpcd_read(dp, NV_DPCD_SINK_STATUS, &dpcd_data);

	out_of_sync = ((dpcd_data & NV_DPCD_SINK_STATUS_PORT0_IN_SYNC) !=
		       NV_DPCD_SINK_STATUS_PORT0_IN_SYNC);

	if (out_of_sync)
		printk(BIOS_ERR,
		       "SINK receive port 0 is out of synchronization\n");
	else
		printk(BIOS_INFO,
		       "SINK is in synchronization\n");

	return out_of_sync;
}

static void tegra_dc_dp_check_sink(struct tegra_dc_dp_data *dp,
				   struct soc_nvidia_tegra210_config *config)
{
	u8 max_retry = 3;
	int delay_frame;

	/* The DP TCON may skip some main stream frames, so wait a while
	   before reading the DPCD SINK STATUS register, starting with a
	   5-frame delay */
	delay_frame = 5;

	while (tegra_dc_dp_sink_out_of_sync(dp, FRAME_IN_MS * delay_frame) &&
	       max_retry--) {
		tegra_dc_detach(&dp->sor);
		if (tegra_dc_dp_explore_link_cfg(dp, &dp->link_cfg, config)) {
			printk(BIOS_ERR, "dp: %s: failed to configure link\n",
			       __func__);
			continue;
		}

		tegra_dc_sor_set_power_state(&dp->sor, 1);
		tegra_dc_sor_attach(&dp->sor);

		/* Increase delay_frame for the next try in case the sink is
		   skipping more frames */
		delay_frame += 10;
	}
}
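
/* Bring up the eDP panel: enable the AUX channel, wait for HPD, read the sink
   capabilities, power up the panel via DPCD, train the link and finally attach
   the SOR to the display controller. */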
void dp_enable(void *_dp)
{
	struct tegra_dc_dp_data *dp = _dp;
	struct tegra_dc *dc = dp->dc;
	struct soc_nvidia_tegra210_config *config = dc->config;

	u8 data;
	u32 retry;
	int ret;

	tegra_dc_dpaux_enable(dp);

	tegra_dp_hpd_config(dp, config);
	if (tegra_dp_hpd_plug(dp, config->dp.vdd_to_hpd_delay_ms) < 0) {
		printk(BIOS_ERR, "dp: hpd plug failed\n");
		goto error_enable;
	}

	if (tegra_dc_dp_init_max_link_cfg(config, dp, &dp->link_cfg)) {
		printk(BIOS_ERR, "dp: failed to init link configuration\n");
		goto error_enable;
	}

	tegra_dc_sor_enable_dp(&dp->sor);

	tegra_dc_sor_set_panel_power(&dp->sor, 1);

	/* Write power on to DPCD */
	data = NV_DPCD_SET_POWER_VAL_D0_NORMAL;
	retry = 0;
	do {
		ret = tegra_dc_dp_dpcd_write(dp,
					     NV_DPCD_SET_POWER, data);
	} while ((retry++ < DP_POWER_ON_MAX_TRIES) && ret);

	if (ret || retry >= DP_POWER_ON_MAX_TRIES) {
		printk(BIOS_ERR,
		       "dp: failed to power on panel (0x%x)\n", ret);
		goto error_enable;
	}

	/* Confirm the panel is still plugged (HPD asserted) */
	if (!(tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT) &
	      DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
		printk(BIOS_ERR, "dp: could not detect HPD\n");
		goto error_enable;
	}

	/* Check DP version */
	if (tegra_dc_dp_dpcd_read(dp, NV_DPCD_REV, &dp->revision))
		printk(BIOS_ERR,
		       "dp: failed to read the revision number from sink\n");

	if (tegra_dc_dp_explore_link_cfg(dp, &dp->link_cfg, config)) {
		printk(BIOS_ERR, "dp: failed to configure link\n");
		goto error_enable;
	}

	tegra_dc_sor_set_power_state(&dp->sor, 1);
	tegra_dc_sor_attach(&dp->sor);

	tegra_dc_dp_check_sink(dp, config);

	/*
	 * Power down the unused lanes to save power
	 * (up to a few hundred milliwatts, varying from board to board).
	 */
	tegra_dc_sor_power_down_unused_lanes(&dp->sor);

	dp->enabled = 1;
error_enable:
	return;
}

void dp_display_startup(struct device *dev)
{
	struct soc_nvidia_tegra210_config *config = dev->chip_info;
	struct display_controller *disp_ctrl =
			(void *)config->display_controller;

	u32 framebuffer_size_mb = config->framebuffer_size / MiB;
	u32 framebuffer_base_mb = config->framebuffer_base / MiB;

	struct pwm_controller *pwm = (void *)TEGRA_PWM_BASE;
	struct tegra_dc *dc = &dc_data;
	u32 plld_rate;

	printk(BIOS_INFO, "%s: entry: disp_ctrl: %p.\n",
	       __func__, disp_ctrl);

	if (disp_ctrl == NULL) {
		printk(BIOS_ERR, "No dc is assigned by dt.\n");
		return;
	}

	dc->base = (void *)disp_ctrl;
	dc->config = config;
	config->dc_data = dc;

	/* Note dp_init may read EDID and change some config values. */
	dp_init(config);

	if (framebuffer_size_mb == 0) {
		framebuffer_size_mb = ALIGN_UP(config->display_xres *
			config->display_yres *
			(config->framebuffer_bits_per_pixel / 8), MiB)/MiB;
	}

	config->framebuffer_size = framebuffer_size_mb * MiB;
	config->framebuffer_base = framebuffer_base_mb * MiB;

	/* The plld is programmed with the assumption that SHIFT_CLK_DIVIDER
	 * and PIXEL_CLK_DIVIDER are zero (divide by 1). See
	 * update_display_mode() for details.
	 */
	plld_rate = clock_configure_plld(config->pixel_clock * 2);
	if (plld_rate == 0) {
		printk(BIOS_ERR, "dc: clock init failed\n");
		return;
	} else if (plld_rate != config->pixel_clock * 2) {
		printk(BIOS_WARNING, "dc: plld rounded to %u\n", plld_rate);
		config->pixel_clock = plld_rate / 2;
	}

	/* set disp1's clock source to PLLD_OUT0 */
	clock_configure_source(disp1, PLLD, (plld_rate/KHz)/2);

	/* Init dc */
	if (tegra_dc_init(disp_ctrl)) {
		printk(BIOS_ERR, "dc: init failed\n");
		return;
	}

	/* Configure dc mode */
	if (update_display_mode(disp_ctrl, config)) {
		printk(BIOS_ERR, "dc: failed to configure display mode.\n");
		return;
	}

	/* Enable dp */
	dp_enable(dc->out);

	/* Set up Tegra PWM n (where n is specified in config->dp.pwm) to
	 * drive the panel backlight.
	 */
	printk(BIOS_SPEW, "%s: enable panel backlight pwm\n", __func__);
	WRITEL(((1 << NV_PWM_CSR_ENABLE_SHIFT) |
		(220 << NV_PWM_CSR_PULSE_WIDTH_SHIFT) |	/* 220/256 */
		0x02e),	/* frequency divider */
	       &pwm->pwm[config->dp.pwm].csr);

	/* Set up window */
	update_window(config);
	printk(BIOS_INFO, "%s: display init done.\n", __func__);

	/* Save panel mode to cb tables */
	pass_mode_info_to_payload(config);

	/*
	 * After this point, it is the payload's responsibility to allocate
	 * the framebuffer, set the base address in dc's WINBUF_START_ADDR
	 * register and enable the window via dc's DISP_DISP_WIN_OPTIONS
	 * register.
	 */
}