Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2011-2012 Alexandru Gagniuc <mr.nuke.me@gmail.com> |
| 5 | * |
| 6 | * This program is free software: you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation, either version 2 of the License, or |
| 9 | * (at your option) any later version. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #include "early_vx900.h" |
| 21 | #include "raminit.h" |
| 22 | #include <arch/io.h> |
| 23 | #include <arch/io.h> |
| 24 | #include <console/console.h> |
| 25 | #include <device/pci_ids.h> |
| 26 | #include <delay.h> |
| 27 | #include <lib.h> |
| 28 | #include <string.h> |
| 29 | |
| 30 | /** |
| 31 | * @file raminit_ddr3.c |
| 32 | * |
| 33 | * \brief DDR3 initialization for VIA VX900 chipset |
| 34 | * |
| 35 | * Rather than explain the DDR3 init algorithm, it is better to focus on what |
| 36 | * works and what doesn't. Familiarity with the DDR3 spec does not hurt. |
| 37 | * |
 * One DIMM, or two DIMMs with one rank each, works.
 * A 1-rank DIMM together with a 2-rank DIMM works, but the odd ranks are
 * disabled. Two 2-rank DIMMs will not work.
| 41 | * |
| 42 | * It is not yet clear if odd ranks do not work because of faulty timing |
| 43 | * calibration, or a misconfiguration of the MCU. I have seen this with DIMMS |
| 44 | * which mirror pins on the odd rank. That could also be the issue. |
| 45 | * |
| 46 | * The capture window is not calibrated, but preset. Whether that preset is |
| 47 | * universal or frequency dependent, and whether it is board-specific or not is |
| 48 | * not yet clear. @see vx900_dram_calibrate_recieve_delays(). |
| 49 | * |
| 50 | * 4GBit and 8GBit modules may not work. This is untested. Modules with 11 |
| 51 | * column address bits are not tested. @see vx900_dram_map_row_col_bank() |
| 52 | * |
 * Everything else should be in a more or less usable state. FIXMEs are placed
 * all over, either as a reminder that something really needs fixing, or as a
 * reminder to double-check.
| 56 | */ |
| 57 | |
| 58 | /* Map BA0 <-> A17, BA1 <-> A18 */ |
| 59 | /* Map BA2 <-> A19, RA0/RA1 must not overlap BA[0:2] */ |
| 60 | #define VX900_MRS_MA_MAP 0x4b33 /* MA Pin Mapping for MRS commands */ |
| 61 | #define VX900_CALIB_MA_MAP 0x5911 /* MA Pin mapping for calibrations */ |
| 62 | |
| 63 | /* |
| 64 | * Registers 0x78 -> 0x7f contain the calibration settings for DRAM IO timing |
| 65 | * The dataset in these registers is selected from 0x70. |
| 66 | * Once the correct dataset is selected the delays can be altered. |
| 67 | * delay_type refers to TxDQS, TxDQ, RxDQS, or RxCR |
| 68 | * bound refers to either manual, average, upper bound, or lower bound |
| 69 | */ |
| 70 | #define CALIB_TxDQS 0 |
| 71 | #define CALIB_TxDQ 1 |
| 72 | #define CALIB_RxDQS 2 |
| 73 | #define CALIB_RxDQ_CR 3 |
| 74 | |
| 75 | #define CALIB_AVERAGE 0 |
| 76 | #define CALIB_LOWER 1 |
| 77 | #define CALIB_UPPER 2 |
| 78 | #define CALIB_MANUAL 4 /* We want this & 3 to overflow to 0 */ |
| 79 | |
| 80 | static void vx900_delay_calib_mode_select(u8 delay_type, u8 bound) |
| 81 | { |
| 82 | /* Which calibration setting */ |
| 83 | u8 reg8 = (delay_type & 0x03) << 2; |
| 84 | /* Upper, lower, average, or manual setting */ |
| 85 | reg8 |= (bound & 0x03); |
| 86 | pci_write_config8(MCU, 0x70, reg8); |
| 87 | } |
| 88 | |
| 89 | /* |
| 90 | * The vendor BIOS does something similar to vx900_delay_calib_mode_select(), |
| 91 | * then reads or write a byte, and repeats the process for all 8 bytes. This is |
| 92 | * annoyingly inefficient, and we can achieve the same result in a much more |
| 93 | * elegant manner. |
| 94 | */ |
| 95 | static void vx900_read_0x78_0x7f(timing_dly dly) |
| 96 | { |
| 97 | *((u32 *) (&(dly[0]))) = pci_read_config32(MCU, 0x78); |
| 98 | *((u32 *) (&(dly[4]))) = pci_read_config32(MCU, 0x7c); |
| 99 | } |
| 100 | |
| 101 | static void vx900_write_0x78_0x7f(const timing_dly dly) |
| 102 | { |
| 103 | pci_write_config32(MCU, 0x78, *((u32 *) (&(dly[0])))); |
| 104 | pci_write_config32(MCU, 0x7c, *((u32 *) (&(dly[4])))); |
| 105 | } |
| 106 | |
| 107 | static void vx900_read_delay_range(delay_range * d_range, u8 mode) |
| 108 | { |
| 109 | vx900_delay_calib_mode_select(mode, CALIB_LOWER); |
| 110 | vx900_read_0x78_0x7f(d_range->low); |
| 111 | vx900_delay_calib_mode_select(mode, CALIB_AVERAGE); |
| 112 | vx900_read_0x78_0x7f(d_range->avg); |
| 113 | vx900_delay_calib_mode_select(mode, CALIB_UPPER); |
| 114 | vx900_read_0x78_0x7f(d_range->high); |
| 115 | } |
| 116 | |
| 117 | static void dump_delay(const timing_dly dly) |
| 118 | { |
| 119 | u8 i; |
| 120 | for (i = 0; i < 8; i++) { |
| 121 | printram(" %.2x", dly[i]); |
| 122 | } |
| 123 | printram("\n"); |
| 124 | } |
| 125 | |
| 126 | static void dump_delay_range(const delay_range d_range) |
| 127 | { |
| 128 | printram("Lower limit: "); |
| 129 | dump_delay(d_range.low); |
| 130 | printram("Average: "); |
| 131 | dump_delay(d_range.avg); |
| 132 | printram("Upper limit: "); |
| 133 | dump_delay(d_range.high); |
| 134 | } |
| 135 | |
| 136 | /* |
| 137 | * These are some "safe" values that can be used for memory initialization. |
| 138 | * Some will stay untouched, and others will be overwritten later on |
| 139 | */ |
static pci_reg8 mcu_init_config[] = {
	{0x40, 0x01},		/* Virtual rank 0 ending address = 64M - 1 */
	{0x41, 0x00}, {0x42, 0x00}, {0x43, 0x00},	/* Virtual Ranks ending */
	{0x48, 0x00},		/* Virtual rank 0 starting address = 0 */
	{0x49, 0x00}, {0x4a, 0x00}, {0x4b, 0x00},	/* Virtual Ranks beginning */
	{0x50, 0xd8},		/* Set ranks 0-3 to 11 col bits, 16 row bits */
	/* Disable all virtual ranks */
	{0x54, 0x00}, {0x55, 0x00}, {0x56, 0x00}, {0x57, 0x00},
	/* Disable rank interleaving in ranks 0-3 */
	{0x58, 0x00}, {0x59, 0x00}, {0x5a, 0x00}, {0x5b, 0x00},
	{0x6c, 0xA0},		/* Memory type: DDR3, VDIMM: 1.5V, 64-bit DRAM */
	{0xc4, 0x80},		/* Enable 8 memory banks */
	{0xc6, 0x80},		/* Minimum latency from self-refresh. Bit [7] must be 1 */
	/* FIXME: do it here or in Final config? */
	{0xc8, 0x80},		/* Enable automatic triggering of short ZQ calibration */
	{0x99, 0xf0},		/* Power Management and Bypass Reorder Queue */
	/* Enable differential DQS; MODT assertion values suggested in DS */
	{0x9e, 0xa1}, {0x9f, 0x51},
	/* DQ/DQM Duty Control - Do not put any extra delays */
	{0xe9, 0x00}, {0xea, 0x00}, {0xeb, 0x00}, {0xec, 0x00},
	{0xed, 0x00}, {0xee, 0x00}, {0xef, 0x00},
	{0xfc, 0x00}, {0xfd, 0x00}, {0xfe, 0x00}, {0xff, 0x00},
	/* The following parameters we may or may not change */
	{0x61, 0x2e},		/* DRAMC Pipeline Control */
	{0x77, 0x10},		/* MDQS Output Control */

	/* The following are parameters we'll most likely never change again */
	{0x60, 0xf4},		/* DRAM Pipeline Turn-Around Setting */
	{0x65, 0x49},		/* DRAM Arbitration Bandwidth Timer - I */
	{0x66, 0x80},		/* DRAM Queue / Arbitration */
	{0x69, 0xc6},		/* Bank Control: 8 banks, high priority refresh */
	{0x6a, 0xfc},		/* DRAMC Request Reorder Control */
	{0x6e, 0x38},		/* Burst length: 8, burst-chop: enable */
	{0x73, 0x04},		/* Close All Pages Threshold */

	/* The following need to be dynamically asserted */
	/* See: check_special_registers.c */
	{0x74, 0xa0},		/* Yes, same 0x74; add one more T */
	{0x76, 0x60},		/* Write Data Phase Control */

};
| 181 | |
| 182 | /* |
| 183 | * This table keeps the driving strength control setting that we can safely use |
| 184 | * during initialization. This settings come in part from SerialICE, and in part |
| 185 | * from code provided by VIA. |
| 186 | */ |
static pci_reg8 mcu_drv_ctrl_config[] = {
	{0xd3, 0x03},		/* Enable auto-compensation circuit for ODT strength */
	{0xd4, 0x80},		/* Set internal ODT to dynamically turn on or off */
	{0xd6, 0x20},		/* Enable strong driving for MA and DRAM commands */
	{0xd0, 0x88},		/* (ODT) Strength ?has effect? */
	{0xe0, 0x88},		/* DRAM Driving – Group DQS (MDQS) */
	{0xe1, 0x00},		/* Disable offset mode for driving strength control */
	{0xe2, 0x88},		/* DRAM Driving – Group DQ (MD, MDQM) */
	{0xe4, 0xcc},		/* DRAM Driving – Group CSA (MCS, MCKE, MODT) */
	{0xe8, 0x88},		/* DRAM Driving – Group MA (MA, MBA, MSRAS, MSCAS, MSWE) */
	{0xe6, 0xff},		/* DRAM Driving – Group DCLK0 (DCLK[2:0] for DIMM0) */
	{0xe7, 0xff},		/* DRAM Driving – Group DCLK1 (DCLK[5:3] for DIMM1) */
	/* NOTE(review): 0xe4 is written a second time with the same value as
	 * above — presumably harmless redundancy inherited from the VIA
	 * reference code; confirm before removing. */
	{0xe4, 0xcc},		/* DRAM Driving – Group CSA (MCS, MCKE, MODT) */
	{0x91, 0x08},		/* MCLKO Output Phase Delay - I */
	{0x92, 0x08},		/* MCLKO Output Phase Delay - II */
	{0x93, 0x16},		/* CS/CKE Output Phase Delay */
	{0x95, 0x16},		/* SCMD/MA Output Phase Delay */
	{0x9b, 0x3f},		/* Memory Clock Output Enable */
};
| 206 | |
/* Program the MA (memory address) pin-mapping mask into MCU rx52/rx53. */
static void vx900_dram_set_ma_pin_map(u16 map)
{
	pci_write_config16(MCU, 0x52, map);
}
| 211 | |
| 212 | /* |
| 213 | * FIXME: This function is a complete waste of space. All we really need is a |
| 214 | * MA MAP table based on either row address bits or column address bits. |
| 215 | * The problem is, I do not know if this mapping is applied during the column |
| 216 | * access or during the row access. At least the religiously verbose output |
| 217 | * makes pretty console output. |
| 218 | */ |
| 219 | static void vx900_dram_map_pins(u8 ba0, u8 ba1, u8 ba2, u8 ra0, u8 ra1) |
| 220 | { |
| 221 | u16 map = 0; |
| 222 | |
| 223 | printram("Mapping address pins to DRAM pins:\n"); |
| 224 | printram(" BA0 -> A%u\n", ba0); |
| 225 | printram(" BA1 -> A%u\n", ba1); |
| 226 | printram(" BA2 -> A%u\n", ba2); |
| 227 | printram(" RA0 -> A%u\n", ra0); |
| 228 | printram(" RA1 -> A%u\n", ra1); |
| 229 | /* Make sure BA2 is enabled */ |
| 230 | map |= (1 << 11); |
| 231 | |
| 232 | /* |
| 233 | * Find RA1 (15:14) |
| 234 | * 00: A14 |
| 235 | * 01: A16 |
| 236 | * 10: A18 |
| 237 | * 11: A20 |
| 238 | */ |
| 239 | if ((ra1 & 0x01) || (ra1 < 14) || (ra1 > 20)) { |
| 240 | printram("Illegal mapping RA1 -> A%u\n", ra1); |
| 241 | return; |
| 242 | } |
| 243 | map |= (((ra1 - 14) >> 1) & 0x03) << 14; |
| 244 | |
| 245 | /* |
| 246 | * Find RA0 (13:12) |
| 247 | * 00: A15 |
| 248 | * 01: A17 |
| 249 | * 10: A19 |
| 250 | * 11: A21 |
| 251 | */ |
| 252 | if ((!(ra0 & 0x01)) || (ra0 < 15) || (ra0 > 21)) { |
| 253 | printram("Illegal mapping RA0 -> A%u\n", ra0); |
| 254 | return; |
| 255 | } |
| 256 | map |= (((ra0 - 15) >> 1) & 0x03) << 12; |
| 257 | |
| 258 | /* |
| 259 | * Find BA2 (10:8) |
| 260 | * x00: A14 |
| 261 | * x01: A15 |
| 262 | * x10: A18 |
| 263 | * x11: A19 |
| 264 | */ |
| 265 | switch (ba2) { |
| 266 | case 14: |
| 267 | map |= (0 << 8); |
| 268 | break; |
| 269 | case 15: |
| 270 | map |= (1 << 8); |
| 271 | break; |
| 272 | case 18: |
| 273 | map |= (2 << 8); |
| 274 | break; |
| 275 | case 19: |
| 276 | map |= (3 << 8); |
| 277 | break; |
| 278 | default: |
| 279 | printram("Illegal mapping BA2 -> A%u\n", ba2); |
| 280 | break; |
| 281 | } |
| 282 | |
| 283 | /* |
| 284 | * Find BA1 (6:4) |
| 285 | * 000: A12 |
| 286 | * 001: A14 |
| 287 | * 010: A16 |
| 288 | * 011: A18 |
| 289 | * 1xx: A20 |
| 290 | */ |
| 291 | if (((ba1 & 0x01)) || (ba1 < 12) || (ba1 > 20)) { |
| 292 | printram("Illegal mapping BA1 -> A%u\n", ba1); |
| 293 | return; |
| 294 | } |
| 295 | map |= (((ba1 - 12) >> 1) & 0x07) << 4; |
| 296 | |
| 297 | /* |
| 298 | * Find BA0 (2:0) |
| 299 | * 000: A11 |
| 300 | * 001: A13 |
| 301 | * 010: A15 |
| 302 | * 011: A17 |
| 303 | * 1xx: A19 |
| 304 | */ |
| 305 | if ((!(ba0 & 0x01)) || (ba0 < 11) || (ba0 > 19)) { |
| 306 | printram("Illegal mapping BA0 -> A%u\n", ba0); |
| 307 | return; |
| 308 | } |
| 309 | map |= (((ba0 - 11) >> 1) & 0x07) << 0; |
| 310 | |
| 311 | printram("Setting map mask (rx52) to %.4x\n", map); |
| 312 | vx900_dram_set_ma_pin_map(map); |
| 313 | } |
| 314 | |
| 315 | static void vx900_dram_write_init_config(void) |
| 316 | { |
| 317 | /* Keep our RAM space free of legacy stuff */ |
| 318 | vx900_disable_legacy_rom_shadow(); |
| 319 | |
| 320 | /* Now worry about the real RAM init */ |
| 321 | size_t i; |
| 322 | for (i = 0; i < (sizeof(mcu_init_config) / sizeof(pci_reg8)); i++) { |
| 323 | pci_write_config8(MCU, mcu_init_config[i].addr, |
| 324 | mcu_init_config[i].val); |
| 325 | } |
| 326 | vx900_dram_set_ma_pin_map(VX900_CALIB_MA_MAP); |
| 327 | |
| 328 | /* FIXME: Slowing stuff down. Does this really help? */ |
| 329 | |
| 330 | /* Fast cycle control for CPU-to-DRAM Read Cycle 0:Disabled. |
| 331 | * This CPU bus controller will wait for all data */ |
| 332 | ////pci_mod_config8(HOST_BUS, 0x51, (1 << 7), 0); |
| 333 | /* Memory to CPU bus Controller Conversion Mode 1: Synchronous mode */ |
| 334 | ////pci_mod_config8(HOST_BUS, 0x54, 0, (1 << 1)); |
| 335 | } |
| 336 | |
| 337 | static void dram_find_spds_ddr3(const dimm_layout * addr, dimm_info * dimm) |
| 338 | { |
| 339 | size_t i = 0; |
| 340 | int dimms = 0; |
| 341 | do { |
| 342 | spd_raw_data spd; |
| 343 | spd_read(addr->spd_addr[i], spd); |
| 344 | spd_decode_ddr3(&dimm->dimm[i], spd); |
| 345 | if (dimm->dimm[i].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3) |
| 346 | continue; |
| 347 | dimms++; |
| 348 | dram_print_spd_ddr3(&dimm->dimm[i]); |
| 349 | } while (addr->spd_addr[++i] != SPD_END_LIST |
| 350 | && i < VX900_MAX_DIMM_SLOTS); |
| 351 | |
| 352 | if (!dimms) |
| 353 | die("No DIMMs were found"); |
| 354 | } |
| 355 | |
| 356 | static void dram_find_common_params(const dimm_info * dimms, |
| 357 | ramctr_timing * ctrl) |
| 358 | { |
| 359 | size_t i, valid_dimms; |
| 360 | memset(ctrl, 0, sizeof(ramctr_timing)); |
| 361 | ctrl->cas_supported = 0xff; |
| 362 | valid_dimms = 0; |
| 363 | for (i = 0; i < VX900_MAX_DIMM_SLOTS; i++) { |
| 364 | const dimm_attr *dimm = &dimms->dimm[i]; |
| 365 | if (dimm->dram_type == SPD_MEMORY_TYPE_UNDEFINED) |
| 366 | continue; |
| 367 | valid_dimms++; |
| 368 | |
| 369 | if (valid_dimms == 1) { |
| 370 | /* First DIMM defines the type of DIMM */ |
| 371 | ctrl->dram_type = dimm->dram_type; |
Vladimir Serbinenko | daf7680 | 2014-12-07 13:58:15 +0100 | [diff] [blame] | 372 | ctrl->dimm_type = dimm->dimm_type; |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 373 | } else { |
| 374 | /* Check if we have mismatched DIMMs */ |
Vladimir Serbinenko | daf7680 | 2014-12-07 13:58:15 +0100 | [diff] [blame] | 375 | if (ctrl->dram_type != dimm->dram_type |
| 376 | || ctrl->dimm_type != dimm->dimm_type) |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 377 | die("Mismatched DIMM Types"); |
| 378 | } |
| 379 | /* Find all possible CAS combinations */ |
| 380 | ctrl->cas_supported &= dimm->cas_supported; |
| 381 | |
| 382 | /* Find the smallest common latencies supported by all DIMMs */ |
Alexandru Gagniuc | 560433b | 2013-06-10 15:47:25 -0500 | [diff] [blame] | 383 | ctrl->tCK = MAX(ctrl->tCK, dimm->tCK); |
| 384 | ctrl->tAA = MAX(ctrl->tAA, dimm->tAA); |
| 385 | ctrl->tWR = MAX(ctrl->tWR, dimm->tWR); |
| 386 | ctrl->tRCD = MAX(ctrl->tRCD, dimm->tRCD); |
| 387 | ctrl->tRRD = MAX(ctrl->tRRD, dimm->tRRD); |
| 388 | ctrl->tRP = MAX(ctrl->tRP, dimm->tRP); |
| 389 | ctrl->tRAS = MAX(ctrl->tRAS, dimm->tRAS); |
| 390 | ctrl->tRC = MAX(ctrl->tRC, dimm->tRC); |
| 391 | ctrl->tRFC = MAX(ctrl->tRFC, dimm->tRFC); |
| 392 | ctrl->tWTR = MAX(ctrl->tWTR, dimm->tWTR); |
| 393 | ctrl->tRTP = MAX(ctrl->tRTP, dimm->tRTP); |
| 394 | ctrl->tFAW = MAX(ctrl->tFAW, dimm->tFAW); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 395 | |
| 396 | } |
| 397 | |
| 398 | ctrl->n_dimms = valid_dimms; |
| 399 | if (!ctrl->cas_supported) |
| 400 | die("Unsupported DIMM combination. " |
| 401 | "DIMMS do not support common CAS latency"); |
| 402 | if (!valid_dimms) |
| 403 | die("No valid DIMMs found"); |
| 404 | } |
| 405 | |
| 406 | static void vx900_dram_phys_bank_range(const dimm_info * dimms, |
| 407 | rank_layout * ranks) |
| 408 | { |
| 409 | size_t i; |
| 410 | for (i = 0; i < VX900_MAX_DIMM_SLOTS; i++) { |
| 411 | if (dimms->dimm[i].dram_type == SPD_MEMORY_TYPE_UNDEFINED) |
| 412 | continue; |
| 413 | u8 nranks = dimms->dimm[i].ranks; |
| 414 | /* Make sure we save the flags */ |
| 415 | ranks->flags[i * 2 + 1] = ranks->flags[i * 2] = |
| 416 | dimms->dimm[i].flags; |
| 417 | /* Only Rank1 has a mirrored pin mapping */ |
| 418 | ranks->flags[i * 2].pins_mirrored = 0; |
| 419 | if (nranks > 2) |
| 420 | die("Found DIMM with more than two ranks, which is not " |
| 421 | "supported by this chipset"); |
| 422 | u32 size = dimms->dimm[i].size_mb; |
| 423 | if (nranks == 2) { |
| 424 | /* Each rank holds half the capacity of the DIMM */ |
| 425 | size >>= 1; |
| 426 | ranks->phys_rank_size_mb[i << 1] = size; |
| 427 | ranks->phys_rank_size_mb[(i << 1) | 1] = size; |
| 428 | } else { |
| 429 | /* Otherwise, everything is held in the first bank */ |
| 430 | ranks->phys_rank_size_mb[i << 1] = size; |
Idwer Vollering | d26da9c | 2013-12-22 21:38:18 +0000 | [diff] [blame] | 431 | ranks->phys_rank_size_mb[(i << 1) | 1] = 0; |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 432 | } |
| 433 | } |
| 434 | } |
| 435 | |
| 436 | #define ODT_R0 0 |
| 437 | #define ODT_R1 1 |
| 438 | #define ODT_R2 2 |
| 439 | #define ODT_R3 3 |
| 440 | /* |
| 441 | * This is the table that tells us which MODT pin to map to which rank. |
| 442 | * |
| 443 | * This table is taken from code provided by VIA, but no explanation was |
| 444 | * provided as to why it is done this way. It may be possible that this table is |
| 445 | * not suitable for the way we map ranks later on. |
| 446 | */ |
/* Terminated by a {0, 0} entry; keyed by the populated-rank bitmask.
 * NOTE(review): rows 0x07, 0x0d and 0x0f repeat the same ODT_Rn value in
 * two fields — copied verbatim from VIA-provided code; verify against the
 * vendor reference before changing. */
static const u8 odt_lookup_table[][2] = {
	/* RankMAP    Rank 3          Rank 2          Rank 1          Rank 0 */
	{0x01, (ODT_R3 << 6) | (ODT_R2 << 4) | (ODT_R1 << 2) | (ODT_R0 << 0)},
	{0x03, (ODT_R3 << 6) | (ODT_R2 << 4) | (ODT_R0 << 2) | (ODT_R1 << 0)},
	{0x04, (ODT_R3 << 6) | (ODT_R2 << 4) | (ODT_R1 << 2) | (ODT_R0 << 0)},
	{0x05, (ODT_R3 << 6) | (ODT_R0 << 4) | (ODT_R1 << 2) | (ODT_R2 << 0)},
	{0x07, (ODT_R3 << 6) | (ODT_R0 << 4) | (ODT_R2 << 2) | (ODT_R2 << 0)},
	{0x0c, (ODT_R2 << 6) | (ODT_R3 << 4) | (ODT_R1 << 2) | (ODT_R0 << 0)},
	{0x0d, (ODT_R0 << 6) | (ODT_R0 << 4) | (ODT_R1 << 2) | (ODT_R2 << 0)},
	{0x0f, (ODT_R0 << 6) | (ODT_R0 << 4) | (ODT_R2 << 2) | (ODT_R2 << 0)},
	{0, 0},
};
| 459 | |
/*
 * Configure DRAM ODT and pad driving strength based on how many DIMMs and
 * ranks are populated, then program the static mcu_drv_ctrl_config table.
 * Dies if the populated-rank mask has no entry in odt_lookup_table.
 */
static void vx900_dram_driving_ctrl(const dimm_info * dimm)
{
	size_t i, ndimms;
	u8 reg8, regxd5, rank_mask;

	/* Build a bitmask of populated physical ranks: bit 2i for DIMM i's
	 * first rank, bit 2i+1 for its second rank (if present) */
	rank_mask = 0;
	/* For ODT range selection, datasheet recommends
	 * when 1 DIMM present: 60 Ohm
	 * when 2 DIMMs present: 120 Ohm */
	ndimms = 0;
	for (i = 0; i < VX900_MAX_DIMM_SLOTS; i++) {
		if (dimm->dimm[i].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3)
			continue;
		ndimms++;
		rank_mask |= (1 << (i * 2));
		if (dimm->dimm[i].ranks > 1)
			rank_mask |= (2 << (i * 2));
	}
	/* ODT strength and MD/MDQM/MDQS driving strength (MCU rxd5) */
	if (ndimms > 1) {
		/* Enable 1 ODT block (120 Ohm ODT) */
		regxd5 = 0 << 2;
		/* Enable strong driving for MD/MDQM/MDQS */
		regxd5 |= (1 << 7);
	} else {
		/* Enable 2 ODT blocks (60 Ohm ODT) */
		regxd5 = 1 << 2;
		/* Leave MD/MDQM/MDQS driving weak */
	}
	pci_write_config8(MCU, 0xd5, regxd5);

	/* Enable strong CLK driving for DIMMs with more than one rank */
	if (dimm->dimm[0].ranks > 1)
		pci_mod_config8(MCU, 0xd6, 0, (1 << 7));
	if (dimm->dimm[1].ranks > 1)
		pci_mod_config8(MCU, 0xd6, 0, (1 << 6));

	/* DRAM ODT Lookup Table: find the MODT mapping for our rank mask;
	 * the table is terminated by a {0, 0} sentinel */
	for (i = 0;; i++) {
		if (odt_lookup_table[i][0] == 0) {
			printram("No ODT entry for rank mask %x\n", rank_mask);
			die("Aborting");
		}
		if (odt_lookup_table[i][0] != rank_mask)
			continue;

		reg8 = odt_lookup_table[i][1];
		break;
	}

	printram("Mapping rank mask %x to ODT entry %.2x\n", rank_mask, reg8);
	pci_write_config8(MCU, 0x9c, reg8);

	/* Finally, apply the static driving-strength table */
	for (i = 0; i < (sizeof(mcu_drv_ctrl_config) / sizeof(pci_reg8)); i++) {
		pci_write_config8(MCU, mcu_drv_ctrl_config[i].addr,
				  mcu_drv_ctrl_config[i].val);
	}
}
| 518 | |
/* Enable all four physical ranks and map each of them to virtual rank 3
 * (each 4-bit field in rx54/rx55 = 0xb: enabled, VR = 3). */
static void vx900_pr_map_all_vr3(void)
{
	/* Enable all ranks and set them to VR3 */
	pci_write_config16(MCU, 0x54, 0xbbbb);
}
| 524 | |
| 525 | /* Map physical rank pr to virtual rank vr */ |
| 526 | static void vx900_map_pr_vr(u8 pr, u8 vr) |
| 527 | { |
| 528 | u16 val; |
| 529 | |
| 530 | pr &= 0x3; |
| 531 | vr &= 0x3; |
| 532 | /* Enable rank (bit [3], and set the VR number bits [1:0] */ |
| 533 | val = 0x8 | vr; |
| 534 | /* Now move the value to the appropriate PR */ |
| 535 | val <<= (pr * 4); |
| 536 | pci_mod_config16(MCU, 0x54, 0xf << (pr * 4), val); |
| 537 | printram("Mapping PR %u to VR %u\n", pr, vr); |
| 538 | } |
| 539 | |
| 540 | static u8 vx900_get_CWL(u8 CAS) |
| 541 | { |
| 542 | /* Get CWL based on CAS using the following rule: |
| 543 | * _________________________________________ |
| 544 | * CAS: | 4T | 5T | 6T | 7T | 8T | 9T | 10T | 11T | |
| 545 | * CWL: | 5T | 5T | 5T | 6T | 6T | 7T | 7T | 8T | |
| 546 | */ |
| 547 | static const u8 cas_cwl_map[] = { 5, 5, 5, 6, 6, 7, 7, 8 }; |
| 548 | if (CAS > 11) |
| 549 | return 8; |
| 550 | return cas_cwl_map[CAS - 4]; |
| 551 | } |
| 552 | |
| 553 | /* |
| 554 | * Here we are calculating latencies, and writing them to the appropriate |
| 555 | * registers. Note that some registers do not take latencies from 0 = 0T, |
| 556 | * 1 = 1T, so each register gets its own math formula. |
| 557 | */ |
| 558 | static void vx900_dram_timing(ramctr_timing * ctrl) |
| 559 | { |
| 560 | u8 reg8, val, tFAW, tRRD; |
| 561 | u32 val32; |
| 562 | |
| 563 | /* Maximum supported DDR3 frequency is 533MHz (DDR3 1066) so make sure |
| 564 | * we cap it if we have faster DIMMs. |
| 565 | * Then, align it to the closest JEDEC standard frequency */ |
| 566 | if (ctrl->tCK <= TCK_533MHZ) { |
| 567 | ctrl->tCK = TCK_533MHZ; |
| 568 | } else if (ctrl->tCK <= TCK_400MHZ) { |
| 569 | ctrl->tCK = TCK_400MHZ; |
| 570 | } else if (ctrl->tCK <= TCK_333MHZ) { |
| 571 | ctrl->tCK = TCK_333MHZ; |
| 572 | } else { |
| 573 | ctrl->tCK = TCK_266MHZ; |
| 574 | } |
| 575 | |
| 576 | val32 = (1000 << 8) / ctrl->tCK; |
| 577 | printram("Selected DRAM frequency: %u MHz\n", val32); |
| 578 | |
| 579 | /* Find CAS and CWL latencies */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 580 | val = CEIL_DIV(ctrl->tAA, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 581 | printram("Minimum CAS latency : %uT\n", val); |
| 582 | /* Find lowest supported CAS latency that satisfies the minimum value */ |
| 583 | while (!((ctrl->cas_supported >> (val - 4)) & 1) |
| 584 | && (ctrl->cas_supported >> (val - 4))) { |
| 585 | val++; |
| 586 | } |
| 587 | /* Is CAS supported */ |
| 588 | if (!(ctrl->cas_supported & (1 << (val - 4)))) |
| 589 | printram("CAS not supported\n"); |
| 590 | printram("Selected CAS latency : %uT\n", val); |
| 591 | ctrl->CAS = val; |
| 592 | ctrl->CWL = vx900_get_CWL(ctrl->CAS); |
| 593 | printram("Selected CWL latency : %uT\n", ctrl->CWL); |
| 594 | /* Write CAS and CWL */ |
| 595 | reg8 = (((ctrl->CWL - 4) & 0x07) << 4) | ((ctrl->CAS - 4) & 0x07); |
| 596 | pci_write_config8(MCU, 0xc0, reg8); |
| 597 | |
| 598 | /* Find tRCD */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 599 | val = CEIL_DIV(ctrl->tRCD, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 600 | printram("Selected tRCD : %uT\n", val); |
| 601 | reg8 = ((val - 4) & 0x7) << 4; |
| 602 | /* Find tRP */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 603 | val = CEIL_DIV(ctrl->tRP, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 604 | printram("Selected tRP : %uT\n", val); |
| 605 | reg8 |= ((val - 4) & 0x7); |
| 606 | pci_write_config8(MCU, 0xc1, reg8); |
| 607 | |
| 608 | /* Find tRAS */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 609 | val = CEIL_DIV(ctrl->tRAS, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 610 | printram("Selected tRAS : %uT\n", val); |
| 611 | reg8 = ((val - 15) & 0x7) << 4; |
| 612 | /* Find tWR */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 613 | ctrl->WR = CEIL_DIV(ctrl->tWR, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 614 | printram("Selected tWR : %uT\n", ctrl->WR); |
| 615 | reg8 |= ((ctrl->WR - 4) & 0x7); |
| 616 | pci_write_config8(MCU, 0xc2, reg8); |
| 617 | |
| 618 | /* Find tFAW */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 619 | tFAW = CEIL_DIV(ctrl->tFAW, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 620 | printram("Selected tFAW : %uT\n", tFAW); |
| 621 | /* Find tRRD */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 622 | tRRD = CEIL_DIV(ctrl->tRRD, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 623 | printram("Selected tRRD : %uT\n", tRRD); |
| 624 | val = tFAW - 4 * tRRD; /* number of cycles above 4*tRRD */ |
| 625 | reg8 = ((val - 0) & 0x7) << 4; |
| 626 | reg8 |= ((tRRD - 2) & 0x7); |
| 627 | pci_write_config8(MCU, 0xc3, reg8); |
| 628 | |
| 629 | /* Find tRTP */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 630 | val = CEIL_DIV(ctrl->tRTP, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 631 | printram("Selected tRTP : %uT\n", val); |
| 632 | reg8 = ((val & 0x3) << 4); |
| 633 | /* Find tWTR */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 634 | val = CEIL_DIV(ctrl->tWTR, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 635 | printram("Selected tWTR : %uT\n", val); |
| 636 | reg8 |= ((val - 2) & 0x7); |
| 637 | pci_mod_config8(MCU, 0xc4, 0x3f, reg8); |
| 638 | |
| 639 | /* DRAM Timing for All Ranks - VI |
| 640 | * [7:6] CKE Assertion Minimum Pulse Width |
| 641 | * We probably don't want to mess with this just yet. |
| 642 | * [5:0] Refresh-to-Active or Refresh-to-Refresh (tRFC) |
| 643 | * tRFC = (30 + 2 * [5:0])T |
| 644 | * Since we previously set RxC4[7] |
| 645 | */ |
| 646 | reg8 = pci_read_config8(MCU, 0xc5); |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 647 | val = CEIL_DIV(ctrl->tRFC, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 648 | printram("Minimum tRFC : %uT\n", val); |
| 649 | if (val < 30) { |
| 650 | val = 0; |
| 651 | } else { |
| 652 | val = (val - 30 + 1) / 2; |
| 653 | } |
| 654 | ; |
| 655 | printram("Selected tRFC : %uT\n", 30 + 2 * val); |
| 656 | reg8 |= (val & 0x3f); |
| 657 | pci_write_config8(MCU, 0xc5, reg8); |
| 658 | |
| 659 | /* Where does this go??? */ |
Edward O'Callaghan | 7116ac8 | 2014-07-08 01:53:24 +1000 | [diff] [blame] | 660 | val = CEIL_DIV(ctrl->tRC, ctrl->tCK); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 661 | printram("Required tRC : %uT\n", val); |
| 662 | } |
| 663 | |
| 664 | /* Program the DRAM frequency */ |
/*
 * Program the memory PLL to the frequency selected by ctrl->tCK (also
 * rounding ctrl->tCK to the chosen JEDEC standard value), then reset,
 * enable and calibrate the DLL. The step delays follow the VIA reference
 * sequence; do not shorten them without hardware validation.
 */
static void vx900_dram_freq(ramctr_timing * ctrl)
{
	u8 val;

	/* Step 1 - Reset the PLL */
	pci_mod_config8(MCU, 0x90, 0x00, 0x0f);
	/* Wait at least 10 ns; VIA code delays by 640us */
	udelay(640);

	/* Step 2 - Set target frequency (rx90[3:0] encodes the divider) */
	if (ctrl->tCK <= TCK_533MHZ) {
		val = 0x07;
		ctrl->tCK = TCK_533MHZ;
	} else if (ctrl->tCK <= TCK_400MHZ) {
		val = 0x06;
		ctrl->tCK = TCK_400MHZ;
	} else if (ctrl->tCK <= TCK_333MHZ) {
		val = 0x05;
		ctrl->tCK = TCK_333MHZ;
	} else {		/*ctrl->tCK <= TCK_266MHZ */
		val = 0x04;
		ctrl->tCK = TCK_266MHZ;
	}
	/* Restart the PLL with the desired frequency */
	pci_mod_config8(MCU, 0x90, 0x0f, val);

	/* Step 3 - Wait for PLL to stabilize */
	udelay(2000);

	/* Step 4 - Reset the DLL - Clear [7,4] */
	pci_mod_config8(MCU, 0x6b, 0x90, 0x00);
	udelay(2000);

	/* Step 5 - Enable the DLL - Set bits [7,4] to 01b */
	pci_mod_config8(MCU, 0x6b, 0x00, 0x10);
	udelay(2000);

	/* Step 6 - Start DLL Calibration - Set bit [7] */
	pci_mod_config8(MCU, 0x6b, 0x00, 0x80);
	udelay(5);

	/* Step 7 - Finish DLL Calibration - Clear bit [7] */
	pci_mod_config8(MCU, 0x6b, 0x80, 0x00);

	/* Step 8 - If we have registered DIMMs, we need to set bit[0] */
	if (dimm_is_registered(ctrl->dimm_type)) {
		printram("Enabling RDIMM support in memory controller\n");
		pci_mod_config8(MCU, 0x6c, 0x00, 0x01);
	}
}
| 715 | |
| 716 | /* |
| 717 | * The VX900 can send the MRS commands directly through hardware |
| 718 | * It does the MR2->MR3->MR1->MR0->LongZQ JEDEC dance |
| 719 | * The parameters that we don't worry about are extracted from the timing |
| 720 | * registers we have programmed earlier. |
| 721 | */ |
| 722 | static void vx900_dram_ddr3_do_hw_mrs(u8 ma_swap, u8 rtt_nom, |
| 723 | u8 ods, u8 rtt_wr, u8 srt, u8 asr) |
| 724 | { |
| 725 | u16 reg16 = 0; |
| 726 | |
| 727 | printram("Using Hardware method for DRAM MRS commands.\n"); |
| 728 | |
| 729 | reg16 |= ((rtt_wr & 0x03) << 12); |
| 730 | if (srt) |
| 731 | reg16 |= (1 << 9); |
| 732 | if (asr) |
| 733 | reg16 |= (1 << 8); |
| 734 | reg16 |= ((rtt_nom & 0x7) << 4); |
| 735 | reg16 |= ((ods & 0x03) << 2); |
| 736 | if (ma_swap) |
| 737 | reg16 |= (1 << 1); |
| 738 | reg16 |= (1 << 14); |
| 739 | reg16 |= (1 << 0); /* This is the trigger bit */ |
| 740 | printram("Hw MRS set is 0x%4x\n", reg16); |
| 741 | pci_write_config16(MCU, 0xcc, reg16); |
| 742 | /* Wait for MRS commands to be sent */ |
| 743 | while (pci_read_config8(MCU, 0xcc) & 1) ; |
| 744 | } |
| 745 | |
| 746 | /* |
| 747 | * Translate the MRS command into an address on the CPU bus |
| 748 | * |
| 749 | * Take an MRS command (mrs_cmd_t) and translate it to a read address on the CPU |
| 750 | * bus. Thus, reading from the returned address, will issue the correct MRS |
| 751 | * command, assuming we are in MRS mode, of course. |
| 752 | * |
| 753 | * A read from the returned address will produce the correct MRS command |
| 754 | * provided the following conditions are met: |
| 755 | * - The MA pin mapping is set to VX900_MRS_MA_MAP |
| 756 | * - The memory controller's Fun3_RX6B[2:0] is set to 011b (MSR Enable) |
| 757 | */ |
| 758 | static u32 vx900_get_mrs_addr(mrs_cmd_t cmd) |
| 759 | { |
| 760 | u32 addr = 0; |
| 761 | u8 mrs_type = (cmd >> 16) & 0x07; |
| 762 | /* MA[9:0] <-> A[12:3] */ |
| 763 | addr |= ((cmd & 0x3ff) << 3); |
| 764 | /* MA10 <-> A20 */ |
| 765 | addr |= (((cmd >> 10) & 0x1) << 20); |
| 766 | /* MA[12:11] <-> A[14:13] */ |
| 767 | addr |= (((cmd >> 11) & 0x3) << 13); |
| 768 | /* BA[2:0] <-> A[19:17] */ |
| 769 | addr |= mrs_type << 17; |
| 770 | return addr; |
| 771 | } |
| 772 | |
| 773 | /* |
| 774 | * Here, we do the MR2->MR3->MR1->MR0->LongZQ JEDEC dance manually |
| 775 | * |
| 776 | * Why would we do this in software, when the VX900 can do it in hardware? The |
| 777 | * problem is the hardware sequence seems to be buggy on ranks with mirrored |
| 778 | * pins. Is this a hardware bug or a misconfigured MCU? No idea. |
| 779 | * |
| 780 | * To maintain API compatibility with the function that implements the hardware |
| 781 | * sequence, we don't ask for all parameters. To keep an overall cleaner code |
| 782 | * structure, we don't try to pass down all that information. Instead, we |
| 783 | * extract the extra parameters from the timing registers we have programmed |
| 784 | * earlier. |
| 785 | */ |
static void vx900_dram_ddr3_do_sw_mrs(u8 ma_swap, enum ddr3_mr1_rtt_nom rtt_nom,
				      enum ddr3_mr1_ods ods,
				      enum ddr3_mr2_rttwr rtt_wr,
				      enum ddr3_mr2_srt_range srt,
				      enum ddr3_mr2_asr asr)
{
	mrs_cmd_t mrs;
	u8 reg8, cas, cwl, twr;

	printram("Using Software method for DRAM MRS commands.\n");

	/* Get CAS, CWL, and tWR that we programmed earlier.
	 * The register fields in Fun3_RxC0/RxC2 encode (value - 4). */
	reg8 = pci_read_config8(MCU, 0xc0);
	cas = (reg8 & 0x07) + 4;
	cwl = ((reg8 >> 4) & 0x07) + 4;
	reg8 = pci_read_config8(MCU, 0xc2);
	twr = (reg8 & 0x07) + 4;

	/* Step 06 - Set Fun3_RX6B[2:0] to 001b (NOP Command Enable). */
	/* Was already done for us before calling us */

	/* Step 07 - Read a double word from any address of the DIMM. */
	/* Was already done for us before calling us */

	/* Step 08 - Set Fun3_RX6B[2:0] to 011b (MSR Enable). */
	pci_mod_config8(MCU, 0x6b, 0x07, 0x03);	/* MSR Enable */

	/* Step 09 - Issue MR2 cycle. Read a double word from the address
	 * encoding the DRAM's Rtt_WR and CWL settings. */
	mrs = ddr3_get_mr2(rtt_wr, srt, asr, cwl);
	if (ma_swap)
		mrs = ddr3_mrs_mirror_pins(mrs);
	volatile_read(vx900_get_mrs_addr(mrs));
	printram("MR2: %.5x\n", mrs);
	udelay(1000);

	/* Step 10 - Issue MR3 cycle. Read a double word from the address
	 * 60000h to set DRAM to normal operation mode. */
	mrs = ddr3_get_mr3(0);
	if (ma_swap)
		mrs = ddr3_mrs_mirror_pins(mrs);
	volatile_read(vx900_get_mrs_addr(mrs));
	printram("MR3: %.5x\n", mrs);
	udelay(1000);

	/* Step 11 - Issue MR1 cycle. Read a double word from the address
	 * encoding the DRAM's output driver impedance and Rtt_Nom settings.
	 * The DLL enable field, TDQS field, write leveling enable field,
	 * additive latency field and Qoff field should be set to 0.
	 * NOTE(review): DDR3_MR1_QOFF_ENABLE is passed here - presumably it
	 * encodes "outputs enabled" (Qoff bit = 0); confirm against the
	 * ddr3 MRS helper definitions. */
	mrs = ddr3_get_mr1(DDR3_MR1_QOFF_ENABLE, DDR3_MR1_TQDS_DISABLE, rtt_nom,
			   DDR3_MR1_WRLVL_DISABLE, ods, DDR3_MR1_AL_DISABLE,
			   DDR3_MR1_DLL_ENABLE);
	if (ma_swap)
		mrs = ddr3_mrs_mirror_pins(mrs);
	volatile_read(vx900_get_mrs_addr(mrs));
	printram("MR1: %.5x\n", mrs);
	udelay(1000);

	/* Step 12 - Issue MR0 cycle. Read a double word from the address
	 * encoding the DRAM's burst length, CAS latency and write recovery
	 * time settings.
	 * The read burst type field should be set to interleave.
	 * The mode field should be set to normal mode.
	 * The DLL reset field should be set to No.
	 * The DLL control for precharge PD field should be set to Fast exit.
	 * Note: MR0 is never pin-mirrored here; presumably because
	 * interleaved burst + the chosen fields are mirror-symmetric -
	 * TODO confirm. */
	mrs = ddr3_get_mr0(DDR3_MR0_PRECHARGE_FAST, twr,
			   DDR3_MR0_DLL_RESET_NO, DDR3_MR0_MODE_NORMAL, cas,
			   DDR3_MR0_BURST_TYPE_INTERLEAVED,
			   DDR3_MR0_BURST_LENGTH_CHOP);
	volatile_read(vx900_get_mrs_addr(mrs));
	printram("MR0: %.5x\n", mrs);
	udelay(1000);

	/* Step 13 - Set Fun3_RX6B[2:0] to 110b (Long ZQ calibration cmd) */
	pci_mod_config8(MCU, 0x6b, 0x07, 0x06);	/* Long ZQ */
	/* Step 14 - Read a double word from any address of the DIMM. */
	volatile_read(0);
	udelay(1000);
}
| 866 | |
| 867 | /* |
| 868 | * This is where we take the DIMMs out of reset and do the JEDEC dance for each |
| 869 | * individual physical rank. |
| 870 | */ |
| 871 | static void vx900_dram_ddr3_dimm_init(const ramctr_timing * ctrl, |
| 872 | const rank_layout * ranks) |
| 873 | { |
| 874 | size_t i; |
| 875 | u8 rtt_nom, rtt_wr, ods, pinswap; |
| 876 | |
| 877 | /* Set BA[0/1/2] to [A17/18/19] */ |
| 878 | vx900_dram_set_ma_pin_map(VX900_MRS_MA_MAP); |
| 879 | |
| 880 | /* Step 01 - Set Fun3_Rx6E[5] to 1b to support burst length. */ |
| 881 | pci_mod_config8(MCU, 0x6e, 0, 1 << 5); |
| 882 | /* Step 02 - Set Fun3_RX69[0] to 0b (Disable Multiple Page Mode). */ |
| 883 | pci_mod_config8(MCU, 0x69, (1 << 0), 0x00); |
| 884 | /* And set [7:6] to 10b ? */ |
| 885 | pci_write_config8(MCU, 0x69, 0x87); |
| 886 | |
| 887 | /* Step 03 - Set the target physical rank to virtual rank0 and other |
| 888 | * ranks to virtual rank3. */ |
| 889 | vx900_pr_map_all_vr3(); |
| 890 | |
| 891 | /* Step 04 - Set Fun3_Rx50 to D8h. */ |
| 892 | pci_write_config8(MCU, 0x50, 0xd8); |
| 893 | /* Step 05 - Set Fun3_RX6B[5] to 1b to de-assert RESET# and wait for at |
| 894 | * least 500 us. */ |
| 895 | pci_mod_config8(MCU, 0x6b, 0x00, (1 << 5)); |
| 896 | udelay(500); |
| 897 | |
| 898 | /* Step 6 -> 15 - Set the target physical rank to virtual rank 0 and |
| 899 | * other ranks to virtual rank 3. |
| 900 | * Repeat Step 6 to 14 for every rank present, then jump to Step 16. */ |
| 901 | for (i = 0; i < VX900_MAX_MEM_RANKS; i++) { |
| 902 | if (ranks->phys_rank_size_mb[i] == 0) |
| 903 | continue; |
| 904 | printram("Initializing rank %lu\n", i); |
| 905 | |
| 906 | /* Set target physical rank to virtual rank 0 |
| 907 | * other ranks to virtual rank 3*/ |
| 908 | vx900_map_pr_vr(i, 0); |
| 909 | |
| 910 | /* FIXME: Is this needed on HW init? */ |
| 911 | pci_mod_config8(MCU, 0x6b, 0x07, 0x01); /* Enable NOP */ |
| 912 | volatile_read(0x0); /* Do NOP */ |
| 913 | pci_mod_config8(MCU, 0x6b, 0x07, 0x03); /* MSR Enable */ |
| 914 | |
| 915 | /* See init_dram_by_rank.c and get_basic_information.c |
| 916 | * in the VIA provided code */ |
| 917 | if (ctrl->n_dimms == 1) { |
| 918 | rtt_nom = DDR3_MR1_RTT_NOM_RZQ2; |
| 919 | rtt_wr = DDR3_MR2_RTTWR_OFF; |
| 920 | } else { |
| 921 | rtt_nom = DDR3_MR1_RTT_NOM_RZQ8; |
| 922 | rtt_wr = DDR3_MR2_RTTWR_RZQ2; |
| 923 | } |
| 924 | ods = ranks->flags[i].rzq7_supported ? |
| 925 | DDR3_MR1_ODS_RZQ7 : DDR3_MR1_ODS_RZQ6; |
| 926 | |
| 927 | pinswap = (ranks->flags[i].pins_mirrored); |
| 928 | if (pinswap) |
| 929 | printram("Pins mirrored\n"); |
| 930 | printram(" Swap : %x\n", pinswap); |
| 931 | printram(" rtt_nom : %x\n", rtt_nom); |
| 932 | printram(" ods : %x\n", ods); |
| 933 | printram(" rtt_wr : %x\n", rtt_wr); |
| 934 | if (RAMINIT_USE_HW_MRS_SEQ) |
| 935 | vx900_dram_ddr3_do_hw_mrs(pinswap, rtt_nom, ods, rtt_wr, |
| 936 | 0, 0); |
| 937 | else |
| 938 | vx900_dram_ddr3_do_sw_mrs(pinswap, rtt_nom, ods, rtt_wr, |
| 939 | 0, 0); |
| 940 | |
| 941 | /* Normal SDRAM Mode */ |
| 942 | pci_mod_config8(MCU, 0x6b, 0x07, 0x00); |
| 943 | |
| 944 | /* Step 15, set the rank to virtual rank 3 */ |
| 945 | vx900_map_pr_vr(i, 3); |
| 946 | } |
| 947 | |
| 948 | /* Step 16 – Set Fun3_Rx6B[2:0] to 000b (Normal SDRAM Mode). */ |
| 949 | pci_mod_config8(MCU, 0x6b, 0x07, 0x00); |
| 950 | |
| 951 | /* Set BA[0/1/2] to [A13/14/15] */ |
| 952 | vx900_dram_set_ma_pin_map(VX900_CALIB_MA_MAP); |
| 953 | |
| 954 | /* Step 17 – Set Fun3_Rx69[0] to 1b (Enable Multiple Page Mode). */ |
| 955 | pci_mod_config8(MCU, 0x69, 0x00, (1 << 0)); |
| 956 | |
| 957 | printram("DIMM initialization sequence complete\n"); |
| 958 | } |
| 959 | |
| 960 | /* |
| 961 | * This a small utility to send a single MRS command, but where we don't want to |
| 962 | * have to worry about changing the MCU mode. It gives the MCU back to us in |
| 963 | * normal operating mode. |
| 964 | */ |
| 965 | static void vx900_dram_send_soft_mrs(mrs_cmd_t cmd, u8 pin_swap) |
| 966 | { |
| 967 | u32 addr; |
| 968 | /* Set Fun3_RX6B[2:0] to 011b (MSR Enable). */ |
| 969 | pci_mod_config8(MCU, 0x6b, 0x07, (3 << 0)); |
| 970 | /* Is this a funky rank with Address pins swapped? */ |
| 971 | if (pin_swap) |
| 972 | cmd = ddr3_mrs_mirror_pins(cmd); |
| 973 | /* Find the address corresponding to the MRS */ |
| 974 | addr = vx900_get_mrs_addr(cmd); |
| 975 | /* Execute the MRS */ |
| 976 | volatile_read(addr); |
| 977 | /* Set Fun3_Rx6B[2:0] to 000b (Normal SDRAM Mode). */ |
| 978 | pci_mod_config8(MCU, 0x6b, 0x07, 0x00); |
| 979 | } |
| 980 | |
| 981 | static void vx900_dram_enter_read_leveling(u8 pinswap) |
| 982 | { |
| 983 | /* Precharge all before issuing read leveling MRS to DRAM */ |
| 984 | pci_mod_config8(MCU, 0x06b, 0x07, 0x02); |
| 985 | volatile_read(0x0); |
| 986 | udelay(1000); |
| 987 | |
| 988 | /* Enable read leveling: Set D0F3Rx71[7]=1 */ |
| 989 | pci_mod_config8(MCU, 0x71, 0, (1 << 7)); |
| 990 | |
| 991 | /* Put DRAM in read leveling mode */ |
| 992 | mrs_cmd_t cmd = ddr3_get_mr3(1); |
| 993 | vx900_dram_send_soft_mrs(cmd, pinswap); |
| 994 | } |
| 995 | |
| 996 | static void vx900_dram_exit_read_leveling(u8 pinswap) |
| 997 | { |
| 998 | /* Disable read leveling, and put dram in normal operation mode */ |
| 999 | mrs_cmd_t cmd = ddr3_get_mr3(0); |
| 1000 | vx900_dram_send_soft_mrs(cmd, pinswap); |
| 1001 | |
| 1002 | /* Disable read leveling: Set D0F3Rx71[7]=0 */ |
| 1003 | pci_mod_config8(MCU, 0x71, (1 << 7), 0); |
| 1004 | } |
| 1005 | |
| 1006 | /* |
| 1007 | * We need to see if the delay window (difference between minimum and maximum) |
| 1008 | * is large enough so that we actually have a valid window. The signal should be |
| 1009 | * valid for at least 1/2T in general. If the window is significantly smaller, |
| 1010 | * then chances are our window does not latch at the correct time, and the |
| 1011 | * calibration will not work. |
| 1012 | */ |
| 1013 | #define DQSI_THRESHOLD 0x10 |
| 1014 | #define DQO_THRESHOLD 0x09 |
| 1015 | #define DQSO_THRESHOLD 0x12 |
| 1016 | #define DELAY_RANGE_GOOD 0 |
| 1017 | #define DELAY_RANGE_BAD -1 |
| 1018 | static u8 vx900_dram_check_calib_range(const delay_range * dly, u8 window) |
| 1019 | { |
| 1020 | size_t i; |
| 1021 | for (i = 0; i < 8; i++) { |
| 1022 | if (dly->high[i] - dly->low[i] < window) |
| 1023 | return DELAY_RANGE_BAD; |
| 1024 | /* When our maximum value is lower than our min, both values |
| 1025 | * have overshot, and the window is definitely invalid */ |
| 1026 | if (dly->high[i] < dly->low[i]) |
| 1027 | return DELAY_RANGE_BAD; |
| 1028 | } |
| 1029 | return DELAY_RANGE_GOOD; |
| 1030 | } |
| 1031 | |
| 1032 | static void vx900_dram_find_avg_delays(vx900_delay_calib * delays) |
| 1033 | { |
| 1034 | size_t i; |
| 1035 | u16 dq_low, dq_high, dqs_low, dqs_high, dq_final, dqs_final; |
| 1036 | /* |
| 1037 | * At this point, we have transmit delays for both DIMMA and DIMMB, each |
| 1038 | * with a slightly different window We want to find the intersection of |
| 1039 | * those windows, so that we have a constrained window which both |
| 1040 | * DIMMA and DIMMB can use. The center of our constrained window will |
| 1041 | * also be the safest setting for the transmit delays |
| 1042 | * |
| 1043 | * DIMMA window t:|xxxxxxxxxxxxxx---------------xxxxxxxxxxxxxxxxxxxxxxx| |
| 1044 | * DIMMB window t:|xxxxxxxxxxxxxxxxxxx---------------xxxxxxxxxxxxxxxxxx| |
| 1045 | * Safe window t:|xxxxxxxxxxxxxxxxxxx----------xxxxxxxxxxxxxxxxxxxxxxx| |
| 1046 | */ |
| 1047 | delay_range *tx_dq_a = &(delays->tx_dq[0]); |
| 1048 | delay_range *tx_dq_b = &(delays->tx_dq[1]); |
| 1049 | delay_range *tx_dqs_a = &(delays->tx_dqs[0]); |
| 1050 | delay_range *tx_dqs_b = &(delays->tx_dqs[1]); |
| 1051 | |
| 1052 | for (i = 0; i < 8; i++) { |
Alexandru Gagniuc | 560433b | 2013-06-10 15:47:25 -0500 | [diff] [blame] | 1053 | dq_low = MAX(tx_dq_a->low[i], tx_dq_b->low[i]); |
| 1054 | dq_high = MIN(tx_dq_a->high[i], tx_dq_b->high[i]); |
| 1055 | dqs_low = MAX(tx_dqs_a->low[i], tx_dqs_b->low[i]); |
| 1056 | dqs_high = MIN(tx_dqs_a->high[i], tx_dqs_b->high[i]); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 1057 | |
| 1058 | /* Find the average */ |
| 1059 | dq_final = ((dq_low + dq_high) / 2); |
| 1060 | dqs_final = ((dqs_low + dqs_high) / 2); |
| 1061 | |
| 1062 | /* |
| 1063 | * These adjustments are done in code provided by VIA. |
| 1064 | * There is no explanation as to why this is done. |
| 1065 | * |
| 1066 | * We can get away without doing the DQS adjustment, but doing |
| 1067 | * it, brings the values closer to what the vendor BIOS |
| 1068 | * calibrates to. |
| 1069 | */ |
| 1070 | if ((dqs_final & 0x1f) >= 0x1c) |
| 1071 | dqs_final -= 0x1c; |
| 1072 | else |
| 1073 | dqs_final += 0x04; |
| 1074 | /* |
| 1075 | * The DQ adjustment is more critical. If we don't do this |
| 1076 | * adjustment our MCU won't be configured properly, and |
| 1077 | * ram_check() will fail. |
| 1078 | */ |
| 1079 | if ((dq_final & 0x1f) >= 0x14) |
| 1080 | dq_final -= 0x14; |
| 1081 | else |
| 1082 | dq_final += 0x0c; |
| 1083 | |
| 1084 | /* Store our values in the first delay */ |
| 1085 | delays->tx_dq[0].avg[i] = dq_final; |
| 1086 | delays->tx_dqs[0].avg[i] = dqs_final; |
| 1087 | |
| 1088 | } |
| 1089 | } |
| 1090 | |
| 1091 | /* |
| 1092 | * First calibration: When to receive data from the DRAM |
| 1093 | * (MD and MDQS input delay) |
| 1094 | * |
| 1095 | * This calibration unfortunately does not seem to work. Whether this is due to |
| 1096 | * a misconfigured MCU or hardware bug is unknown. |
| 1097 | */ |
static void vx900_rx_capture_range_calib(u8 pinswap)
{
	u8 reg8;
	const u32 cal_addr = 0x20;

	/* Set IO calibration address */
	pci_mod_config16(MCU, 0x8c, 0xfff0, cal_addr & (0xfff0));
	/* Data pattern must be 0x00 for this calibration
	 * See paragraph describing Rx8e */
	pci_write_config8(MCU, 0x8e, 0x00);

	/* Need to put DRAM and MCU in read leveling */
	vx900_dram_enter_read_leveling(pinswap);

	/* Data pattern must be 0x00 for this calibration
	 * See paragraph describing Rx8e
	 * (written a second time - presumably in case entering read leveling
	 * disturbed Rx8e; harmless either way) */
	pci_write_config8(MCU, 0x8e, 0x00);
	/* Trigger calibration: write 0xa0 (bits [7] and [5]) to Rx71 */
	reg8 = 0xa0;
	pci_write_config8(MCU, 0x71, reg8);

	/* Wait for it: poll until Rx71[4] clears.
	 * NOTE(review): no timeout - a calibration engine that never clears
	 * this bit would hang the boot here */
	while (pci_read_config8(MCU, 0x71) & 0x10) ;
	vx900_dram_exit_read_leveling(pinswap);
}
| 1123 | |
| 1124 | /* |
| 1125 | * Second calibration: How much to delay DQS signal by |
| 1126 | * (MDQS input delay) |
| 1127 | */ |
static void vx900_rx_dqs_delay_calib(u8 pinswap)
{
	const u32 cal_addr = 0x30;

	/* We need to disable refresh commands so that they don't interfere */
	const u8 ref_cnt = pci_read_config8(MCU, 0xc7);
	pci_write_config8(MCU, 0xc7, 0);
	/* Set IO calibration address */
	pci_mod_config16(MCU, 0x8c, 0xfff0, cal_addr & (0xfff0));
	/* Data pattern must be 0x00 for this calibration
	 * See paragraph describing Rx8e */
	pci_write_config8(MCU, 0x8e, 0x00);

	/* Need to put DRAM and MCU in read leveling */
	vx900_dram_enter_read_leveling(pinswap);

	/* From VIA code; Undocumented
	 * In theory this enables MODT[3:0] to be asserted */
	pci_mod_config8(MCU, 0x9e, 0, 0x80);

	/* Trigger calibration: Set D0F3Rx71[1:0]=10b */
	pci_mod_config8(MCU, 0x71, 0x03, 0x02);

	/* Wait for calibration to complete (Rx71[1] self-clears).
	 * NOTE(review): no timeout on this poll */
	while (pci_read_config8(MCU, 0x71) & 0x02) ;
	vx900_dram_exit_read_leveling(pinswap);

	/* Restore the refresh counter */
	pci_write_config8(MCU, 0xc7, ref_cnt);

	/* FIXME: should we save it before, or should we just set it as is */
	vx900_dram_set_ma_pin_map(VX900_CALIB_MA_MAP);
}
| 1161 | |
| 1162 | static void vx900_tx_dqs_trigger_calib(u8 pattern) |
| 1163 | { |
| 1164 | /* Data pattern for calibration */ |
| 1165 | pci_write_config8(MCU, 0x8e, pattern); |
| 1166 | /* Trigger calibration */ |
| 1167 | pci_mod_config8(MCU, 0x75, 0, 0x20); |
| 1168 | /* Wait for calibration */ |
| 1169 | while (pci_read_config8(MCU, 0x75) & 0x20) ; |
| 1170 | } |
| 1171 | |
| 1172 | /* |
| 1173 | * Third calibration: How much to wait before asserting DQS |
| 1174 | */ |
| 1175 | static void vx900_tx_dqs_delay_calib(void) |
| 1176 | { |
| 1177 | const u32 cal_addr = 0x00; |
| 1178 | /* Set IO calibration address */ |
| 1179 | pci_mod_config16(MCU, 0x8c, 0xfff0, cal_addr & (0xfff0)); |
| 1180 | /* Set circuit to use calibration results - Clear Rx75[0] */ |
| 1181 | pci_mod_config8(MCU, 0x75, 0x01, 0); |
| 1182 | /* Run calibration with first data pattern */ |
| 1183 | vx900_tx_dqs_trigger_calib(0x5a); |
| 1184 | /* Run again with different pattern */ |
| 1185 | vx900_tx_dqs_trigger_calib(0xa5); |
| 1186 | } |
| 1187 | |
| 1188 | /* |
 * Fourth calibration: How much to wait before putting data on DQ lines
| 1190 | */ |
| 1191 | static void vx900_tx_dq_delay_calib(void) |
| 1192 | { |
| 1193 | /* Data pattern for calibration */ |
| 1194 | pci_write_config8(MCU, 0x8e, 0x5a); |
| 1195 | /* Trigger calibration */ |
| 1196 | pci_mod_config8(MCU, 0x75, 0, 0x02); |
| 1197 | /* Wait for calibration */ |
| 1198 | while (pci_read_config8(MCU, 0x75) & 0x02) ; |
| 1199 | } |
| 1200 | |
| 1201 | static void vx900_rxdqs_adjust(delay_range * dly) |
| 1202 | { |
| 1203 | /* Adjust Rx DQS delay after calibration has been run. This is |
| 1204 | * recommended by VIA, but no explanation was provided as to why */ |
| 1205 | size_t i; |
| 1206 | for (i = 0; i < 8; i++) { |
| 1207 | if (dly->low[i] < 3) { |
| 1208 | if (i == 2 || i == 4) |
| 1209 | dly->avg[i] += 4; |
| 1210 | else |
| 1211 | dly->avg[i] += 3; |
| 1212 | |
| 1213 | } |
| 1214 | |
| 1215 | if (dly->high[i] > 0x38) |
| 1216 | dly->avg[i] -= 6; |
| 1217 | else if (dly->high[i] > 0x30) |
| 1218 | dly->avg[i] -= 4; |
| 1219 | |
| 1220 | if (dly->avg[i] > 0x20) |
| 1221 | dly->avg[i] = 0x20; |
| 1222 | } |
| 1223 | |
| 1224 | /* Put Rx DQS delay into manual mode (Set Rx[2,0] to 01) */ |
| 1225 | pci_mod_config8(MCU, 0x71, 0x05, 0x01); |
| 1226 | /* Now write the new settings */ |
| 1227 | vx900_delay_calib_mode_select(CALIB_RxDQS, CALIB_MANUAL); |
| 1228 | vx900_write_0x78_0x7f(dly->avg); |
| 1229 | } |
| 1230 | |
/*
 * Calibrate the receive side: Rx DQ capture range, then Rx DQS delays.
 * Retries until the RxDQS window passes vx900_dram_check_calib_range(),
 * and dies after max_tries attempts rather than hanging silently.
 */
static void vx900_dram_calibrate_recieve_delays(vx900_delay_calib * delays,
						u8 pinswap)
{
	size_t n_tries = 0;
	delay_range *rx_dq_cr = &(delays->rx_dq_cr);
	delay_range *rx_dqs = &(delays->rx_dqs);
	/* We really should be able to finish this in a single pass, but it may
	 * in very rare circumstances not work the first time. We define a limit
	 * on the number of tries so that we have a way of warning the user */
	const size_t max_tries = 100;
	for (;;) {
		if (n_tries++ >= max_tries) {
			die("Could not calibrate receive delays. Giving up");
		}
		u8 result;
		/* Run calibrations */
		if (RAMINIT_USE_HW_RXCR_CALIB) {
			vx900_rx_capture_range_calib(pinswap);
			vx900_read_delay_range(rx_dq_cr, CALIB_RxDQ_CR);
			dump_delay_range(*rx_dq_cr);

		} else {
			/*FIXME: Cheating with Rx CR setting\
			 * We need to either use Rx CR calibration
			 * or set up a table for the calibration */
			u8 *override = &(rx_dq_cr->avg[0]);
			override[0] = 0x28;
			override[1] = 0x1c;
			override[2] = 0x28;
			override[3] = 0x28;
			override[4] = 0x2c;
			override[5] = 0x30;
			override[6] = 0x30;
			override[7] = 0x34;
			printram("Bypassing RxCR 78-7f calibration with:\n");
			dump_delay(rx_dq_cr->avg);
		}
		/* We need to put the setting on manual mode */
		pci_mod_config8(MCU, 0x71, 0, 1 << 4);
		vx900_delay_calib_mode_select(CALIB_RxDQ_CR, CALIB_MANUAL);
		vx900_write_0x78_0x7f(rx_dq_cr->avg);

		/************* RxDQS *************/
		vx900_rx_dqs_delay_calib(pinswap);
		vx900_read_delay_range(rx_dqs, CALIB_RxDQS);
		vx900_rxdqs_adjust(rx_dqs);

		result = vx900_dram_check_calib_range(rx_dqs, DQSI_THRESHOLD);
		if (result != DELAY_RANGE_GOOD)
			continue;

		/* We're good to go. Switch to manual and write the manual
		 * setting */
		pci_mod_config8(MCU, 0x71, 0, 1 << 0);
		vx900_delay_calib_mode_select(CALIB_RxDQS, CALIB_MANUAL);
		vx900_write_0x78_0x7f(rx_dqs->avg);
		break;
	}
	if (n_tries > 1)
		printram("Hmm, we had to try %lu times before our calibration "
			 "was good.\n", n_tries);
}
| 1293 | |
/*
 * Calibrate the transmit side: Tx DQS delays first, then Tx DQ delays.
 * Each stage is retried until its delay window passes
 * vx900_dram_check_calib_range(); after max_tries attempts the results are
 * dumped and we die with a diagnostic.
 */
static void vx900_dram_calibrate_transmit_delays(delay_range * tx_dq,
						 delay_range * tx_dqs)
{
	/* Same timeout reasoning as in receive delays */
	size_t n_tries = 0;
	int dq_tries = 0, dqs_tries = 0;
	const size_t max_tries = 100;
	for (;;) {
		if (n_tries++ >= max_tries) {
			printram("Tried DQS %i times and DQ %i times\n",
				 dqs_tries, dq_tries);
			printram("Tx DQS calibration results\n");
			dump_delay_range(*tx_dqs);
			printram("TX DQ delay calibration results:\n");
			dump_delay_range(*tx_dq);
			die("Could not calibrate transmit delays. Giving up");
		}
		u8 result;
		/************* TxDQS *************/
		dqs_tries++;
		vx900_tx_dqs_delay_calib();
		vx900_read_delay_range(tx_dqs, CALIB_TxDQS);

		result = vx900_dram_check_calib_range(tx_dqs, DQSO_THRESHOLD);
		if (result != DELAY_RANGE_GOOD)
			continue;

		/************* TxDQ *************/
		/* FIXME: not sure if multiple page mode should be enabled here
		 * Vendor BIOS does it */
		pci_mod_config8(MCU, 0x69, 0, 0x01);

		dq_tries++;
		vx900_tx_dq_delay_calib();
		vx900_read_delay_range(tx_dq, CALIB_TxDQ);

		result = vx900_dram_check_calib_range(tx_dq, DQO_THRESHOLD);
		if (result != DELAY_RANGE_GOOD)
			continue;

		/* At this point, our RAM should give correct read-backs for
		 * addresses under 64 MB. If it doesn't, it won't work */
		if (ram_check_noprint_nodie(1 << 20, 1 << 20)) {
			/* No, our RAM is not working, try again */
			/* FIXME: Except that we have not yet told the MCU what
			 * the geometry of the DIMM is, hence we don't trust
			 * this test for now */
			////continue;
		}
		/* Good. We should be able to use this DIMM */
		/* That's it. We're done */
		break;
	}
	if (n_tries > 1)
		printram("Hmm, we had to try %lu times before our calibration "
			 "was good.\n", n_tries);
}
| 1351 | |
| 1352 | /* |
| 1353 | * The meat and potatoes of getting our MCU to operate the DIMMs properly. |
| 1354 | * |
| 1355 | * Thank you JEDEC for making us need configurable delays for each set of MD |
| 1356 | * signals. |
| 1357 | */ |
static void vx900_dram_calibrate_delays(const ramctr_timing * ctrl,
					const rank_layout * ranks)
{
	size_t i;
	u8 val;
	u8 dimm;
	vx900_delay_calib delay_cal;
	memset(&delay_cal, 0, sizeof(delay_cal));
	printram("Starting delay calibration\n");

	/**** Read delay control ****/
	/* MD Input Data Push Timing Control;
	 * use values recommended in datasheet
	 * Setting this too low causes the Rx window to move below the range we
	 * need it so we can capture it with Rx_78_7f
	 * This causes Rx calibrations to be too close to 0, and Tx
	 * calibrations will fail.
	 * Setting this too high causes the window to move above the range.
	 * (tCK is a period: smaller tCK means a faster clock, so the first
	 * branch covers 533MHz and up, the second 333MHz and up.)
	 */
	if (ctrl->tCK <= TCK_533MHZ)
		val = 2;
	else if (ctrl->tCK <= TCK_333MHZ)
		val = 1;
	else
		val = 0;
	val++;			/* FIXME: vendor BIOS sets this to 3 */
	pci_mod_config8(MCU, 0x74, (0x03 << 1), ((val & 0x03) << 1));

	/* FIXME: The vendor BIOS increases the MD input delay - WHY ? */
	pci_mod_config8(MCU, 0xef, (3 << 4), 3 << 4);

	/**** Write delay control ****/
	/* FIXME: The vendor BIOS does this, but WHY?
	 * See check_special_registers in VIA provided code. This value seems
	 * to depend on the DRAM frequency.
	 */
	/* Early DQ/DQS for write cycles */
	pci_mod_config8(MCU, 0x76, (3 << 2), 2 << 2);
	/* FIXME: The vendor BIOS does this - Output preamble ? */
	pci_write_config8(MCU, 0x77, 0x10);

	/* Set BA[0/1/2] to [A17/18/19] */
	vx900_dram_set_ma_pin_map(VX900_MRS_MA_MAP);
	/* Disable Multiple Page Mode - Set Rx69[0] to 0 */
	pci_mod_config8(MCU, 0x69, (1 << 0), 0x00);

	/* It's very important that we keep all ranks which are not calibrated
	 * mapped to VR3. Even if we disable them, if they are mapped to VR0
	 * (the rank we use for calibrations), the calibrations may fail in
	 * unexpected ways. */
	vx900_pr_map_all_vr3();

	/* We only really need to run the receive calibrations once. They are
	 * meant to account for signal travel differences in the internal paths
	 * of the MCU, so it doesn't really matter which rank we use for this.
	 * Differences between ranks will be accounted for in the transmit
	 * calibration. */
	for (i = 0; i < VX900_MAX_DIMM_SLOTS; i += 2) {
		/* Do we have a valid DIMM?
		 * NOTE(review): this loop steps i by 2 over
		 * VX900_MAX_DIMM_SLOTS and indexes phys_rank_size_mb[i]
		 * directly, while the transmit loop below steps by 1 and
		 * indexes [2 * i] - the two disagree for i > 0; confirm
		 * which indexing is intended */
		if (ranks->phys_rank_size_mb[i] +
		    ranks->phys_rank_size_mb[i + 1] == 0)
			continue;
		/* Map the first rank of the DIMM to VR0 */
		vx900_map_pr_vr(2 * i, 0);
		/* Only run on first rank, remember? */
		break;
	}
	/* NOTE(review): if no populated DIMM was found, i ends up past the
	 * loop bound here and flags[i] reads beyond the populated entries -
	 * confirm callers guarantee at least one DIMM is present */
	vx900_dram_calibrate_recieve_delays(&delay_cal,
					    ranks->flags[i].pins_mirrored);
	printram("RX DQS calibration results\n");
	dump_delay_range(delay_cal.rx_dqs);

	/* Enable multiple page mode for when calibrating transmit delays
	 * NOTE(review): this sets Rx69 bit[1], but everywhere else in this
	 * file "Multiple Page Mode" is Rx69 bit[0] (see the transmit loop
	 * below and the DIMM init steps) - confirm whether bit[1] is
	 * intended here */
	pci_mod_config8(MCU, 0x69, 0, 1 << 1);

	/*
	 * Unlike the receive delays, we need to run the transmit calibration
	 * for each DIMM (not rank). We run the calibration on the even rank.
	 * The odd rank may have memory pins swapped, and this, it seems,
	 * confuses the calibration circuit.
	 */
	dimm = 0;
	for (i = 0; i < VX900_MAX_DIMM_SLOTS; i++) {
		/* Do we have a valid DIMM? */
		u32 dimm_size_mb = ranks->phys_rank_size_mb[2 * i]
		    + ranks->phys_rank_size_mb[2 * i + 1];
		if (dimm_size_mb == 0)
			continue;
		/* Map the first rank of the DIMM to VR0 */
		vx900_map_pr_vr(2 * i, 0);
		vx900_dram_calibrate_transmit_delays(&(delay_cal.tx_dq[dimm]),
						     &(delay_cal.tx_dqs[dimm]));
		/* We run this more than once, so dump delays for each DIMM */
		printram("Tx DQS calibration results\n");
		dump_delay_range(delay_cal.tx_dqs[dimm]);
		printram("TX DQ delay calibration results:\n");
		dump_delay_range(delay_cal.tx_dq[dimm]);
		/* Now move the DIMM back to VR3 */
		vx900_map_pr_vr(2 * i, 3);
		/* We use dimm as a counter so that we fill tx_dq[] and tx_dqs[]
		 * results in order from 0, and do not leave any gaps */
		dimm++;
	}

	/* When we have more dimms, we need to find a tx window with which all
	 * dimms can safely work */
	if (dimm > 1) {
		vx900_dram_find_avg_delays(&delay_cal);
		printram("Final delay values\n");
		printram("Tx DQS: ");
		dump_delay(delay_cal.tx_dqs[0].avg);
		printram("Tx DQ: ");
		dump_delay(delay_cal.tx_dq[0].avg);
	}
	/* Write manual settings */
	pci_mod_config8(MCU, 0x75, 0, 0x01);
	vx900_delay_calib_mode_select(CALIB_TxDQS, CALIB_MANUAL);
	vx900_write_0x78_0x7f(delay_cal.tx_dqs[0].avg);
	vx900_delay_calib_mode_select(CALIB_TxDQ, CALIB_MANUAL);
	vx900_write_0x78_0x7f(delay_cal.tx_dq[0].avg);
}
| 1479 | |
| 1480 | static void vx900_dram_set_refresh_counter(ramctr_timing * ctrl) |
| 1481 | { |
| 1482 | u8 reg8; |
| 1483 | /* Set DRAM refresh counter |
| 1484 | * Based on a refresh counter of 0x61 at 400MHz */ |
| 1485 | reg8 = (TCK_400MHZ * 0x61) / ctrl->tCK; |
| 1486 | pci_write_config8(MCU, 0xc7, reg8); |
| 1487 | } |
| 1488 | |
| 1489 | /* |
| 1490 | * Here, we map each rank somewhere in our address space. We don't really care |
| 1491 | * at this point if this will overlap the PCI config space. If needed, remapping |
| 1492 | * is done in ramstage, where we actually know how much PCI space we actually |
| 1493 | * need. |
| 1494 | */ |
| 1495 | static void vx900_dram_range(ramctr_timing * ctrl, rank_layout * ranks) |
| 1496 | { |
| 1497 | size_t i, vrank = 0; |
| 1498 | u8 reg8; |
| 1499 | u32 ramsize_mb = 0, tolm_mb; |
| 1500 | const u32 TOLM_3_5G = (7 << 29); |
| 1501 | /* All unused physical ranks go to VR3. Otherwise, the MCU might be |
| 1502 | * trying to read or write from unused ranks, or even worse, write some |
| 1503 | * bits to the rank we want, and some to the unused ranks, even though |
| 1504 | * they are disabled. Since VR3 is the last virtual rank to be used, we |
| 1505 | * eliminate any ambiguities that the MCU may face. */ |
| 1506 | vx900_pr_map_all_vr3(); |
| 1507 | for (i = 0; i < VX900_MAX_MEM_RANKS; i++) { |
| 1508 | u32 rank_size_mb = ranks->phys_rank_size_mb[i]; |
| 1509 | if (!rank_size_mb) |
| 1510 | continue; |
| 1511 | |
| 1512 | /* vvvvvvvvvv FIXME: Fix odd rank init vvvvvvvvvv */ |
| 1513 | if ((i & 1)) { |
Patrick Georgi | 6f7e4b2 | 2014-05-19 09:18:11 +0200 | [diff] [blame] | 1514 | printk(BIOS_EMERG, "I cannot initialize rank %zu\n", i); |
Stefan Reinauer | 65b72ab | 2015-01-05 12:59:54 -0800 | [diff] [blame] | 1515 | printk(BIOS_EMERG, "I have to disable it\n"); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 1516 | continue; |
| 1517 | } |
| 1518 | /* ^^^^^^^^^^ FIXME: Fix odd rank init ^^^^^^^^^^ */ |
| 1519 | |
| 1520 | ranks->virt[vrank].start_addr = ramsize_mb; |
| 1521 | ramsize_mb += rank_size_mb; |
| 1522 | ranks->virt[vrank].end_addr = ramsize_mb; |
| 1523 | |
| 1524 | /* Rank memory range */ |
| 1525 | reg8 = (ranks->virt[vrank].start_addr >> 6); |
| 1526 | pci_write_config8(MCU, 0x48 + vrank, reg8); |
| 1527 | reg8 = (ranks->virt[vrank].end_addr >> 6); |
| 1528 | pci_write_config8(MCU, 0x40 + vrank, reg8); |
| 1529 | |
| 1530 | vx900_map_pr_vr(i, vrank); |
| 1531 | |
| 1532 | printram("Mapped Physical rank %u, to virtual rank %u\n" |
| 1533 | " Start address: 0x%.10llx\n" |
| 1534 | " End address: 0x%.10llx\n", |
| 1535 | (int)i, (int)vrank, |
| 1536 | (u64) ranks->virt[vrank].start_addr << 20, |
| 1537 | (u64) ranks->virt[vrank].end_addr << 20); |
| 1538 | /* Move on to next virtual rank */ |
| 1539 | vrank++; |
| 1540 | } |
| 1541 | |
| 1542 | /* Limit the Top of Low memory at 3.5G |
| 1543 | * Not to worry, we'll set tolm in ramstage, once we have initialized |
| 1544 | * all devices and know pci_tolm. */ |
Alexandru Gagniuc | 560433b | 2013-06-10 15:47:25 -0500 | [diff] [blame] | 1545 | tolm_mb = MIN(ramsize_mb, TOLM_3_5G >> 20); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 1546 | u16 reg_tolm = (tolm_mb << 4) & 0xfff0; |
| 1547 | pci_mod_config16(MCU, 0x84, 0xfff0, reg_tolm); |
| 1548 | |
| 1549 | printram("Initialized %u virtual ranks, with a total size of %u MB\n", |
| 1550 | (int)vrank, ramsize_mb); |
| 1551 | } |
| 1552 | |
| 1553 | /* |
| 1554 | * Here, we tell the memory controller how to treat a DIMM. This is an extremely |
| 1555 | * important step. It tells the MCU how many address bits we have in each DIMM, |
| 1556 | * and how to use them. This information is essential for the controller to |
| 1557 | * understand the DIMM addressing, and write and read data in the correct place. |
| 1558 | */ |
| 1559 | static void vx900_dram_map_row_col_bank(dimm_info * dimms) |
| 1560 | { |
| 1561 | u8 reg8, rcb_val, col_bits, max_row_bits; |
| 1562 | size_t i; |
| 1563 | /* Do we have 4Gbit chips? */ |
| 1564 | /* FIXME: Implement this */ |
| 1565 | |
| 1566 | /* Do we have 8Gbit chips? */ |
| 1567 | /* FIXME: Implement this */ |
| 1568 | |
| 1569 | max_row_bits = rcb_val = reg8 = 0; |
| 1570 | for (i = 0; i < VX900_MAX_DIMM_SLOTS; i++) { |
| 1571 | if (dimms->dimm[i].dram_type == SPD_MEMORY_TYPE_UNDEFINED) |
| 1572 | continue; |
| 1573 | |
| 1574 | col_bits = dimms->dimm[i].col_bits; |
| 1575 | |
| 1576 | /* |
| 1577 | * DDR3 always uses 3 bank address bits, and MA type 111b cannot |
| 1578 | * be used due to chipset limitation. We are left with only two |
| 1579 | * options, which we can choose based solely on the number of |
| 1580 | * column address bits. |
| 1581 | */ |
| 1582 | if ((col_bits < 10) || (col_bits > 11)) { |
| 1583 | printram("DIMM %ld has %d column address bits.\n", |
| 1584 | i, col_bits); |
| 1585 | die("Unsupported DIMM. Try booting without this DIMM"); |
| 1586 | } |
| 1587 | |
| 1588 | rcb_val = col_bits - 5; |
| 1589 | reg8 |= (rcb_val << ((i * 3) + 2)); |
| 1590 | |
| 1591 | /* */ |
Alexandru Gagniuc | 560433b | 2013-06-10 15:47:25 -0500 | [diff] [blame] | 1592 | max_row_bits = MAX(max_row_bits, dimms->dimm[i].row_bits); |
Alexandru Gagniuc | 7d31e7c | 2013-06-08 11:49:10 -0500 | [diff] [blame] | 1593 | } |
| 1594 | |
| 1595 | printram("RCBA map (rx50) <- %.2x\n", reg8); |
| 1596 | pci_write_config8(MCU, 0x50, reg8); |
| 1597 | |
| 1598 | printram("Houston, we have %d row address bits\n", max_row_bits); |
| 1599 | /* FIXME: Do this properly */ |
| 1600 | vx900_dram_map_pins(13, 14, 15, 17, 16); |
| 1601 | |
| 1602 | } |
| 1603 | |
| 1604 | /* |
| 1605 | * Here, we set some final configuration bits, which should improve the |
| 1606 | * performance of the memory slightly (arbitration, expiration counters, etc.) |
| 1607 | * |
| 1608 | * FIXME: We don't really do much else than the minimum to get the MCU properly |
| 1609 | * configured. We don't yet do set the "performance-enhancing" bits referenced |
| 1610 | * in the comment above. |
| 1611 | */ |
| 1612 | static void vx900_dram_write_final_config(ramctr_timing * ctrl) |
| 1613 | { |
| 1614 | /* FIXME: These are quick cheats */ |
| 1615 | |
| 1616 | /* FIXME: Why are we doing this? */ |
| 1617 | /* Tri-state MCSi# when rank is in self-refresh */ |
| 1618 | pci_mod_config8(MCU, 0x99, 0, 0x0f); |
| 1619 | |
| 1620 | ////pci_write_config8(MCU, 0x69, 0xe7); |
| 1621 | /* Enable paging mode and 8 page registers */ |
| 1622 | pci_mod_config8(MCU, 0x69, 0, 0xe5); |
| 1623 | ////pci_write_config8(MCU, 0x72, 0x0f); |
| 1624 | |
| 1625 | ////pci_write_config8(MCU, 0x97, 0xa4); /* self-refresh */ |
| 1626 | ////pci_write_config8(MCU, 0x98, 0xba); /* self-refresh II */ |
| 1627 | ////pci_write_config8(MCU, 0x9a, 0x80); /* self-refresh III */ |
| 1628 | |
| 1629 | /* Enable automatic triggering of short ZQ calibration */ |
| 1630 | pci_write_config8(MCU, 0xc8, 0x80); |
| 1631 | |
| 1632 | /* And last but not least, Enable A20 line */ |
| 1633 | outb(inb(0x92) | (1 << 1), 0x92); |
| 1634 | } |
| 1635 | |
/*
 * Top-level DDR3 initialization entry point for the VX900 memory controller.
 *
 * Runs the numbered init steps below in order. The ordering is part of the
 * init algorithm (e.g. frequency must be set before DIMM init, delays are
 * calibrated before ranks are mapped) -- do not reorder the steps.
 *
 * If RAM already responds correctly at 1MB, initialization is assumed to have
 * been done (e.g. on an S3 resume or soft reset) and this function returns
 * without touching the MCU.
 */
void vx900_init_dram_ddr3(const dimm_layout * dimm_addr)
{
	dimm_info dimm_prop;
	ramctr_timing ctrl_prop;
	rank_layout ranks;
	device_t mcu;

	/* Cheap writability probe at the 1MB mark; nonzero means the test
	 * failed, i.e. RAM is not yet working */
	if (!ram_check_noprint_nodie(1 << 20, 1 << 20)) {
		printram("RAM is already initialized. Skipping init\n");
		return;
	}
	/* Locate the Memory controller */
	mcu = pci_locate_device(PCI_ID(PCI_VENDOR_ID_VIA,
				       PCI_DEVICE_ID_VIA_VX900_MEMCTRL), 0);

	if (mcu == PCI_DEV_INVALID) {
		die("Memory Controller not found\n");
	}
	/* Start from a known-clean state for all working structures */
	memset(&dimm_prop, 0, sizeof(dimm_prop));
	memset(&ctrl_prop, 0, sizeof(ctrl_prop));
	memset(&ranks, 0, sizeof(ranks));
	/* 1) Write some initial "safe" parameters */
	vx900_dram_write_init_config();
	/* 2) Get timing information from SPDs */
	dram_find_spds_ddr3(dimm_addr, &dimm_prop);
	/* 3) Find lowest common denominator for all modules */
	dram_find_common_params(&dimm_prop, &ctrl_prop);
	/* 4) Find the size of each memory rank */
	vx900_dram_phys_bank_range(&dimm_prop, &ranks);
	/* 5) Set DRAM driving strength */
	vx900_dram_driving_ctrl(&dimm_prop);
	/* 6) Set DRAM frequency and latencies */
	vx900_dram_timing(&ctrl_prop);
	vx900_dram_freq(&ctrl_prop);
	/* 7) Initialize the modules themselves */
	vx900_dram_ddr3_dimm_init(&ctrl_prop, &ranks);
	/* 8) Set refresh counter based on DRAM frequency */
	vx900_dram_set_refresh_counter(&ctrl_prop);
	/* 9) Calibrate receive and transmit delays */
	vx900_dram_calibrate_delays(&ctrl_prop, &ranks);
	/* 10) Enable Physical to Virtual Rank mapping */
	vx900_dram_range(&ctrl_prop, &ranks);
	/* 11) Map address bits to DRAM pins */
	vx900_dram_map_row_col_bank(&dimm_prop);
	/* 99) Some final adjustments */
	vx900_dram_write_final_config(&ctrl_prop);
	/* Dump the final MCU register configuration for debugging */
	dump_pci_device(mcu);
}