blob: 91d929a8bbb31d567b4ed3e518250692a5b72b2c [file] [log] [blame]
Marc Jones8ae8c882007-12-19 01:32:08 +00001/*
Stefan Reinauer7e61e452008-01-18 10:35:56 +00002 * This file is part of the coreboot project.
Marc Jones8ae8c882007-12-19 01:32:08 +00003 *
Timothy Pearsonb6fa61a2015-02-20 13:13:35 -06004 * Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00005 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
Marc Jones8ae8c882007-12-19 01:32:08 +00006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
Marc Jones8ae8c882007-12-19 01:32:08 +000015 */
16
17/* Description: Main memory controller system configuration for DDR 2 */
18
19
20/* KNOWN ISSUES - ERRATA
21 *
22 * Trtp is not calculated correctly when the controller is in 64-bit mode, it
23 * is 1 busclock off. No fix planned. The controller is not ordinarily in
24 * 64-bit mode.
25 *
26 * 32 Byte burst not supported. No fix planned. The controller is not
27 * ordinarily in 64-bit mode.
28 *
29 * Trc precision does not use extra Jedec defined fractional component.
Zheng Baoc3af12f2010-10-08 05:08:47 +000030 * Instead Trc (course) is rounded up to nearest 1 ns.
Marc Jones8ae8c882007-12-19 01:32:08 +000031 *
32 * Mini and Micro DIMM not supported. Only RDIMM, UDIMM, SO-DIMM defined types
33 * supported.
34 */
35
36static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
37 struct DCTStatStruc *pDCTstatA);
38static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
39 struct DCTStatStruc *pDCTstatA);
40static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
41 struct DCTStatStruc *pDCTstatA);
Marc Jones8ae8c882007-12-19 01:32:08 +000042static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
43 struct DCTStatStruc *pDCTstatA);
44static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
45 struct DCTStatStruc *pDCTstatA);
46static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
47 struct DCTStatStruc *pDCTstat);
48static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
49 struct DCTStatStruc *pDCTstat);
50static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
51 struct DCTStatStruc *pDCTstatA);
52static u8 NodePresent_D(u8 Node);
53static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
54 struct DCTStatStruc *pDCTstatA);
55static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
56 struct DCTStatStruc *pDCTstat, u8 dct);
57static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
58 struct DCTStatStruc *pDCTstat, u8 dct);
59static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
60 struct DCTStatStruc *pDCTstat, u8 dct);
61static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
62 struct DCTStatStruc *pDCTstat);
63static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
64 struct DCTStatStruc *pDCTstat, u8 dct);
65static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
66 struct DCTStatStruc *pDCTstat, u8 dct);
67static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
68 struct DCTStatStruc *pDCTstat, u8 dct);
69static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
70 struct DCTStatStruc *pDCTstat, u8 dct);
71static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
72 struct DCTStatStruc *pDCTstat, u8 dct);
73static u8 Get_DefTrc_k_D(u8 k);
74static u16 Get_40Tk_D(u8 k);
75static u16 Get_Fk_D(u8 k);
76static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat, u8 i, u8 j, u8 k);
77static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
78 struct DCTStatStruc *pDCTstat, int j, int k);
79static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i);
80static void mct_initDCT(struct MCTStatStruc *pMCTstat,
81 struct DCTStatStruc *pDCTstat);
82static void mct_DramInit(struct MCTStatStruc *pMCTstat,
83 struct DCTStatStruc *pDCTstat, u8 dct);
84static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
85 struct DCTStatStruc *pDCTstat, u8 dct);
86static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat);
87static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
88 struct DCTStatStruc *pDCTstat, u8 dct);
89static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
90 struct DCTStatStruc *pDCTstat, u8 dct);
91static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,\
92 struct DCTStatStruc *pDCTstat, u8 dct);
93static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
94 struct DCTStatStruc *pDCTstat, u8 dct);
95static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
96 struct DCTStatStruc *pDCTstat, u8 dct);
97static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
98 struct DCTStatStruc *pDCTstat, u8 dct);
99static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
100 struct DCTStatStruc *pDCTstat, u8 dct);
101static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
102 struct DCTStatStruc *pDCTstat, u8 dct);
103static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
104 struct DCTStatStruc *pDCTstat, u8 dct);
105static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
106 struct DCTStatStruc *pDCTstat);
107static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
108 u32 dev, u32 index_reg, u32 index);
109static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
110 u32 dev, u32 index_reg);
111static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat, u8 dct,
112 u32 dev, u32 index_reg);
113static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
114 u32 dev, u32 index_reg, u32 index);
115static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
116 struct DCTStatStruc *pDCTstat);
117static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat, u8 dct,
118 u32 dev, u32 index_reg, u32 index);
119static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat,
120 struct DCTStatStruc *pDCTstat);
121static void mct_init(struct MCTStatStruc *pMCTstat,
122 struct DCTStatStruc *pDCTstat);
123static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
124 struct DCTStatStruc *pDCTstat);
125static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
126 struct DCTStatStruc *pDCTstatA);
127static void SetCSTriState(struct MCTStatStruc *pMCTstat,
128 struct DCTStatStruc *pDCTstat, u8 dct);
129static void SetODTTriState(struct MCTStatStruc *pMCTstat,
130 struct DCTStatStruc *pDCTstat, u8 dct);
131static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
132 struct DCTStatStruc *pDCTstat, u8 dct);
133static u32 mct_NodePresent_D(void);
134static void WaitRoutine_D(u32 time);
135static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
136 struct DCTStatStruc *pDCTstatA);
137static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
138 struct DCTStatStruc *pDCTstatA);
139static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
140 struct DCTStatStruc *pDCTstat);
141static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
142 struct DCTStatStruc *pDCTstat);
143void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
144 struct DCTStatStruc *pDCTstat);
145static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
146 struct DCTStatStruc *pDCTstat);
147void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
148 struct DCTStatStruc *pDCTstat);
149static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
150 struct DCTStatStruc *pDCTstatA);
151static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct);
152static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
153 struct DCTStatStruc *pDCTstat, u8 dct);
Zheng Bao69436e12011-01-06 02:18:12 +0000154static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
155 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct);
156static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
157 struct DCTStatStruc *pDCTstat, u8 dct);
Marc Jones8ae8c882007-12-19 01:32:08 +0000158
159/*See mctAutoInitMCT header for index relationships to CL and T*/
160static const u16 Table_F_k[] = {00,200,266,333,400,533 };
161static const u8 Table_T_k[] = {0x00,0x50,0x3D,0x30,0x25, 0x18 };
162static const u8 Table_CL2_j[] = {0x04,0x08,0x10,0x20,0x40, 0x80 };
163static const u8 Tab_defTrc_k[] = {0x0,0x41,0x3C,0x3C,0x3A, 0x3A };
164static const u16 Tab_40T_k[] = {00,200,150,120,100,75 };
165static const u8 Tab_TrefT_k[] = {00,0,1,1,2,2,3,4,5,6,0,0};
166static const u8 Tab_BankAddr[] = {0x0,0x08,0x09,0x10,0x0C,0x0D,0x11,0x0E,0x15,0x16,0x0F,0x17};
167static const u8 Tab_tCL_j[] = {0,2,3,4,5};
168static const u8 Tab_1KTfawT_k[] = {00,8,10,13,14,20};
169static const u8 Tab_2KTfawT_k[] = {00,10,14,17,18,24};
170static const u8 Tab_L1CLKDis[] = {8,8,6,4,2,0,8,8};
171static const u8 Tab_M2CLKDis[] = {2,0,8,8,2,0,2,0};
172static const u8 Tab_S1CLKDis[] = {8,0,8,8,8,0,8,0};
173static const u8 Table_Comp_Rise_Slew_20x[] = {7, 3, 2, 2, 0xFF};
174static const u8 Table_Comp_Rise_Slew_15x[] = {7, 7, 3, 2, 0xFF};
175static const u8 Table_Comp_Fall_Slew_20x[] = {7, 5, 3, 2, 0xFF};
176static const u8 Table_Comp_Fall_Slew_15x[] = {7, 7, 5, 3, 0xFF};
177
Myles Watson075fbe82010-04-15 05:19:29 +0000178static void mctAutoInitMCT_D(struct MCTStatStruc *pMCTstat,
Marc Jones8ae8c882007-12-19 01:32:08 +0000179 struct DCTStatStruc *pDCTstatA)
180{
181 /*
182 * Memory may be mapped contiguously all the way up to 4GB (depending
183 * on setup options). It is the responsibility of PCI subsystem to
184 * create an uncacheable IO region below 4GB and to adjust TOP_MEM
185 * downward prior to any IO mapping or accesses. It is the same
Zheng Baoc3af12f2010-10-08 05:08:47 +0000186 * responsibility of the CPU sub-system prior to accessing LAPIC.
Marc Jones8ae8c882007-12-19 01:32:08 +0000187 *
188 * Slot Number is an external convention, and is determined by OEM with
189 * accompanying silk screening. OEM may choose to use Slot number
190 * convention which is consistent with DIMM number conventions.
191 * All AMD engineering
192 * platforms do.
193 *
194 * Run-Time Requirements:
195 * 1. Complete Hypertransport Bus Configuration
196 * 2. SMBus Controller Initialized
197 * 3. Checksummed or Valid NVRAM bits
198 * 4. MCG_CTL=-1, MC4_CTL_EN=0 for all CPUs
199 * 5. MCi_STS from shutdown/warm reset recorded (if desired) prior to
200 * entry
201 * 6. All var MTRRs reset to zero
202 * 7. State of NB_CFG.DisDatMsk set properly on all CPUs
Elyes HAOUAS0f92f632014-07-27 19:37:31 +0200203 * 8. All CPUs at 2GHz Speed (unless DQS training is not installed).
Marc Jones8ae8c882007-12-19 01:32:08 +0000204 * 9. All cHT links at max Speed/Width (unless DQS training is not
205 * installed).
206 *
207 *
208 * Global relationship between index values and item values:
209 * j CL(j) k F(k)
210 * --------------------------
211 * 0 2.0 - -
Elyes HAOUAS0f92f632014-07-27 19:37:31 +0200212 * 1 3.0 1 200 MHz
213 * 2 4.0 2 266 MHz
214 * 3 5.0 3 333 MHz
215 * 4 6.0 4 400 MHz
216 * 5 7.0 5 533 MHz
Marc Jones8ae8c882007-12-19 01:32:08 +0000217 */
218 u8 Node, NodesWmem;
219 u32 node_sys_base;
220
221restartinit:
222 mctInitMemGPIOs_A_D(); /* Set any required GPIOs*/
223 NodesWmem = 0;
224 node_sys_base = 0;
225 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
226 struct DCTStatStruc *pDCTstat;
227 pDCTstat = pDCTstatA + Node;
228 pDCTstat->Node_ID = Node;
229 pDCTstat->dev_host = PA_HOST(Node);
230 pDCTstat->dev_map = PA_MAP(Node);
231 pDCTstat->dev_dct = PA_DCT(Node);
232 pDCTstat->dev_nbmisc = PA_NBMISC(Node);
233 pDCTstat->NodeSysBase = node_sys_base;
234
235 print_tx("mctAutoInitMCT_D: mct_init Node ", Node);
236 mct_init(pMCTstat, pDCTstat);
237 mctNodeIDDebugPort_D();
238 pDCTstat->NodePresent = NodePresent_D(Node);
239 if (pDCTstat->NodePresent) { /* See if Node is there*/
240 print_t("mctAutoInitMCT_D: clear_legacy_Mode\n");
241 clear_legacy_Mode(pMCTstat, pDCTstat);
242 pDCTstat->LogicalCPUID = mctGetLogicalCPUID_D(Node);
243
244 print_t("mctAutoInitMCT_D: mct_InitialMCT_D\n");
245 mct_InitialMCT_D(pMCTstat, pDCTstat);
246
247 print_t("mctAutoInitMCT_D: mctSMBhub_Init\n");
248 mctSMBhub_Init(Node); /* Switch SMBUS crossbar to proper node*/
249
250 print_t("mctAutoInitMCT_D: mct_initDCT\n");
251 mct_initDCT(pMCTstat, pDCTstat);
252 if (pDCTstat->ErrCode == SC_FatalErr) {
253 goto fatalexit; /* any fatal errors?*/
254 } else if (pDCTstat->ErrCode < SC_StopError) {
255 NodesWmem++;
256 }
257 } /* if Node present */
258 node_sys_base = pDCTstat->NodeSysBase;
259 node_sys_base += (pDCTstat->NodeSysLimit + 2) & ~0x0F;
260 }
261 if (NodesWmem == 0) {
Stefan Reinauer65b72ab2015-01-05 12:59:54 -0800262 printk(BIOS_DEBUG, "No Nodes?!\n");
Marc Jones8ae8c882007-12-19 01:32:08 +0000263 goto fatalexit;
264 }
265
266 print_t("mctAutoInitMCT_D: SyncDCTsReady_D\n");
267 SyncDCTsReady_D(pMCTstat, pDCTstatA); /* Make sure DCTs are ready for accesses.*/
268
269 print_t("mctAutoInitMCT_D: HTMemMapInit_D\n");
270 HTMemMapInit_D(pMCTstat, pDCTstatA); /* Map local memory into system address space.*/
271 mctHookAfterHTMap();
272
273 print_t("mctAutoInitMCT_D: CPUMemTyping_D\n");
274 CPUMemTyping_D(pMCTstat, pDCTstatA); /* Map dram into WB/UC CPU cacheability */
275 mctHookAfterCPU(); /* Setup external northbridge(s) */
276
277 print_t("mctAutoInitMCT_D: DQSTiming_D\n");
278 DQSTiming_D(pMCTstat, pDCTstatA); /* Get Receiver Enable and DQS signal timing*/
279
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000280 print_t("mctAutoInitMCT_D: UMAMemTyping_D\n");
281 UMAMemTyping_D(pMCTstat, pDCTstatA); /* Fix up for UMA sizing */
282
Marc Jones8ae8c882007-12-19 01:32:08 +0000283 print_t("mctAutoInitMCT_D: :OtherTiming\n");
284 mct_OtherTiming(pMCTstat, pDCTstatA);
285
286 if (ReconfigureDIMMspare_D(pMCTstat, pDCTstatA)) { /* RESET# if 1st pass of DIMM spare enabled*/
287 goto restartinit;
288 }
289
290 InterleaveNodes_D(pMCTstat, pDCTstatA);
291 InterleaveChannels_D(pMCTstat, pDCTstatA);
292
293 print_t("mctAutoInitMCT_D: ECCInit_D\n");
294 if (ECCInit_D(pMCTstat, pDCTstatA)) { /* Setup ECC control and ECC check-bits*/
295 print_t("mctAutoInitMCT_D: MCTMemClr_D\n");
296 MCTMemClr_D(pMCTstat,pDCTstatA);
297 }
298
299 mct_FinalMCT_D(pMCTstat, (pDCTstatA + 0) ); // Node 0
Marc Jones067d2232012-02-21 17:06:40 -0700300 print_tx("mctAutoInitMCT_D Done: Global Status: ", pMCTstat->GStatus);
Marc Jones8ae8c882007-12-19 01:32:08 +0000301 return;
302
303fatalexit:
304 die("mct_d: fatalexit");
305}
306
307
308static u8 ReconfigureDIMMspare_D(struct MCTStatStruc *pMCTstat,
309 struct DCTStatStruc *pDCTstatA)
310{
311 u8 ret;
312
313 if (mctGet_NVbits(NV_CS_SpareCTL)) {
314 if (MCT_DIMM_SPARE_NO_WARM) {
315 /* Do no warm-reset DIMM spare */
316 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
317 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA);
318 ret = 0;
319 } else {
320 mct_ResetDataStruct_D(pMCTstat, pDCTstatA);
321 pMCTstat->GStatus |= 1 << GSB_EnDIMMSpareNW;
322 ret = 1;
323 }
324 } else {
325 /* Do warm-reset DIMM spare */
326 if (mctGet_NVbits(NV_DQSTrainCTL))
327 mctWarmReset_D();
328 ret = 0;
329 }
330
331
332 } else {
333 ret = 0;
334 }
335
336 return ret;
337}
338
339
340static void DQSTiming_D(struct MCTStatStruc *pMCTstat,
341 struct DCTStatStruc *pDCTstatA)
342{
343 u8 nv_DQSTrainCTL;
344
345 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
346 return;
347 }
348 nv_DQSTrainCTL = mctGet_NVbits(NV_DQSTrainCTL);
349 /* FIXME: BOZO- DQS training every time*/
350 nv_DQSTrainCTL = 1;
351
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000352 print_t("DQSTiming_D: mct_BeforeDQSTrain_D:\n");
Zheng Bao1476a9e2009-08-25 04:12:55 +0000353 mct_BeforeDQSTrain_D(pMCTstat, pDCTstatA);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000354 phyAssistedMemFnceTraining(pMCTstat, pDCTstatA);
355
Marc Jones8ae8c882007-12-19 01:32:08 +0000356 if (nv_DQSTrainCTL) {
Marco Schmidta7741922009-06-06 11:33:58 +0000357 mctHookBeforeAnyTraining(pMCTstat, pDCTstatA);
Marc Jones8ae8c882007-12-19 01:32:08 +0000358
359 print_t("DQSTiming_D: TrainReceiverEn_D FirstPass:\n");
360 TrainReceiverEn_D(pMCTstat, pDCTstatA, FirstPass);
361
362 print_t("DQSTiming_D: mct_TrainDQSPos_D\n");
363 mct_TrainDQSPos_D(pMCTstat, pDCTstatA);
364
365 // Second Pass never used for Barcelona!
366 //print_t("DQSTiming_D: TrainReceiverEn_D SecondPass:\n");
367 //TrainReceiverEn_D(pMCTstat, pDCTstatA, SecondPass);
368
369 print_t("DQSTiming_D: mctSetEccDQSRcvrEn_D\n");
370 mctSetEccDQSRcvrEn_D(pMCTstat, pDCTstatA);
371
372 print_t("DQSTiming_D: TrainMaxReadLatency_D\n");
373//FIXME - currently uses calculated value TrainMaxReadLatency_D(pMCTstat, pDCTstatA);
374 mctHookAfterAnyTraining();
375 mctSaveDQSSigTmg_D();
376
377 print_t("DQSTiming_D: mct_EndDQSTraining_D\n");
378 mct_EndDQSTraining_D(pMCTstat, pDCTstatA);
379
380 print_t("DQSTiming_D: MCTMemClr_D\n");
381 MCTMemClr_D(pMCTstat, pDCTstatA);
382 } else {
383 mctGetDQSSigTmg_D(); /* get values into data structure */
384 LoadDQSSigTmgRegs_D(pMCTstat, pDCTstatA); /* load values into registers.*/
385 //mctDoWarmResetMemClr_D();
386 MCTMemClr_D(pMCTstat, pDCTstatA);
387 }
388}
389
390
391static void LoadDQSSigTmgRegs_D(struct MCTStatStruc *pMCTstat,
392 struct DCTStatStruc *pDCTstatA)
393{
394 u8 Node, Receiver, Channel, Dir, DIMM;
395 u32 dev;
396 u32 index_reg;
397 u32 reg;
398 u32 index;
399 u32 val;
400
401
402 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
403 struct DCTStatStruc *pDCTstat;
404 pDCTstat = pDCTstatA + Node;
405
406 if (pDCTstat->DCTSysLimit) {
407 dev = pDCTstat->dev_dct;
408 for (Channel = 0;Channel < 2; Channel++) {
409 /* there are four receiver pairs,
410 loosely associated with chipselects.*/
411 index_reg = 0x98 + Channel * 0x100;
412 for (Receiver = 0; Receiver < 8; Receiver += 2) {
413 /* Set Receiver Enable Values */
414 mct_SetRcvrEnDly_D(pDCTstat,
415 0, /* RcvrEnDly */
416 1, /* FinalValue, From stack */
417 Channel,
418 Receiver,
419 dev, index_reg,
420 (Receiver >> 1) * 3 + 0x10, /* Addl_Index */
421 2); /* Pass Second Pass ? */
422
423 }
424 }
425 for (Channel = 0; Channel<2; Channel++) {
426 SetEccDQSRcvrEn_D(pDCTstat, Channel);
427 }
428
429 for (Channel = 0; Channel < 2; Channel++) {
430 u8 *p;
431 index_reg = 0x98 + Channel * 0x100;
432
433 /* NOTE:
434 * when 400, 533, 667, it will support dimm0/1/2/3,
435 * and set conf for dimm0, hw will copy to dimm1/2/3
436 * set for dimm1, hw will copy to dimm3
Elyes HAOUAS0f92f632014-07-27 19:37:31 +0200437 * Rev A/B only support DIMM0/1 when 800MHz and above
Marc Jones8ae8c882007-12-19 01:32:08 +0000438 * + 0x100 to next dimm
Elyes HAOUAS0f92f632014-07-27 19:37:31 +0200439 * Rev C support DIMM0/1/2/3 when 800MHz and above
Marc Jones8ae8c882007-12-19 01:32:08 +0000440 * + 0x100 to next dimm
441 */
442 for (DIMM = 0; DIMM < 2; DIMM++) {
443 if (DIMM==0) {
444 index = 0; /* CHA Write Data Timing Low */
445 } else {
446 if (pDCTstat->Speed >= 4) {
447 index = 0x100 * DIMM;
448 } else {
449 break;
450 }
451 }
452 for (Dir=0;Dir<2;Dir++) {//RD/WR
453 p = pDCTstat->CH_D_DIR_B_DQS[Channel][DIMM][Dir];
454 val = stream_to_int(p); /* CHA Read Data Timing High */
455 Set_NB32_index_wait(dev, index_reg, index+1, val);
456 val = stream_to_int(p+4); /* CHA Write Data Timing High */
457 Set_NB32_index_wait(dev, index_reg, index+2, val);
458 val = *(p+8); /* CHA Write ECC Timing */
459 Set_NB32_index_wait(dev, index_reg, index+3, val);
460 index += 4;
461 }
462 }
463 }
464
465 for (Channel = 0; Channel<2; Channel++) {
466 reg = 0x78 + Channel * 0x100;
467 val = Get_NB32(dev, reg);
468 val &= ~(0x3ff<<22);
469 val |= ((u32) pDCTstat->CH_MaxRdLat[Channel] << 22);
470 val &= ~(1<<DqsRcvEnTrain);
471 Set_NB32(dev, reg, val); /* program MaxRdLatency to correspond with current delay*/
472 }
473 }
474 }
475}
476
Stefan Reinauerd6532112010-04-16 00:31:44 +0000477#ifdef UNUSED_CODE
Marc Jones8ae8c882007-12-19 01:32:08 +0000478static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
Myles Watsonad894c52010-04-30 17:11:03 +0000479 struct DCTStatStruc *pDCTstatA);
480static void ResetNBECCstat_D(struct MCTStatStruc *pMCTstat,
Marc Jones8ae8c882007-12-19 01:32:08 +0000481 struct DCTStatStruc *pDCTstatA)
482{
483 /* Clear MC4_STS for all Nodes in the system. This is required in some
484 * circumstances to clear left over garbage from cold reset, shutdown,
485 * or normal ECC memory conditioning.
486 */
487
488 //FIXME: this function depends on pDCTstat Array ( with Node id ) - Is this really a problem?
489
490 u32 dev;
491 u8 Node;
492
493 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
494 struct DCTStatStruc *pDCTstat;
495 pDCTstat = pDCTstatA + Node;
496
497 if (pDCTstat->NodePresent) {
498 dev = pDCTstat->dev_nbmisc;
499 /*MCA NB Status Low (alias to MC4_STS[31:0] */
500 Set_NB32(dev, 0x48, 0);
501 /* MCA NB Status High (alias to MC4_STS[63:32] */
502 Set_NB32(dev, 0x4C, 0);
503 }
504 }
505}
Stefan Reinauerd6532112010-04-16 00:31:44 +0000506#endif
Marc Jones8ae8c882007-12-19 01:32:08 +0000507
508static void HTMemMapInit_D(struct MCTStatStruc *pMCTstat,
509 struct DCTStatStruc *pDCTstatA)
510{
511 u8 Node;
512 u32 NextBase, BottomIO;
513 u8 _MemHoleRemap, DramHoleBase, DramHoleOffset;
514 u32 HoleSize, DramSelBaseAddr;
515
516 u32 val;
517 u32 base;
518 u32 limit;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000519 u32 dev, devx;
Marc Jones8ae8c882007-12-19 01:32:08 +0000520 struct DCTStatStruc *pDCTstat;
521
522 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
523
524 if (pMCTstat->HoleBase == 0) {
525 DramHoleBase = mctGet_NVbits(NV_BottomIO);
526 } else {
527 DramHoleBase = pMCTstat->HoleBase >> (24-8);
528 }
529
530 BottomIO = DramHoleBase << (24-8);
531
532 NextBase = 0;
533 pDCTstat = pDCTstatA + 0;
534 dev = pDCTstat->dev_map;
535
536
537 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000538 pDCTstat = pDCTstatA + Node;
539 devx = pDCTstat->dev_map;
Marc Jones8ae8c882007-12-19 01:32:08 +0000540 DramSelBaseAddr = 0;
Marc Jones8ae8c882007-12-19 01:32:08 +0000541 if (!pDCTstat->GangedMode) {
542 DramSelBaseAddr = pDCTstat->NodeSysLimit - pDCTstat->DCTSysLimit;
543 /*In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
544 val = pDCTstat->NodeSysLimit;
545 if ((val & 0xFF) == 0xFE) {
546 DramSelBaseAddr++;
547 val++;
548 }
549 pDCTstat->DCTSysLimit = val;
550 }
551
552 base = pDCTstat->DCTSysBase;
553 limit = pDCTstat->DCTSysLimit;
554 if (limit > base) {
555 base += NextBase;
556 limit += NextBase;
557 DramSelBaseAddr += NextBase;
Stefan Reinauerc02b4fc2010-03-22 11:42:32 +0000558 printk(BIOS_DEBUG, " Node: %02x base: %02x limit: %02x BottomIO: %02x\n", Node, base, limit, BottomIO);
Marc Jones8ae8c882007-12-19 01:32:08 +0000559
560 if (_MemHoleRemap) {
561 if ((base < BottomIO) && (limit >= BottomIO)) {
562 /* HW Dram Remap */
563 pDCTstat->Status |= 1 << SB_HWHole;
564 pMCTstat->GStatus |= 1 << GSB_HWHole;
565 pDCTstat->DCTSysBase = base;
566 pDCTstat->DCTSysLimit = limit;
567 pDCTstat->DCTHoleBase = BottomIO;
568 pMCTstat->HoleBase = BottomIO;
569 HoleSize = _4GB_RJ8 - BottomIO; /* HoleSize[39:8] */
570 if ((DramSelBaseAddr > 0) && (DramSelBaseAddr < BottomIO))
571 base = DramSelBaseAddr;
572 val = ((base + HoleSize) >> (24-8)) & 0xFF;
573 DramHoleOffset = val;
574 val <<= 8; /* shl 16, rol 24 */
575 val |= DramHoleBase << 24;
576 val |= 1 << DramHoleValid;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000577 Set_NB32(devx, 0xF0, val); /* Dram Hole Address Reg */
Marc Jones8ae8c882007-12-19 01:32:08 +0000578 pDCTstat->DCTSysLimit += HoleSize;
579 base = pDCTstat->DCTSysBase;
580 limit = pDCTstat->DCTSysLimit;
581 } else if (base == BottomIO) {
582 /* SW Node Hoist */
583 pMCTstat->GStatus |= 1<<GSB_SpIntRemapHole;
584 pDCTstat->Status |= 1<<SB_SWNodeHole;
585 pMCTstat->GStatus |= 1<<GSB_SoftHole;
586 pMCTstat->HoleBase = base;
587 limit -= base;
588 base = _4GB_RJ8;
589 limit += base;
590 pDCTstat->DCTSysBase = base;
591 pDCTstat->DCTSysLimit = limit;
592 } else {
593 /* No Remapping. Normal Contiguous mapping */
594 pDCTstat->DCTSysBase = base;
595 pDCTstat->DCTSysLimit = limit;
596 }
597 } else {
598 /*No Remapping. Normal Contiguous mapping*/
599 pDCTstat->DCTSysBase = base;
600 pDCTstat->DCTSysLimit = limit;
601 }
602 base |= 3; /* set WE,RE fields*/
603 pMCTstat->SysLimit = limit;
604 }
605 Set_NB32(dev, 0x40 + (Node << 3), base); /* [Node] + Dram Base 0 */
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000606
607 /* if Node limit > 1GB then set it to 1GB boundary for each node */
608 if ((mctSetNodeBoundary_D()) && (limit > 0x00400000)) {
609 limit++;
610 limit &= 0xFFC00000;
611 limit--;
612 }
613 val = limit & 0xFFFF0000;
614 val |= Node;
Marc Jones8ae8c882007-12-19 01:32:08 +0000615 Set_NB32(dev, 0x44 + (Node << 3), val); /* set DstNode */
616
617 limit = pDCTstat->DCTSysLimit;
618 if (limit) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000619 NextBase = (limit & 0xFFFF0000) + 0x10000;
620 if ((mctSetNodeBoundary_D()) && (NextBase > 0x00400000)) {
621 NextBase++;
622 NextBase &= 0xFFC00000;
623 NextBase--;
624 }
Marc Jones8ae8c882007-12-19 01:32:08 +0000625 }
626 }
627
628 /* Copy dram map from Node 0 to Node 1-7 */
629 for (Node = 1; Node < MAX_NODES_SUPPORTED; Node++) {
Marc Jones8ae8c882007-12-19 01:32:08 +0000630 u32 reg;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +0000631 pDCTstat = pDCTstatA + Node;
632 devx = pDCTstat->dev_map;
Marc Jones8ae8c882007-12-19 01:32:08 +0000633
634 if (pDCTstat->NodePresent) {
Xavi Drudis Ferran7cdf1ec2010-09-27 21:08:40 +0000635 printk(BIOS_DEBUG, " Copy dram map from Node 0 to Node %02x \n", Node);
Marc Jones8ae8c882007-12-19 01:32:08 +0000636 reg = 0x40; /*Dram Base 0*/
637 do {
638 val = Get_NB32(dev, reg);
639 Set_NB32(devx, reg, val);
640 reg += 4;
641 } while ( reg < 0x80);
642 } else {
643 break; /* stop at first absent Node */
644 }
645 }
646
647 /*Copy dram map to F1x120/124*/
648 mct_HTMemMapExt(pMCTstat, pDCTstatA);
649}
650
651
652static void MCTMemClr_D(struct MCTStatStruc *pMCTstat,
653 struct DCTStatStruc *pDCTstatA)
654{
655
656 /* Initiates a memory clear operation for all node. The mem clr
Zheng Baoc3af12f2010-10-08 05:08:47 +0000657 * is done in parallel. After the memclr is complete, all processors
Marc Jones8ae8c882007-12-19 01:32:08 +0000658 * status are checked to ensure that memclr has completed.
659 */
660 u8 Node;
661 struct DCTStatStruc *pDCTstat;
662
663 if (!mctGet_NVbits(NV_DQSTrainCTL)){
664 // FIXME: callback to wrapper: mctDoWarmResetMemClr_D
665 } else { // NV_DQSTrainCTL == 1
666 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
667 pDCTstat = pDCTstatA + Node;
668
669 if (pDCTstat->NodePresent) {
670 DCTMemClr_Init_D(pMCTstat, pDCTstat);
671 }
672 }
673 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
674 pDCTstat = pDCTstatA + Node;
675
676 if (pDCTstat->NodePresent) {
677 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
678 }
679 }
680 }
681}
682
683
684static void DCTMemClr_Init_D(struct MCTStatStruc *pMCTstat,
685 struct DCTStatStruc *pDCTstat)
686{
687 u32 val;
688 u32 dev;
689 u32 reg;
690
691 /* Initiates a memory clear operation on one node */
692 if (pDCTstat->DCTSysLimit) {
693 dev = pDCTstat->dev_dct;
694 reg = 0x110;
695
696 do {
697 val = Get_NB32(dev, reg);
698 } while (val & (1 << MemClrBusy));
699
700 val |= (1 << MemClrInit);
701 Set_NB32(dev, reg, val);
702
703 }
704}
705
706
707static void MCTMemClrSync_D(struct MCTStatStruc *pMCTstat,
708 struct DCTStatStruc *pDCTstatA)
709{
710 /* Ensures that memory clear has completed on all node.*/
711 u8 Node;
712 struct DCTStatStruc *pDCTstat;
713
714 if (!mctGet_NVbits(NV_DQSTrainCTL)){
715 // callback to wrapper: mctDoWarmResetMemClr_D
716 } else { // NV_DQSTrainCTL == 1
717 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
718 pDCTstat = pDCTstatA + Node;
719
720 if (pDCTstat->NodePresent) {
721 DCTMemClr_Sync_D(pMCTstat, pDCTstat);
722 }
723 }
724 }
725}
726
727
728static void DCTMemClr_Sync_D(struct MCTStatStruc *pMCTstat,
729 struct DCTStatStruc *pDCTstat)
730{
731 u32 val;
732 u32 dev = pDCTstat->dev_dct;
733 u32 reg;
734
735 /* Ensure that a memory clear operation has completed on one node */
736 if (pDCTstat->DCTSysLimit){
737 reg = 0x110;
738
739 do {
740 val = Get_NB32(dev, reg);
741 } while (val & (1 << MemClrBusy));
742
743 do {
744 val = Get_NB32(dev, reg);
745 } while (!(val & (1 << Dr_MemClrStatus)));
746 }
747
Timothy Pearsonb6fa61a2015-02-20 13:13:35 -0600748 /* Implement BKDG Rev 3.62 recommendations */
749 val = 0x0FE40F80;
750 if (!(mctGetLogicalCPUID(0) & AMD_FAM10_LT_D) && mctGet_NVbits(NV_Unganged))
751 val |= (0x18 << 2);
752 else
753 val |= (0x10 << 2);
Marc Jones8ae8c882007-12-19 01:32:08 +0000754 val |= MCCH_FlushWrOnStpGnt; // Set for S3
755 Set_NB32(dev, 0x11C, val);
756}
757
758
759static u8 NodePresent_D(u8 Node)
760{
761 /*
762 * Determine if a single Hammer Node exists within the network.
763 */
764
765 u32 dev;
766 u32 val;
767 u32 dword;
768 u8 ret = 0;
769
770 dev = PA_HOST(Node); /*test device/vendor id at host bridge */
771 val = Get_NB32(dev, 0);
772 dword = mct_NodePresent_D(); /* FIXME: BOZO -11001022h rev for F */
773 if (val == dword) { /* AMD Hammer Family CPU HT Configuration */
774 if (oemNodePresent_D(Node, &ret))
775 goto finish;
776 /* Node ID register */
777 val = Get_NB32(dev, 0x60);
778 val &= 0x07;
779 dword = Node;
780 if (val == dword) /* current nodeID = requested nodeID ? */
781 ret = 1;
782finish:
Zheng Bao7b1a3c32010-09-28 04:43:16 +0000783 ;
Marc Jones8ae8c882007-12-19 01:32:08 +0000784 }
785
786 return ret;
787}
788
789
790static void DCTInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct)
791{
792 /*
793 * Initialize DRAM on single Athlon 64/Opteron Node.
794 */
795
796 u8 stopDCTflag;
797 u32 val;
798
799 ClearDCT_D(pMCTstat, pDCTstat, dct);
800 stopDCTflag = 1; /*preload flag with 'disable' */
801 if (mct_DIMMPresence(pMCTstat, pDCTstat, dct) < SC_StopError) {
802 print_t("\t\tDCTInit_D: mct_DIMMPresence Done\n");
803 if (mct_SPDCalcWidth(pMCTstat, pDCTstat, dct) < SC_StopError) {
804 print_t("\t\tDCTInit_D: mct_SPDCalcWidth Done\n");
805 if (AutoCycTiming_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
806 print_t("\t\tDCTInit_D: AutoCycTiming_D Done\n");
807 if (AutoConfig_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
808 print_t("\t\tDCTInit_D: AutoConfig_D Done\n");
809 if (PlatformSpec_D(pMCTstat, pDCTstat, dct) < SC_StopError) {
810 print_t("\t\tDCTInit_D: PlatformSpec_D Done\n");
811 stopDCTflag = 0;
812 if (!(pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW))) {
813 print_t("\t\tDCTInit_D: StartupDCT_D\n");
814 StartupDCT_D(pMCTstat, pDCTstat, dct); /*yeaahhh! */
815 }
816 }
817 }
818 }
819 }
820 }
821 if (stopDCTflag) {
822 u32 reg_off = dct * 0x100;
823 val = 1<<DisDramInterface;
824 Set_NB32(pDCTstat->dev_dct, reg_off+0x94, val);
825 /*To maximize power savings when DisDramInterface=1b,
826 all of the MemClkDis bits should also be set.*/
827 val = 0xFF000000;
828 Set_NB32(pDCTstat->dev_dct, reg_off+0x88, val);
Zheng Bao69436e12011-01-06 02:18:12 +0000829 } else {
830 mct_EnDllShutdownSR(pMCTstat, pDCTstat, dct);
Marc Jones8ae8c882007-12-19 01:32:08 +0000831 }
832}
833
834
835static void SyncDCTsReady_D(struct MCTStatStruc *pMCTstat,
836 struct DCTStatStruc *pDCTstatA)
837{
838 /* Wait (and block further access to dram) for all DCTs to be ready,
839 * by polling all InitDram bits and waiting for possible memory clear
840 * operations to be complete. Read MemClkFreqVal bit to see if
841 * the DIMMs are present in this node.
842 */
843
844 u8 Node;
845
846 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
847 struct DCTStatStruc *pDCTstat;
848 pDCTstat = pDCTstatA + Node;
849 mct_SyncDCTsReady(pDCTstat);
850 }
851}
852
853
854static void StartupDCT_D(struct MCTStatStruc *pMCTstat,
855 struct DCTStatStruc *pDCTstat, u8 dct)
856{
857 /* Read MemClkFreqVal bit to see if the DIMMs are present in this node.
858 * If the DIMMs are present then set the DRAM Enable bit for this node.
859 *
860 * Setting dram init starts up the DCT state machine, initializes the
861 * dram devices with MRS commands, and kicks off any
862 * HW memory clear process that the chip is capable of. The sooner
863 * that dram init is set for all nodes, the faster the memory system
864 * initialization can complete. Thus, the init loop is unrolled into
Zheng Baoc3af12f2010-10-08 05:08:47 +0000865 * two loops so as to start the processes for non BSP nodes sooner.
Marc Jones8ae8c882007-12-19 01:32:08 +0000866 * This procedure will not wait for the process to finish.
867 * Synchronization is handled elsewhere.
868 */
869
870 u32 val;
871 u32 dev;
872 u8 byte;
873 u32 reg;
874 u32 reg_off = dct * 0x100;
875
876 dev = pDCTstat->dev_dct;
877 val = Get_NB32(dev, 0x94 + reg_off);
878 if (val & (1<<MemClkFreqVal)) {
879 print_t("\t\t\tStartupDCT_D: MemClkFreqVal\n");
880 byte = mctGet_NVbits(NV_DQSTrainCTL);
881 if (byte == 1) {
882 /* Enable DQSRcvEn training mode */
883 print_t("\t\t\tStartupDCT_D: DqsRcvEnTrain set \n");
884 reg = 0x78 + reg_off;
885 val = Get_NB32(dev, reg);
886 /* Setting this bit forces a 1T window with hard left
Zheng Baoc3af12f2010-10-08 05:08:47 +0000887 * pass/fail edge and a probabilistic right pass/fail
Marc Jones8ae8c882007-12-19 01:32:08 +0000888 * edge. LEFT edge is referenced for final
889 * receiver enable position.*/
890 val |= 1 << DqsRcvEnTrain;
891 Set_NB32(dev, reg, val);
892 }
893 mctHookBeforeDramInit(); /* generalized Hook */
894 print_t("\t\t\tStartupDCT_D: DramInit \n");
895 mct_DramInit(pMCTstat, pDCTstat, dct);
896 AfterDramInit_D(pDCTstat, dct);
897 mctHookAfterDramInit(); /* generalized Hook*/
898 }
899}
900
901
902static void ClearDCT_D(struct MCTStatStruc *pMCTstat,
903 struct DCTStatStruc *pDCTstat, u8 dct)
904{
905 u32 reg_end;
906 u32 dev = pDCTstat->dev_dct;
907 u32 reg = 0x40 + 0x100 * dct;
908 u32 val = 0;
909
910 if (pMCTstat->GStatus & (1 << GSB_EnDIMMSpareNW)) {
911 reg_end = 0x78 + 0x100 * dct;
912 } else {
913 reg_end = 0xA4 + 0x100 * dct;
914 }
915
916 while(reg < reg_end) {
917 Set_NB32(dev, reg, val);
918 reg += 4;
919 }
920
921 val = 0;
922 dev = pDCTstat->dev_map;
923 reg = 0xF0;
924 Set_NB32(dev, reg, val);
925}
926
927
928static u8 AutoCycTiming_D(struct MCTStatStruc *pMCTstat,
929 struct DCTStatStruc *pDCTstat, u8 dct)
930{
931 /* Initialize DCT Timing registers as per DIMM SPD.
932 * For primary timing (T, CL) use best case T value.
933 * For secondary timing params., use most aggressive settings
934 * of slowest DIMM.
935 *
936 * There are three components to determining "maximum frequency":
937 * SPD component, Bus load component, and "Preset" max frequency
938 * component.
939 *
940 * The SPD component is a function of the min cycle time specified
941 * by each DIMM, and the interaction of cycle times from all DIMMs
942 * in conjunction with CAS latency. The SPD component only applies
943 * when user timing mode is 'Auto'.
944 *
945 * The Bus load component is a limiting factor determined by electrical
946 * characteristics on the bus as a result of varying number of device
947 * loads. The Bus load component is specific to each platform but may
948 * also be a function of other factors. The bus load component only
949 * applies when user timing mode is 'Auto'.
950 *
951 * The Preset component is subdivided into three items and is the
952 * minimum of the set: Silicon revision, user limit setting when user
953 * timing mode is 'Auto' and memclock mode is 'Limit', OEM build
954 * specification of the maximum frequency. The Preset component is only
955 * applies when user timing mode is 'Auto'.
956 */
957
958 u8 i;
959 u8 Twr, Trtp;
960 u8 Trp, Trrd, Trcd, Tras, Trc, Trfc[4], Rows;
961 u32 DramTimingLo, DramTimingHi;
962 u16 Tk10, Tk40;
963 u8 Twtr;
964 u8 LDIMM;
965 u8 DDR2_1066;
966 u8 byte;
967 u32 dword;
968 u32 dev;
969 u32 reg;
970 u32 reg_off;
971 u32 val;
972 u16 smbaddr;
973
974 /* Get primary timing (CAS Latency and Cycle Time) */
975 if (pDCTstat->Speed == 0) {
976 mctGet_MaxLoadFreq(pDCTstat);
977
978 /* and Factor in presets (setup options, Si cap, etc.) */
979 GetPresetmaxF_D(pMCTstat, pDCTstat);
980
981 /* Go get best T and CL as specified by DIMM mfgs. and OEM */
982 SPDGetTCL_D(pMCTstat, pDCTstat, dct);
983 /* skip callback mctForce800to1067_D */
984 pDCTstat->Speed = pDCTstat->DIMMAutoSpeed;
985 pDCTstat->CASL = pDCTstat->DIMMCASL;
986
987 /* if "manual" memclock mode */
988 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 2)
989 pDCTstat->Speed = mctGet_NVbits(NV_MemCkVal) + 1;
990
Marc Jones8ae8c882007-12-19 01:32:08 +0000991 }
Zheng Bao1476a9e2009-08-25 04:12:55 +0000992 mct_AfterGetCLT(pMCTstat, pDCTstat, dct);
Marc Jones8ae8c882007-12-19 01:32:08 +0000993
994 /* Gather all DIMM mini-max values for cycle timing data */
995 Rows = 0;
996 Trp = 0;
997 Trrd = 0;
998 Trcd = 0;
999 Trtp = 0;
1000 Tras = 0;
1001 Trc = 0;
1002 Twr = 0;
1003 Twtr = 0;
1004 for (i=0; i < 4; i++)
1005 Trfc[i] = 0;
1006
1007 for ( i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
1008 LDIMM = i >> 1;
1009 if (pDCTstat->DIMMValid & (1 << i)) {
Zheng Bao1476a9e2009-08-25 04:12:55 +00001010 smbaddr = Get_DIMMAddress_D(pDCTstat, dct + i);
Marc Jones8ae8c882007-12-19 01:32:08 +00001011 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1012 if (Rows < byte)
1013 Rows = byte; /* keep track of largest row sz */
1014
1015 byte = mctRead_SPD(smbaddr, SPD_TRP);
1016 if (Trp < byte)
1017 Trp = byte;
1018
1019 byte = mctRead_SPD(smbaddr, SPD_TRRD);
1020 if (Trrd < byte)
1021 Trrd = byte;
1022
1023 byte = mctRead_SPD(smbaddr, SPD_TRCD);
1024 if (Trcd < byte)
1025 Trcd = byte;
1026
1027 byte = mctRead_SPD(smbaddr, SPD_TRTP);
1028 if (Trtp < byte)
1029 Trtp = byte;
1030
1031 byte = mctRead_SPD(smbaddr, SPD_TWR);
1032 if (Twr < byte)
1033 Twr = byte;
1034
1035 byte = mctRead_SPD(smbaddr, SPD_TWTR);
1036 if (Twtr < byte)
1037 Twtr = byte;
1038
1039 val = mctRead_SPD(smbaddr, SPD_TRC);
1040 if ((val == 0) || (val == 0xFF)) {
1041 pDCTstat->ErrStatus |= 1<<SB_NoTrcTrfc;
1042 pDCTstat->ErrCode = SC_VarianceErr;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001043 val = Get_DefTrc_k_D(pDCTstat->Speed);
Marc Jones8ae8c882007-12-19 01:32:08 +00001044 } else {
1045 byte = mctRead_SPD(smbaddr, SPD_TRCRFC);
1046 if (byte & 0xF0) {
Zheng Baoc3af12f2010-10-08 05:08:47 +00001047 val++; /* round up in case fractional extension is non-zero.*/
Marc Jones8ae8c882007-12-19 01:32:08 +00001048 }
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001049 }
1050 if (Trc < val)
1051 Trc = val;
Marc Jones8ae8c882007-12-19 01:32:08 +00001052
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001053 /* dev density=rank size/#devs per rank */
1054 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
Marc Jones8ae8c882007-12-19 01:32:08 +00001055
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001056 val = ((byte >> 5) | (byte << 3)) & 0xFF;
1057 val <<= 2;
Marc Jones8ae8c882007-12-19 01:32:08 +00001058
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001059 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE; /* dev density=2^(rows+columns+banks) */
1060 if (byte == 4) {
1061 val >>= 4;
1062 } else if (byte == 8) {
1063 val >>= 3;
1064 } else if (byte == 16) {
1065 val >>= 2;
1066 }
Marc Jones8ae8c882007-12-19 01:32:08 +00001067
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001068 byte = bsr(val);
Marc Jones8ae8c882007-12-19 01:32:08 +00001069
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001070 if (Trfc[LDIMM] < byte)
1071 Trfc[LDIMM] = byte;
Marc Jones8ae8c882007-12-19 01:32:08 +00001072
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001073 byte = mctRead_SPD(smbaddr, SPD_TRAS);
1074 if (Tras < byte)
1075 Tras = byte;
Marc Jones8ae8c882007-12-19 01:32:08 +00001076 } /* Dimm Present */
1077 }
1078
1079 /* Convert DRAM CycleTiming values and store into DCT structure */
1080 DDR2_1066 = 0;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001081 byte = pDCTstat->Speed;
Marc Jones8ae8c882007-12-19 01:32:08 +00001082 if (byte == 5)
1083 DDR2_1066 = 1;
1084 Tk40 = Get_40Tk_D(byte);
1085 Tk10 = Tk40>>2;
1086
1087 /* Notes:
1088 1. All secondary time values given in SPDs are in binary with units of ns.
1089 2. Some time values are scaled by four, in order to have least count of 0.25 ns
1090 (more accuracy). JEDEC SPD spec. shows which ones are x1 and x4.
1091 3. Internally to this SW, cycle time, Tk, is scaled by 10 to affect a
1092 least count of 0.1 ns (more accuracy).
1093 4. SPD values not scaled are multiplied by 10 and then divided by 10T to find
1094 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).
1095 5. SPD values that are prescaled by 4 are multiplied by 10 and then divided by 40T to find
1096 equivalent minimum number of bus clocks (a remainder causes round-up of clocks).*/
1097
1098 /* Tras */
1099 dword = Tras * 40;
1100 pDCTstat->DIMMTras = (u16)dword;
1101 val = dword / Tk40;
1102 if (dword % Tk40) { /* round up number of busclocks */
1103 val++;
1104 }
1105 if (DDR2_1066) {
1106 if (val < Min_TrasT_1066)
1107 val = Min_TrasT_1066;
1108 else if (val > Max_TrasT_1066)
1109 val = Max_TrasT_1066;
1110 } else {
1111 if (val < Min_TrasT)
1112 val = Min_TrasT;
1113 else if (val > Max_TrasT)
1114 val = Max_TrasT;
1115 }
1116 pDCTstat->Tras = val;
1117
1118 /* Trp */
1119 dword = Trp * 10;
1120 pDCTstat->DIMMTrp = dword;
1121 val = dword / Tk40;
1122 if (dword % Tk40) { /* round up number of busclocks */
1123 val++;
1124 }
1125 if (DDR2_1066) {
1126 if (val < Min_TrasT_1066)
1127 val = Min_TrpT_1066;
1128 else if (val > Max_TrpT_1066)
1129 val = Max_TrpT_1066;
1130 } else {
1131 if (val < Min_TrpT)
1132 val = Min_TrpT;
1133 else if (val > Max_TrpT)
1134 val = Max_TrpT;
1135 }
1136 pDCTstat->Trp = val;
1137
Zheng Bao7b1a3c32010-09-28 04:43:16 +00001138 /*Trrd*/
Marc Jones8ae8c882007-12-19 01:32:08 +00001139 dword = Trrd * 10;
1140 pDCTstat->DIMMTrrd = dword;
1141 val = dword / Tk40;
1142 if (dword % Tk40) { /* round up number of busclocks */
1143 val++;
1144 }
1145 if (DDR2_1066) {
1146 if (val < Min_TrrdT_1066)
1147 val = Min_TrrdT_1066;
1148 else if (val > Max_TrrdT_1066)
1149 val = Max_TrrdT_1066;
1150 } else {
1151 if (val < Min_TrrdT)
1152 val = Min_TrrdT;
1153 else if (val > Max_TrrdT)
1154 val = Max_TrrdT;
1155 }
1156 pDCTstat->Trrd = val;
1157
1158 /* Trcd */
1159 dword = Trcd * 10;
1160 pDCTstat->DIMMTrcd = dword;
1161 val = dword / Tk40;
1162 if (dword % Tk40) { /* round up number of busclocks */
1163 val++;
1164 }
1165 if (DDR2_1066) {
1166 if (val < Min_TrcdT_1066)
1167 val = Min_TrcdT_1066;
1168 else if (val > Max_TrcdT_1066)
1169 val = Max_TrcdT_1066;
1170 } else {
1171 if (val < Min_TrcdT)
1172 val = Min_TrcdT;
1173 else if (val > Max_TrcdT)
1174 val = Max_TrcdT;
1175 }
1176 pDCTstat->Trcd = val;
1177
1178 /* Trc */
1179 dword = Trc * 40;
1180 pDCTstat->DIMMTrc = dword;
1181 val = dword / Tk40;
1182 if (dword % Tk40) { /* round up number of busclocks */
1183 val++;
1184 }
1185 if (DDR2_1066) {
1186 if (val < Min_TrcT_1066)
1187 val = Min_TrcT_1066;
1188 else if (val > Max_TrcT_1066)
1189 val = Max_TrcT_1066;
1190 } else {
1191 if (val < Min_TrcT)
1192 val = Min_TrcT;
1193 else if (val > Max_TrcT)
1194 val = Max_TrcT;
1195 }
1196 pDCTstat->Trc = val;
1197
1198 /* Trtp */
1199 dword = Trtp * 10;
1200 pDCTstat->DIMMTrtp = dword;
1201 val = pDCTstat->Speed;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001202 if (val <= 2) { /* 7.75ns / Speed in ns to get clock # */
1203 val = 2; /* for DDR400/DDR533 */
1204 } else { /* Note a speed of 3 will be a Trtp of 3 */
1205 val = 3; /* for DDR667/DDR800/DDR1066 */
Marc Jones8ae8c882007-12-19 01:32:08 +00001206 }
1207 pDCTstat->Trtp = val;
1208
1209 /* Twr */
1210 dword = Twr * 10;
1211 pDCTstat->DIMMTwr = dword;
1212 val = dword / Tk40;
1213 if (dword % Tk40) { /* round up number of busclocks */
1214 val++;
1215 }
1216 if (DDR2_1066) {
1217 if (val < Min_TwrT_1066)
1218 val = Min_TwrT_1066;
1219 else if (val > Max_TwrT_1066)
1220 val = Max_TwrT_1066;
1221 } else {
1222 if (val < Min_TwrT)
1223 val = Min_TwrT;
1224 else if (val > Max_TwrT)
1225 val = Max_TwrT;
1226 }
1227 pDCTstat->Twr = val;
1228
1229 /* Twtr */
1230 dword = Twtr * 10;
1231 pDCTstat->DIMMTwtr = dword;
1232 val = dword / Tk40;
1233 if (dword % Tk40) { /* round up number of busclocks */
1234 val++;
1235 }
1236 if (DDR2_1066) {
1237 if (val < Min_TwrT_1066)
1238 val = Min_TwtrT_1066;
1239 else if (val > Max_TwtrT_1066)
1240 val = Max_TwtrT_1066;
1241 } else {
1242 if (val < Min_TwtrT)
1243 val = Min_TwtrT;
1244 else if (val > Max_TwtrT)
1245 val = Max_TwtrT;
1246 }
1247 pDCTstat->Twtr = val;
1248
1249
1250 /* Trfc0-Trfc3 */
1251 for (i=0; i<4; i++)
1252 pDCTstat->Trfc[i] = Trfc[i];
1253
1254 mctAdjustAutoCycTmg_D();
1255
1256 /* Program DRAM Timing values */
1257 DramTimingLo = 0; /* Dram Timing Low init */
1258 val = pDCTstat->CASL;
1259 val = Tab_tCL_j[val];
1260 DramTimingLo |= val;
1261
1262 val = pDCTstat->Trcd;
1263 if (DDR2_1066)
1264 val -= Bias_TrcdT_1066;
1265 else
1266 val -= Bias_TrcdT;
1267
1268 DramTimingLo |= val<<4;
1269
1270 val = pDCTstat->Trp;
1271 if (DDR2_1066)
1272 val -= Bias_TrpT_1066;
1273 else {
1274 val -= Bias_TrpT;
1275 val <<= 1;
1276 }
1277 DramTimingLo |= val<<7;
1278
1279 val = pDCTstat->Trtp;
1280 val -= Bias_TrtpT;
1281 DramTimingLo |= val<<11;
1282
1283 val = pDCTstat->Tras;
1284 if (DDR2_1066)
1285 val -= Bias_TrasT_1066;
1286 else
1287 val -= Bias_TrasT;
1288 DramTimingLo |= val<<12;
1289
1290 val = pDCTstat->Trc;
1291 val -= Bias_TrcT;
1292 DramTimingLo |= val<<16;
1293
1294 if (!DDR2_1066) {
1295 val = pDCTstat->Twr;
1296 val -= Bias_TwrT;
1297 DramTimingLo |= val<<20;
1298 }
1299
1300 val = pDCTstat->Trrd;
1301 if (DDR2_1066)
1302 val -= Bias_TrrdT_1066;
1303 else
1304 val -= Bias_TrrdT;
1305 DramTimingLo |= val<<22;
1306
1307
1308 DramTimingHi = 0; /* Dram Timing Low init */
1309 val = pDCTstat->Twtr;
1310 if (DDR2_1066)
1311 val -= Bias_TwtrT_1066;
1312 else
1313 val -= Bias_TwtrT;
1314 DramTimingHi |= val<<8;
1315
1316 val = 2;
1317 DramTimingHi |= val<<16;
1318
1319 val = 0;
1320 for (i=4;i>0;i--) {
1321 val <<= 3;
1322 val |= Trfc[i-1];
1323 }
1324 DramTimingHi |= val << 20;
1325
1326
1327 dev = pDCTstat->dev_dct;
1328 reg_off = 0x100 * dct;
1329 print_tx("AutoCycTiming: DramTimingLo ", DramTimingLo);
1330 print_tx("AutoCycTiming: DramTimingHi ", DramTimingHi);
1331
1332 Set_NB32(dev, 0x88 + reg_off, DramTimingLo); /*DCT Timing Low*/
1333 DramTimingHi |=0x0000FC77;
1334 Set_NB32(dev, 0x8c + reg_off, DramTimingHi); /*DCT Timing Hi*/
1335
1336 if (DDR2_1066) {
1337 /* Twr */
1338 dword = pDCTstat->Twr;
1339 dword -= Bias_TwrT_1066;
1340 dword <<= 4;
1341 reg = 0x84 + reg_off;
1342 val = Get_NB32(dev, reg);
1343 val &= 0x8F;
1344 val |= dword;
1345 Set_NB32(dev, reg, val);
1346 }
1347// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1348
1349 print_tx("AutoCycTiming: Status ", pDCTstat->Status);
1350 print_tx("AutoCycTiming: ErrStatus ", pDCTstat->ErrStatus);
1351 print_tx("AutoCycTiming: ErrCode ", pDCTstat->ErrCode);
1352 print_t("AutoCycTiming: Done\n");
1353
1354 mctHookAfterAutoCycTmg();
1355
1356 return pDCTstat->ErrCode;
1357}
1358
1359
1360static void GetPresetmaxF_D(struct MCTStatStruc *pMCTstat,
1361 struct DCTStatStruc *pDCTstat)
1362{
1363 /* Get max frequency from OEM platform definition, from any user
1364 * override (limiting) of max frequency, and from any Si Revision
1365 * Specific information. Return the least of these three in
1366 * DCTStatStruc.PresetmaxFreq.
1367 */
1368
1369 u16 proposedFreq;
1370 u16 word;
1371
1372 /* Get CPU Si Revision defined limit (NPT) */
1373 proposedFreq = 533; /* Rev F0 programmable max memclock is */
1374
1375 /*Get User defined limit if "limit" mode */
1376 if ( mctGet_NVbits(NV_MCTUSRTMGMODE) == 1) {
1377 word = Get_Fk_D(mctGet_NVbits(NV_MemCkVal) + 1);
1378 if (word < proposedFreq)
1379 proposedFreq = word;
1380
1381 /* Get Platform defined limit */
1382 word = mctGet_NVbits(NV_MAX_MEMCLK);
1383 if (word < proposedFreq)
1384 proposedFreq = word;
1385
1386 word = pDCTstat->PresetmaxFreq;
1387 if (word > proposedFreq)
1388 word = proposedFreq;
1389
1390 pDCTstat->PresetmaxFreq = word;
1391 }
1392}
1393
1394
1395
1396static void SPDGetTCL_D(struct MCTStatStruc *pMCTstat,
1397 struct DCTStatStruc *pDCTstat, u8 dct)
1398{
1399 /* Find the best T and CL primary timing parameter pair, per Mfg.,
1400 * for the given set of DIMMs, and store into DCTStatStruc
1401 * (.DIMMAutoSpeed and .DIMMCASL). See "Global relationship between
1402 * index values and item values" for definition of CAS latency
1403 * index (j) and Frequency index (k).
1404 */
1405 int i, j, k;
1406 u8 T1min, CL1min;
1407
1408 /* i={0..7} (std. physical DIMM number)
1409 * j is an integer which enumerates increasing CAS latency.
1410 * k is an integer which enumerates decreasing cycle time.
1411 * CL no. {0,1,2} corresponds to CL X, CL X-.5, or CL X-1 (per individual DIMM)
1412 * Max timing values are per parameter, of all DIMMs, spec'd in ns like the SPD.
1413 */
1414
1415 CL1min = 0xFF;
1416 T1min = 0xFF;
1417 for (k=K_MAX; k >= K_MIN; k--) {
1418 for (j = J_MIN; j <= J_MAX; j++) {
1419 if (Sys_Capability_D(pMCTstat, pDCTstat, j, k) ) {
1420 /* 1. check to see if DIMMi is populated.
1421 2. check if DIMMi supports CLj and Tjk */
1422 for (i = 0; i < MAX_DIMMS_SUPPORTED; i++) {
1423 if (pDCTstat->DIMMValid & (1 << i)) {
1424 if (Dimm_Supports_D(pDCTstat, i, j, k))
1425 break;
1426 }
1427 } /* while ++i */
1428 if (i == MAX_DIMMS_SUPPORTED) {
1429 T1min = k;
1430 CL1min = j;
1431 goto got_TCL;
1432 }
1433 }
1434 } /* while ++j */
1435 } /* while --k */
1436
1437got_TCL:
1438 if (T1min != 0xFF) {
1439 pDCTstat->DIMMCASL = CL1min; /*mfg. optimized */
1440 pDCTstat->DIMMAutoSpeed = T1min;
1441 print_tx("SPDGetTCL_D: DIMMCASL ", pDCTstat->DIMMCASL);
1442 print_tx("SPDGetTCL_D: DIMMAutoSpeed ", pDCTstat->DIMMAutoSpeed);
1443
1444 } else {
1445 pDCTstat->DIMMCASL = CL_DEF; /* failsafe values (running in min. mode) */
1446 pDCTstat->DIMMAutoSpeed = T_DEF;
1447 pDCTstat->ErrStatus |= 1 << SB_DimmMismatchT;
1448 pDCTstat->ErrStatus |= 1 << SB_MinimumMode;
1449 pDCTstat->ErrCode = SC_VarianceErr;
1450 }
1451 print_tx("SPDGetTCL_D: Status ", pDCTstat->Status);
1452 print_tx("SPDGetTCL_D: ErrStatus ", pDCTstat->ErrStatus);
1453 print_tx("SPDGetTCL_D: ErrCode ", pDCTstat->ErrCode);
1454 print_t("SPDGetTCL_D: Done\n");
1455}
1456
1457
1458static u8 PlatformSpec_D(struct MCTStatStruc *pMCTstat,
1459 struct DCTStatStruc *pDCTstat, u8 dct)
1460{
1461 u32 dev;
1462 u32 reg;
1463 u32 val;
1464
1465 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, dct);
1466
1467 if (pDCTstat->GangedMode) {
1468 mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
1469 }
1470
1471 if ( pDCTstat->_2Tmode == 2) {
1472 dev = pDCTstat->dev_dct;
1473 reg = 0x94 + 0x100 * dct; /* Dram Configuration Hi */
1474 val = Get_NB32(dev, reg);
1475 val |= 1 << 20; /* 2T CMD mode */
1476 Set_NB32(dev, reg, val);
1477 }
1478
1479 mct_PlatformSpec(pMCTstat, pDCTstat, dct);
1480 InitPhyCompensation(pMCTstat, pDCTstat, dct);
1481 mctHookAfterPSCfg();
1482 return pDCTstat->ErrCode;
1483}
1484
1485
1486static u8 AutoConfig_D(struct MCTStatStruc *pMCTstat,
1487 struct DCTStatStruc *pDCTstat, u8 dct)
1488{
1489 u32 DramControl, DramTimingLo, Status;
1490 u32 DramConfigLo, DramConfigHi, DramConfigMisc, DramConfigMisc2;
1491 u32 val;
1492 u32 reg_off;
1493 u32 dev;
1494 u16 word;
1495 u32 dword;
1496 u8 byte;
1497
1498 print_tx("AutoConfig_D: DCT: ", dct);
1499
1500 DramConfigLo = 0;
1501 DramConfigHi = 0;
1502 DramConfigMisc = 0;
1503 DramConfigMisc2 = 0;
1504
Zheng Baoc3af12f2010-10-08 05:08:47 +00001505 /* set bank addressing and Masks, plus CS pops */
Marc Jones8ae8c882007-12-19 01:32:08 +00001506 SPDSetBanks_D(pMCTstat, pDCTstat, dct);
1507 if (pDCTstat->ErrCode == SC_StopError)
1508 goto AutoConfig_exit;
1509
1510 /* map chip-selects into local address space */
1511 StitchMemory_D(pMCTstat, pDCTstat, dct);
1512 InterleaveBanks_D(pMCTstat, pDCTstat, dct);
1513
1514 /* temp image of status (for convenience). RO usage! */
1515 Status = pDCTstat->Status;
1516
1517 dev = pDCTstat->dev_dct;
1518 reg_off = 0x100 * dct;
1519
1520
1521 /* Build Dram Control Register Value */
1522 DramConfigMisc2 = Get_NB32 (dev, 0xA8 + reg_off); /* Dram Control*/
1523 DramControl = Get_NB32 (dev, 0x78 + reg_off); /* Dram Control*/
1524
1525 if (mctGet_NVbits(NV_CLKHZAltVidC3))
1526 DramControl |= 1<<16;
1527
1528 // FIXME: Add support(skip) for Ax and Cx versions
1529 DramControl |= 5; /* RdPtrInit */
1530
1531
1532 /* Build Dram Config Lo Register Value */
1533 DramConfigLo |= 1 << 4; /* 75 Ohms ODT */
1534 if (mctGet_NVbits(NV_MAX_DIMMS) == 8) {
1535 if (pDCTstat->Speed == 3) {
Edward O'Callaghanba363d32014-05-23 05:58:27 +10001536 if (pDCTstat->MAdimms[dct] == 4)
Marc Jones8ae8c882007-12-19 01:32:08 +00001537 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1538 } else if (pDCTstat->Speed == 4){
Edward O'Callaghanba363d32014-05-23 05:58:27 +10001539 if (pDCTstat->MAdimms[dct] != 1)
Marc Jones8ae8c882007-12-19 01:32:08 +00001540 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1541 }
1542 } else {
1543 // FIXME: Skip for Ax versions
Edward O'Callaghanba363d32014-05-23 05:58:27 +10001544 if (pDCTstat->MAdimms[dct] == 4) {
Marc Jones8ae8c882007-12-19 01:32:08 +00001545 if ( pDCTstat->DimmQRPresent != 0) {
1546 if ((pDCTstat->Speed == 3) || (pDCTstat->Speed == 4)) {
1547 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1548 }
Edward O'Callaghanba363d32014-05-23 05:58:27 +10001549 } else if (pDCTstat->MAdimms[dct] == 4) {
Marc Jones8ae8c882007-12-19 01:32:08 +00001550 if (pDCTstat->Speed == 4) {
1551 if ( pDCTstat->DimmQRPresent != 0) {
1552 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1553 }
1554 }
1555 }
Edward O'Callaghanba363d32014-05-23 05:58:27 +10001556 } else if (pDCTstat->MAdimms[dct] == 2) {
Marc Jones8ae8c882007-12-19 01:32:08 +00001557 DramConfigLo |= 1 << 5; /* 50 Ohms ODT */
1558 }
1559
1560 }
1561
1562 // FIXME: Skip for Ax versions
1563 /* callback not required - if (!mctParityControl_D()) */
1564 if (Status & (1 << SB_PARDIMMs)) {
1565 DramConfigLo |= 1 << ParEn;
1566 DramConfigMisc2 |= 1 << ActiveCmdAtRst;
1567 } else {
1568 DramConfigLo &= ~(1 << ParEn);
1569 DramConfigMisc2 &= ~(1 << ActiveCmdAtRst);
1570 }
1571
1572 if (mctGet_NVbits(NV_BurstLen32)) {
1573 if (!pDCTstat->GangedMode)
1574 DramConfigLo |= 1 << BurstLength32;
1575 }
1576
1577 if (Status & (1 << SB_128bitmode))
1578 DramConfigLo |= 1 << Width128; /* 128-bit mode (normal) */
1579
1580 word = dct;
1581 dword = X4Dimm;
1582 while (word < 8) {
1583 if (pDCTstat->Dimmx4Present & (1 << word))
1584 DramConfigLo |= 1 << dword; /* X4Dimm[3:0] */
1585 word++;
1586 word++;
1587 dword++;
1588 }
1589
1590 if (!(Status & (1 << SB_Registered)))
Zheng Baoc3af12f2010-10-08 05:08:47 +00001591 DramConfigLo |= 1 << UnBuffDimm; /* Unbuffered DIMMs */
Marc Jones8ae8c882007-12-19 01:32:08 +00001592
1593 if (mctGet_NVbits(NV_ECC_CAP))
1594 if (Status & (1 << SB_ECCDIMMs))
1595 if ( mctGet_NVbits(NV_ECC))
1596 DramConfigLo |= 1 << DimmEcEn;
1597
Zheng Bao69436e12011-01-06 02:18:12 +00001598 DramConfigLo = mct_DisDllShutdownSR(pMCTstat, pDCTstat, DramConfigLo, dct);
Marc Jones8ae8c882007-12-19 01:32:08 +00001599
1600 /* Build Dram Config Hi Register Value */
1601 dword = pDCTstat->Speed;
1602 DramConfigHi |= dword - 1; /* get MemClk encoding */
1603 DramConfigHi |= 1 << MemClkFreqVal;
1604
1605 if (Status & (1 << SB_Registered))
1606 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0))
1607 /* set only if x8 Registered DIMMs in System*/
1608 DramConfigHi |= 1 << RDqsEn;
1609
1610 if (mctGet_NVbits(NV_CKE_PDEN)) {
1611 DramConfigHi |= 1 << 15; /* PowerDownEn */
1612 if (mctGet_NVbits(NV_CKE_CTL))
1613 /*Chip Select control of CKE*/
1614 DramConfigHi |= 1 << 16;
1615 }
1616
1617 /* Control Bank Swizzle */
1618 if (0) /* call back not needed mctBankSwizzleControl_D()) */
1619 DramConfigHi &= ~(1 << BankSwizzleMode);
1620 else
1621 DramConfigHi |= 1 << BankSwizzleMode; /* recommended setting (default) */
1622
1623 /* Check for Quadrank DIMM presence */
1624 if ( pDCTstat->DimmQRPresent != 0) {
1625 byte = mctGet_NVbits(NV_4RANKType);
1626 if (byte == 2)
1627 DramConfigHi |= 1 << 17; /* S4 (4-Rank SO-DIMMs) */
1628 else if (byte == 1)
1629 DramConfigHi |= 1 << 18; /* R4 (4-Rank Registered DIMMs) */
1630 }
1631
1632 if (0) /* call back not needed mctOverrideDcqBypMax_D ) */
1633 val = mctGet_NVbits(NV_BYPMAX);
1634 else
1635 val = 0x0f; // recommended setting (default)
1636 DramConfigHi |= val << 24;
1637
1638 val = pDCTstat->DIMM2Kpage;
1639 if (pDCTstat->GangedMode != 0) {
1640 if (dct != 0) {
1641 val &= 0x55;
1642 } else {
1643 val &= 0xAA;
1644 }
1645 }
1646 if (val)
1647 val = Tab_2KTfawT_k[pDCTstat->Speed];
1648 else
1649 val = Tab_1KTfawT_k[pDCTstat->Speed];
1650
1651 if (pDCTstat->Speed == 5)
1652 val >>= 1;
1653
1654 val -= Bias_TfawT;
1655 val <<= 28;
1656 DramConfigHi |= val; /* Tfaw for 1K or 2K paged drams */
1657
1658 // FIXME: Skip for Ax versions
1659 DramConfigHi |= 1 << DcqArbBypassEn;
1660
1661
1662 /* Build MemClkDis Value from Dram Timing Lo and
1663 Dram Config Misc Registers
1664 1. We will assume that MemClkDis field has been preset prior to this
1665 point.
1666 2. We will only set MemClkDis bits if a DIMM is NOT present AND if:
 1667	   NV_AllMemClks == 0 AND SB_DiagClks == 0 */
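	/* Example of rule 2 (socket wiring assumed): when NV_AllMemClks is 0
	 * and SB_DiagClks is clear, the per-package table below maps each
	 * MemClkDis bit n to a DIMM socket, and DramTimingLo[24+n] is set for
	 * every entry whose socket has no valid DIMM, so only populated
	 * sockets keep their memory clock running.
	 */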
1668
1669
1670 /* Dram Timing Low (owns Clock Enable bits) */
1671 DramTimingLo = Get_NB32(dev, 0x88 + reg_off);
1672 if (mctGet_NVbits(NV_AllMemClks) == 0) {
1673 /* Special Jedec SPD diagnostic bit - "enable all clocks" */
1674 if (!(pDCTstat->Status & (1<<SB_DiagClks))) {
1675 const u8 *p;
1676 byte = mctGet_NVbits(NV_PACK_TYPE);
1677 if (byte == PT_L1)
1678 p = Tab_L1CLKDis;
1679 else if (byte == PT_M2)
1680 p = Tab_M2CLKDis;
1681 else
1682 p = Tab_S1CLKDis;
1683
1684 dword = 0;
1685 while(dword < MAX_DIMMS_SUPPORTED) {
1686 val = p[dword];
1687 print_tx("DramTimingLo: val=", val);
1688 if (!(pDCTstat->DIMMValid & (1<<val)))
1689 /*disable memclk*/
1690 DramTimingLo |= 1<<(dword+24);
 1691				dword++;
1692 }
1693 }
1694 }
1695
1696 print_tx("AutoConfig_D: DramControl: ", DramControl);
1697 print_tx("AutoConfig_D: DramTimingLo: ", DramTimingLo);
1698 print_tx("AutoConfig_D: DramConfigMisc: ", DramConfigMisc);
1699 print_tx("AutoConfig_D: DramConfigMisc2: ", DramConfigMisc2);
1700 print_tx("AutoConfig_D: DramConfigLo: ", DramConfigLo);
1701 print_tx("AutoConfig_D: DramConfigHi: ", DramConfigHi);
1702
1703 /* Write Values to the registers */
1704 Set_NB32(dev, 0x78 + reg_off, DramControl);
1705 Set_NB32(dev, 0x88 + reg_off, DramTimingLo);
1706 Set_NB32(dev, 0xA0 + reg_off, DramConfigMisc);
1707 Set_NB32(dev, 0xA8 + reg_off, DramConfigMisc2);
1708 Set_NB32(dev, 0x90 + reg_off, DramConfigLo);
1709 mct_SetDramConfigHi_D(pDCTstat, dct, DramConfigHi);
1710 mct_ForceAutoPrecharge_D(pDCTstat, dct);
1711 mct_EarlyArbEn_D(pMCTstat, pDCTstat);
1712 mctHookAfterAutoCfg();
1713
1714// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1715
1716 print_tx("AutoConfig: Status ", pDCTstat->Status);
1717 print_tx("AutoConfig: ErrStatus ", pDCTstat->ErrStatus);
1718 print_tx("AutoConfig: ErrCode ", pDCTstat->ErrCode);
1719 print_t("AutoConfig: Done\n");
1720AutoConfig_exit:
1721 return pDCTstat->ErrCode;
1722}
1723
1724
1725static void SPDSetBanks_D(struct MCTStatStruc *pMCTstat,
1726 struct DCTStatStruc *pDCTstat, u8 dct)
1727{
1728 /* Set bank addressing, program Mask values and build a chip-select
1729 * population map. This routine programs PCI 0:24N:2x80 config register
1730 * and PCI 0:24N:2x60,64,68,6C config registers (CS Mask 0-3).
1731 */
1732
1733 u8 ChipSel, Rows, Cols, Ranks ,Banks, DevWidth;
1734 u32 BankAddrReg, csMask;
1735
1736 u32 val;
1737 u32 reg;
1738 u32 dev;
1739 u32 reg_off;
1740 u8 byte;
1741 u16 word;
1742 u32 dword;
1743 u16 smbaddr;
1744
1745 dev = pDCTstat->dev_dct;
1746 reg_off = 0x100 * dct;
1747
1748 BankAddrReg = 0;
1749 for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel+=2) {
1750 byte = ChipSel;
1751 if ((pDCTstat->Status & (1 << SB_64MuxedMode)) && ChipSel >=4)
1752 byte -= 3;
1753
1754 if (pDCTstat->DIMMValid & (1<<byte)) {
1755 smbaddr = Get_DIMMAddress_D(pDCTstat, (ChipSel + dct));
1756
1757 byte = mctRead_SPD(smbaddr, SPD_ROWSZ);
1758 Rows = byte & 0x1f;
1759
1760 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
1761 Cols = byte & 0x1f;
1762
1763 Banks = mctRead_SPD(smbaddr, SPD_LBANKS);
1764
1765 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
1766 DevWidth = byte & 0x7f; /* bits 0-6 = bank 0 width */
1767
1768 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
1769 Ranks = (byte & 7) + 1;
1770
 1771			/* Configure Bank encoding
 1772			 * Use a 6-bit key into a lookup table.
 1773			 * Key (index) = CCCBRR, where CCC is the number of
 1774			 * Columns minus 9, RR is the number of Rows minus 13,
 1775			 * and B is 1 for 8-bank devices (0 for 4-bank devices).
 1776			 * See "6-bit Bank Addressing Table" at the end of
 1777			 * this file. */
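			/* Worked example (SPD values assumed): a DIMM with 10
			 * column bits, 14 row bits and 8 internal banks gives
			 * key = ((10 - 9) << 3) | 4 | (14 - 13) = 0x0D, which
			 * is then matched against Tab_BankAddr[] below to pick
			 * the CS bank-address-mapping encoding.
			 */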
1778 byte = Cols - 9; /* 9 Cols is smallest dev size */
1779 byte <<= 3; /* make room for row and bank bits*/
1780 if (Banks == 8)
1781 byte |= 4;
1782
1783 /* 13 Rows is smallest dev size */
1784 byte |= Rows - 13; /* CCCBRR internal encode */
1785
1786 for (dword=0; dword < 12; dword++) {
1787 if (byte == Tab_BankAddr[dword])
1788 break;
1789 }
1790
1791 if (dword < 12) {
1792
1793 /* bit no. of CS field in address mapping reg.*/
1794 dword <<= (ChipSel<<1);
1795 BankAddrReg |= dword;
1796
 1797				/* Mask value = (2^(rows+cols+banks+3) - 1) >> 8,
 1798				   i.e. 2^(rows+cols+banks-5) - 1, where 'banks' is log2 of the bank count */
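				/* Worked example (geometry assumed): 14 rows and
				 * 10 cols with 8 banks give byte = 14 + 10 - 2 = 22,
				 * so csMask = (1 << 22) - 1 = 0x3FFFFF, i.e. a 1GB
				 * rank (2^(14+10+3) * 8 bytes) in the >>8-scaled
				 * mask format; 128-bit mode doubles it via byte++.
				 */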
1799 csMask = 0;
1800
1801 byte = Rows + Cols; /* cl=rows+cols*/
1802 if (Banks == 8)
1803 byte -= 2; /* 3 banks - 5 */
1804 else
1805 byte -= 3; /* 2 banks - 5 */
1806 /* mask size (64-bit rank only) */
1807
1808 if (pDCTstat->Status & (1 << SB_128bitmode))
1809 byte++; /* double mask size if in 128-bit mode*/
1810
1811 csMask |= 1 << byte;
1812 csMask--;
1813
1814 /*set ChipSelect population indicator even bits*/
1815 pDCTstat->CSPresent |= (1<<ChipSel);
1816 if (Ranks >= 2)
1817 /*set ChipSelect population indicator odd bits*/
1818 pDCTstat->CSPresent |= 1 << (ChipSel + 1);
1819
1820 reg = 0x60+(ChipSel<<1) + reg_off; /*Dram CS Mask Register */
1821 val = csMask;
1822 val &= 0x1FF83FE0; /* Mask out reserved bits.*/
1823 Set_NB32(dev, reg, val);
1824 }
1825 } else {
1826 if (pDCTstat->DIMMSPDCSE & (1<<ChipSel))
1827 pDCTstat->CSTestFail |= (1<<ChipSel);
1828 } /* if DIMMValid*/
1829 } /* while ChipSel*/
1830
1831 SetCSTriState(pMCTstat, pDCTstat, dct);
1832 /* SetCKETriState */
1833 SetODTTriState(pMCTstat, pDCTstat, dct);
1834
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00001835 if (pDCTstat->Status & (1 << SB_128bitmode)) {
Marc Jones8ae8c882007-12-19 01:32:08 +00001836		SetCSTriState(pMCTstat, pDCTstat, 1); /* force dct 1 */
 1837		SetODTTriState(pMCTstat, pDCTstat, 1); /* force dct 1 */
1838 }
1839 word = pDCTstat->CSPresent;
1840 mctGetCS_ExcludeMap(); /* mask out specified chip-selects */
1841 word ^= pDCTstat->CSPresent;
1842 pDCTstat->CSTestFail |= word; /* enable ODT to disabled DIMMs */
1843 if (!pDCTstat->CSPresent)
1844 pDCTstat->ErrCode = SC_StopError;
1845
1846 reg = 0x80 + reg_off; /* Bank Addressing Register */
1847 Set_NB32(dev, reg, BankAddrReg);
1848
1849// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
1850
1851 print_tx("SPDSetBanks: Status ", pDCTstat->Status);
1852 print_tx("SPDSetBanks: ErrStatus ", pDCTstat->ErrStatus);
1853 print_tx("SPDSetBanks: ErrCode ", pDCTstat->ErrCode);
1854 print_t("SPDSetBanks: Done\n");
1855}
1856
1857
1858static void SPDCalcWidth_D(struct MCTStatStruc *pMCTstat,
1859 struct DCTStatStruc *pDCTstat)
1860{
1861 /* Per SPDs, check the symmetry of DIMM pairs (DIMM on Channel A
1862 * matching with DIMM on Channel B), the overall DIMM population,
1863 * and determine the width mode: 64-bit, 64-bit muxed, 128-bit.
1864 */
1865
1866 u8 i;
1867 u8 smbaddr, smbaddr1;
1868 u8 byte, byte1;
1869
1870 /* Check Symmetry of Channel A and Channel B DIMMs
1871 (must be matched for 128-bit mode).*/
1872 for (i=0; i < MAX_DIMMS_SUPPORTED; i += 2) {
1873 if ((pDCTstat->DIMMValid & (1 << i)) && (pDCTstat->DIMMValid & (1<<(i+1)))) {
1874 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
1875 smbaddr1 = Get_DIMMAddress_D(pDCTstat, i+1);
1876
1877 byte = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0x1f;
1878 byte1 = mctRead_SPD(smbaddr1, SPD_ROWSZ) & 0x1f;
1879 if (byte != byte1) {
1880 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1881 break;
1882 }
1883
1884 byte = mctRead_SPD(smbaddr, SPD_COLSZ) & 0x1f;
1885 byte1 = mctRead_SPD(smbaddr1, SPD_COLSZ) & 0x1f;
1886 if (byte != byte1) {
1887 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1888 break;
1889 }
1890
1891 byte = mctRead_SPD(smbaddr, SPD_BANKSZ);
1892 byte1 = mctRead_SPD(smbaddr1, SPD_BANKSZ);
1893 if (byte != byte1) {
1894 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1895 break;
1896 }
1897
1898 byte = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0x7f;
1899 byte1 = mctRead_SPD(smbaddr1, SPD_DEVWIDTH) & 0x7f;
1900 if (byte != byte1) {
1901 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1902 break;
1903 }
1904
1905 byte = mctRead_SPD(smbaddr, SPD_DMBANKS) & 7; /* #ranks-1 */
1906 byte1 = mctRead_SPD(smbaddr1, SPD_DMBANKS) & 7; /* #ranks-1 */
1907 if (byte != byte1) {
1908 pDCTstat->ErrStatus |= (1<<SB_DimmMismatchO);
1909 break;
1910 }
1911
1912 }
1913 }
1914
1915}
1916
1917
1918static void StitchMemory_D(struct MCTStatStruc *pMCTstat,
1919 struct DCTStatStruc *pDCTstat, u8 dct)
1920{
1921 /* Requires that Mask values for each bank be programmed first and that
1922 * the chip-select population indicator is correctly set.
1923 */
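	/* Sketch of the placement policy implemented below: with sparing
	 * disabled, chip-selects are assigned base addresses largest-first,
	 * e.g. (sizes assumed) a 1GB and a 512MB rank end up at base 0 and
	 * base 1GB respectively, with nxtcsBase accumulating the running top
	 * of memory for this DCT.
	 */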
1924
1925 u8 b = 0;
1926 u32 nxtcsBase, curcsBase;
1927 u8 p, q;
1928 u32 Sizeq, BiggestBank;
1929 u8 _DSpareEn;
1930
1931 u16 word;
1932 u32 dev;
1933 u32 reg;
1934 u32 reg_off;
1935 u32 val;
1936
1937
1938 dev = pDCTstat->dev_dct;
1939 reg_off = 0x100 * dct;
1940
1941 _DSpareEn = 0;
1942
1943 /* CS Sparing 1=enabled, 0=disabled */
1944 if (mctGet_NVbits(NV_CS_SpareCTL) & 1) {
1945 if (MCT_DIMM_SPARE_NO_WARM) {
 1946			/* No-warm-reset DIMM spare flow */
1947 if (pMCTstat->GStatus & 1 << GSB_EnDIMMSpareNW) {
1948 word = pDCTstat->CSPresent;
1949 val = bsf(word);
1950 word &= ~(1<<val);
1951 if (word)
1952 /* Make sure at least two chip-selects are available */
1953 _DSpareEn = 1;
1954 else
1955 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1956 }
1957 } else {
1958 if (!mctGet_NVbits(NV_DQSTrainCTL)) { /*DQS Training 1=enabled, 0=disabled */
1959 word = pDCTstat->CSPresent;
1960 val = bsf(word);
1961 word &= ~(1 << val);
1962 if (word)
1963 /* Make sure at least two chip-selects are available */
1964 _DSpareEn = 1;
1965 else
1966 pDCTstat->ErrStatus |= 1 << SB_SpareDis;
1967 }
1968 }
1969 }
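	/* Example of the sparing guard above (CSPresent value assumed): with
	 * CSPresent = 0x05, bsf() picks chip-select 0 as the spare candidate;
	 * clearing that bit still leaves 0x04, so _DSpareEn is set. With only
	 * one chip-select populated the spare would consume all of memory, so
	 * SB_SpareDis is flagged instead.
	 */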
1970
1971 nxtcsBase = 0; /* Next available cs base ADDR[39:8] */
1972 for (p=0; p < MAX_DIMMS_SUPPORTED; p++) {
1973 BiggestBank = 0;
1974 for (q = 0; q < MAX_CS_SUPPORTED; q++) { /* from DIMMS to CS */
1975 if (pDCTstat->CSPresent & (1 << q)) { /* bank present? */
1976 reg = 0x40 + (q << 2) + reg_off; /* Base[q] reg.*/
1977 val = Get_NB32(dev, reg);
1978 if (!(val & 3)) { /* (CSEnable|Spare==1)bank is enabled already? */
Arne Georg Gleditschd6689ed2010-09-09 14:54:07 +00001979 reg = 0x60 + ((q << 1) & 0xc) + reg_off; /*Mask[q] reg.*/
Marc Jones8ae8c882007-12-19 01:32:08 +00001980 val = Get_NB32(dev, reg);
1981 val >>= 19;
1982 val++;
1983 val <<= 19;
1984 Sizeq = val; //never used
1985 if (val > BiggestBank) {
1986 /*Bingo! possibly Map this chip-select next! */
1987 BiggestBank = val;
1988 b = q;
1989 }
1990 }
1991 } /*if bank present */
1992 } /* while q */
 1993		if (BiggestBank != 0) {
1994 curcsBase = nxtcsBase; /* curcsBase=nxtcsBase*/
1995 /* DRAM CS Base b Address Register offset */
1996 reg = 0x40 + (b << 2) + reg_off;
1997 if (_DSpareEn) {
1998 BiggestBank = 0;
1999 val = 1 << Spare; /* Spare Enable*/
2000 } else {
2001 val = curcsBase;
2002 val |= 1 << CSEnable; /* Bank Enable */
2003 }
2004 Set_NB32(dev, reg, val);
2005 if (_DSpareEn)
2006 _DSpareEn = 0;
2007 else
2008 /* let nxtcsBase+=Size[b] */
2009 nxtcsBase += BiggestBank;
2010 }
2011
2012 /* bank present but disabled?*/
 2013		if (pDCTstat->CSTestFail & (1 << p)) {
2014 /* DRAM CS Base b Address Register offset */
2015 reg = (p << 2) + 0x40 + reg_off;
2016 val = 1 << TestFail;
2017 Set_NB32(dev, reg, val);
2018 }
2019 }
2020
2021 if (nxtcsBase) {
2022 pDCTstat->DCTSysLimit = nxtcsBase - 1;
2023 mct_AfterStitchMemory(pMCTstat, pDCTstat, dct);
2024 }
2025
2026// dump_pci_device(PCI_DEV(0, 0x18+pDCTstat->Node_ID, 2));
2027
2028 print_tx("StitchMemory: Status ", pDCTstat->Status);
2029 print_tx("StitchMemory: ErrStatus ", pDCTstat->ErrStatus);
2030 print_tx("StitchMemory: ErrCode ", pDCTstat->ErrCode);
2031 print_t("StitchMemory: Done\n");
2032}
2033
2034
2035static u8 Get_Tk_D(u8 k)
2036{
2037 return Table_T_k[k];
2038}
2039
2040
2041static u8 Get_CLj_D(u8 j)
2042{
2043 return Table_CL2_j[j];
2044}
2045
2046static u8 Get_DefTrc_k_D(u8 k)
2047{
2048 return Tab_defTrc_k[k];
2049}
2050
2051
2052static u16 Get_40Tk_D(u8 k)
2053{
2054 return Tab_40T_k[k]; /* FIXME: k or k<<1 ?*/
2055}
2056
2057
2058static u16 Get_Fk_D(u8 k)
2059{
2060 return Table_F_k[k]; /* FIXME: k or k<<1 ? */
2061}
2062
2063
2064static u8 Dimm_Supports_D(struct DCTStatStruc *pDCTstat,
2065 u8 i, u8 j, u8 k)
2066{
2067 u8 Tk, CLj, CL_i;
2068 u8 ret = 0;
2069
2070 u32 DIMMi;
2071 u8 byte;
2072 u16 word, wordx;
2073
2074 DIMMi = Get_DIMMAddress_D(pDCTstat, i);
2075
2076 CLj = Get_CLj_D(j);
2077
2078 /* check if DIMMi supports CLj */
2079 CL_i = mctRead_SPD(DIMMi, SPD_CASLAT);
2080 byte = CL_i & CLj;
2081 if (byte) {
2082 /*find out if its CL X, CLX-1, or CLX-2 */
2083 word = bsr(byte); /* bit position of CLj */
2084 wordx = bsr(CL_i); /* bit position of CLX of CLi */
2085 wordx -= word; /* CL number (CL no. = 0,1, 2, or 3) */
2086 wordx <<= 3; /* 8 bits per SPD byte index */
2087 /*get T from SPD byte 9, 23, 25*/
2088 word = (EncodedTSPD >> wordx) & 0xFF;
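		/* Worked example (EncodedTSPD layout assumed: it packs the SPD
		 * byte indices 9, 23 and 25 for CL X, X-1 and X-2): if CL_i
		 * advertises CL6/CL5/CL4 and CLj is CL5, then wordx - word = 1,
		 * so wordx = 8 and the lookup selects SPD byte 23, the minimum
		 * cycle time at CAS latency X-1.
		 */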
2089 Tk = Get_Tk_D(k);
2090 byte = mctRead_SPD(DIMMi, word); /* DIMMi speed */
2091 if (Tk < byte) {
2092 ret = 1;
 2093		} else if (byte == 0) {
2094 pDCTstat->ErrStatus |= 1<<SB_NoCycTime;
2095 ret = 1;
2096 } else {
2097 ret = 0; /* DIMM is capable! */
2098 }
2099 } else {
2100 ret = 1;
2101 }
2102 return ret;
2103}
2104
2105
2106static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
2107 struct DCTStatStruc *pDCTstat)
2108{
2109 /* Check DIMMs present, verify checksum, flag SDRAM type,
2110 * build population indicator bitmaps, and preload bus loading
2111 * of DIMMs into DCTStatStruc.
2112 * MAAload=number of devices on the "A" bus.
2113 * MABload=number of devices on the "B" bus.
2114 * MAAdimms=number of DIMMs on the "A" bus slots.
2115 * MABdimms=number of DIMMs on the "B" bus slots.
2116 * DATAAload=number of ranks on the "A" bus slots.
2117 * DATABload=number of ranks on the "B" bus slots.
2118 */
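	/* Illustration of the bus-load bookkeeping (population assumed): two
	 * dual-rank x8 DIMMs in the channel A slots give MAAdimms = 2,
	 * DATAAload = 4 ranks and MAAload = 2 * (8 * 2) = 32 address/command
	 * loads, since the per-DIMM load is doubled for dual-rank modules.
	 */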
2119
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002120 u16 i, j, k;
Marc Jones8ae8c882007-12-19 01:32:08 +00002121 u8 smbaddr, Index;
2122 u16 Checksum;
2123 u8 SPDCtrl;
2124 u16 RegDIMMPresent, MaxDimms;
2125 u8 devwidth;
2126 u16 DimmSlots;
2127 u8 byte = 0, bytex;
2128 u16 word;
2129
2130 /* preload data structure with addrs */
2131 mctGet_DIMMAddr(pDCTstat, pDCTstat->Node_ID);
2132
2133 DimmSlots = MaxDimms = mctGet_NVbits(NV_MAX_DIMMS);
2134
2135 SPDCtrl = mctGet_NVbits(NV_SPDCHK_RESTRT);
2136
2137 RegDIMMPresent = 0;
2138 pDCTstat->DimmQRPresent = 0;
2139
2140 for (i = 0; i< MAX_DIMMS_SUPPORTED; i++) {
2141 if (i >= MaxDimms)
2142 break;
2143
2144 if ((pDCTstat->DimmQRPresent & (1 << i)) || (i < DimmSlots)) {
2145 print_tx("\t DIMMPresence: i=", i);
2146 smbaddr = Get_DIMMAddress_D(pDCTstat, i);
2147 print_tx("\t DIMMPresence: smbaddr=", smbaddr);
2148 if (smbaddr) {
2149 Checksum = 0;
2150 for (Index=0; Index < 64; Index++){
2151 int status;
2152 status = mctRead_SPD(smbaddr, Index);
2153 if (status < 0)
2154 break;
2155 byte = status & 0xFF;
2156 if (Index < 63)
2157 Checksum += byte;
2158 }
2159
2160 if (Index == 64) {
2161 pDCTstat->DIMMPresent |= 1 << i;
2162 if ((Checksum & 0xFF) == byte) {
2163 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2164 if (byte == JED_DDR2SDRAM) {
2165 /*Dimm is 'Present'*/
2166 pDCTstat->DIMMValid |= 1 << i;
2167 }
2168 } else {
2169 pDCTstat->DIMMSPDCSE = 1 << i;
2170 if (SPDCtrl == 0) {
2171 pDCTstat->ErrStatus |= 1 << SB_DIMMChkSum;
2172 pDCTstat->ErrCode = SC_StopError;
2173 } else {
2174 /*if NV_SPDCHK_RESTRT is set to 1, ignore faulty SPD checksum*/
2175 pDCTstat->ErrStatus |= 1<<SB_DIMMChkSum;
2176 byte = mctRead_SPD(smbaddr, SPD_TYPE);
2177 if (byte == JED_DDR2SDRAM)
2178 pDCTstat->DIMMValid |= 1 << i;
2179 }
2180 }
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002181 /* Get module information for SMBIOS */
2182 if (pDCTstat->DIMMValid & (1 << i)) {
2183 pDCTstat->DimmManufacturerID[i] = 0;
2184 for (k = 0; k < 8; k++)
2185 pDCTstat->DimmManufacturerID[i] |= ((uint64_t)mctRead_SPD(smbaddr, SPD_MANID_START + k)) << (k * 8);
2186 for (k = 0; k < SPD_PARTN_LENGTH; k++)
2187 pDCTstat->DimmPartNumber[i][k] = mctRead_SPD(smbaddr, SPD_PARTN_START + k);
Timothy Pearson730a0432015-10-16 13:51:51 -05002188 pDCTstat->DimmPartNumber[i][SPD_PARTN_LENGTH] = 0;
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002189 pDCTstat->DimmRevisionNumber[i] = 0;
2190 for (k = 0; k < 2; k++)
2191 pDCTstat->DimmRevisionNumber[i] |= ((uint16_t)mctRead_SPD(smbaddr, SPD_REVNO_START + k)) << (k * 8);
2192 pDCTstat->DimmSerialNumber[i] = 0;
2193 for (k = 0; k < 4; k++)
2194 pDCTstat->DimmSerialNumber[i] |= ((uint32_t)mctRead_SPD(smbaddr, SPD_SERIAL_START + k)) << (k * 8);
2195 pDCTstat->DimmRows[i] = mctRead_SPD(smbaddr, SPD_ROWSZ) & 0xf;
2196 pDCTstat->DimmCols[i] = mctRead_SPD(smbaddr, SPD_COLSZ) & 0xf;
2197 pDCTstat->DimmRanks[i] = (mctRead_SPD(smbaddr, SPD_DMBANKS) & 0x7) + 1;
2198 pDCTstat->DimmBanks[i] = mctRead_SPD(smbaddr, SPD_LBANKS);
2199 pDCTstat->DimmWidth[i] = mctRead_SPD(smbaddr, SPD_DEVWIDTH);
2200 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002201 /* Check module type */
2202 byte = mctRead_SPD(smbaddr, SPD_DIMMTYPE);
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002203 if (byte & JED_REGADCMSK) {
Marc Jones8ae8c882007-12-19 01:32:08 +00002204 RegDIMMPresent |= 1 << i;
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002205 pDCTstat->DimmRegistered[i] = 1;
Timothy Pearson730a0432015-10-16 13:51:51 -05002206 } else {
Timothy Pearson620fa5f2015-03-27 22:50:09 -05002207 pDCTstat->DimmRegistered[i] = 0;
2208 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002209 /* Check ECC capable */
2210 byte = mctRead_SPD(smbaddr, SPD_EDCTYPE);
2211 if (byte & JED_ECC) {
2212 /* DIMM is ECC capable */
2213 pDCTstat->DimmECCPresent |= 1 << i;
2214 }
2215 if (byte & JED_ADRCPAR) {
Zheng Bao7b1a3c32010-09-28 04:43:16 +00002216					/* DIMM supports address/command parity */
2217 pDCTstat->DimmPARPresent |= 1 << i;
Marc Jones8ae8c882007-12-19 01:32:08 +00002218 }
2219 /* Check if x4 device */
2220 devwidth = mctRead_SPD(smbaddr, SPD_DEVWIDTH) & 0xFE;
2221 if (devwidth == 4) {
 2222					/* DIMM is made with x4 drams */
2223 pDCTstat->Dimmx4Present |= 1 << i;
2224 } else if (devwidth == 8) {
2225 pDCTstat->Dimmx8Present |= 1 << i;
2226 } else if (devwidth == 16) {
2227 pDCTstat->Dimmx16Present |= 1 << i;
2228 }
2229 /* check page size */
2230 byte = mctRead_SPD(smbaddr, SPD_COLSZ);
2231 byte &= 0x0F;
2232 word = 1 << byte;
2233 word >>= 3;
2234 word *= devwidth; /* (((2^COLBITS) / 8) * ORG) / 2048 */
2235 word >>= 11;
2236 if (word)
2237 pDCTstat->DIMM2Kpage |= 1 << i;
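					/* Worked example (SPD values assumed): 11
					 * column bits on an x8 device give
					 * ((2^11) / 8) * 8 = 2048 bytes per page,
					 * so the 2K-page bit is set; 10 column
					 * bits give 1024 bytes and leave it clear.
					 */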
2238
2239 /*Check if SPD diag bit 'analysis probe installed' is set */
2240 byte = mctRead_SPD(smbaddr, SPD_ATTRIB);
2241 if ( byte & JED_PROBEMSK )
2242 pDCTstat->Status |= 1<<SB_DiagClks;
2243
2244 byte = mctRead_SPD(smbaddr, SPD_DMBANKS);
2245 if (!(byte & (1<< SPDPLBit)))
2246 pDCTstat->DimmPlPresent |= 1 << i;
2247 byte &= 7;
2248 byte++; /* ranks */
2249 if (byte > 2) {
2250 /* if any DIMMs are QR, we have to make two passes through DIMMs*/
2251 if ( pDCTstat->DimmQRPresent == 0) {
2252 MaxDimms <<= 1;
2253 }
2254 if (i < DimmSlots) {
2255 pDCTstat->DimmQRPresent |= (1 << i) | (1 << (i+4));
2256 }
2257 byte = 2; /* upper two ranks of QR DIMM will be counted on another DIMM number iteration*/
2258 } else if (byte == 2) {
2259 pDCTstat->DimmDRPresent |= 1 << i;
2260 }
2261 bytex = devwidth;
2262 if (devwidth == 16)
2263 bytex = 4;
2264 else if (devwidth == 4)
2265 bytex=16;
2266
2267 if (byte == 2)
2268 bytex <<= 1; /*double Addr bus load value for dual rank DIMMs*/
2269
2270 j = i & (1<<0);
2271 pDCTstat->DATAload[j] += byte; /*number of ranks on DATA bus*/
2272 pDCTstat->MAload[j] += bytex; /*number of devices on CMD/ADDR bus*/
2273 pDCTstat->MAdimms[j]++; /*number of DIMMs on A bus */
2274 /*check for DRAM package Year <= 06*/
2275 byte = mctRead_SPD(smbaddr, SPD_MANDATEYR);
2276 if (byte < MYEAR06) {
2277 /*Year < 06 and hence Week < 24 of 06 */
2278 pDCTstat->DimmYr06 |= 1 << i;
2279 pDCTstat->DimmWk2406 |= 1 << i;
2280 } else if (byte == MYEAR06) {
2281 /*Year = 06, check if Week <= 24 */
2282 pDCTstat->DimmYr06 |= 1 << i;
2283 byte = mctRead_SPD(smbaddr, SPD_MANDATEWK);
2284 if (byte <= MWEEK24)
2285 pDCTstat->DimmWk2406 |= 1 << i;
2286 }
2287 }
2288 }
2289 }
2290 }
2291 print_tx("\t DIMMPresence: DIMMValid=", pDCTstat->DIMMValid);
2292 print_tx("\t DIMMPresence: DIMMPresent=", pDCTstat->DIMMPresent);
2293 print_tx("\t DIMMPresence: RegDIMMPresent=", RegDIMMPresent);
2294 print_tx("\t DIMMPresence: DimmECCPresent=", pDCTstat->DimmECCPresent);
2295 print_tx("\t DIMMPresence: DimmPARPresent=", pDCTstat->DimmPARPresent);
2296 print_tx("\t DIMMPresence: Dimmx4Present=", pDCTstat->Dimmx4Present);
2297 print_tx("\t DIMMPresence: Dimmx8Present=", pDCTstat->Dimmx8Present);
2298 print_tx("\t DIMMPresence: Dimmx16Present=", pDCTstat->Dimmx16Present);
2299 print_tx("\t DIMMPresence: DimmPlPresent=", pDCTstat->DimmPlPresent);
2300 print_tx("\t DIMMPresence: DimmDRPresent=", pDCTstat->DimmDRPresent);
2301 print_tx("\t DIMMPresence: DimmQRPresent=", pDCTstat->DimmQRPresent);
2302 print_tx("\t DIMMPresence: DATAload[0]=", pDCTstat->DATAload[0]);
2303 print_tx("\t DIMMPresence: MAload[0]=", pDCTstat->MAload[0]);
2304 print_tx("\t DIMMPresence: MAdimms[0]=", pDCTstat->MAdimms[0]);
2305 print_tx("\t DIMMPresence: DATAload[1]=", pDCTstat->DATAload[1]);
2306 print_tx("\t DIMMPresence: MAload[1]=", pDCTstat->MAload[1]);
2307 print_tx("\t DIMMPresence: MAdimms[1]=", pDCTstat->MAdimms[1]);
2308
2309 if (pDCTstat->DIMMValid != 0) { /* If any DIMMs are present...*/
2310 if (RegDIMMPresent != 0) {
 2311			if ((RegDIMMPresent ^ pDCTstat->DIMMValid) != 0) {
2312 /* module type DIMM mismatch (reg'ed, unbuffered) */
2313 pDCTstat->ErrStatus |= 1<<SB_DimmMismatchM;
2314 pDCTstat->ErrCode = SC_StopError;
2315 } else{
2316 /* all DIMMs are registered */
2317 pDCTstat->Status |= 1<<SB_Registered;
2318 }
2319 }
2320 if (pDCTstat->DimmECCPresent != 0) {
 2321			if ((pDCTstat->DimmECCPresent ^ pDCTstat->DIMMValid) == 0) {
2322 /* all DIMMs are ECC capable */
2323 pDCTstat->Status |= 1<<SB_ECCDIMMs;
2324 }
2325 }
2326 if (pDCTstat->DimmPARPresent != 0) {
2327 if ((pDCTstat->DimmPARPresent ^ pDCTstat->DIMMValid) == 0) {
2328 /*all DIMMs are Parity capable */
2329 pDCTstat->Status |= 1<<SB_PARDIMMs;
2330 }
2331 }
2332 } else {
2333 /* no DIMMs present or no DIMMs that qualified. */
2334 pDCTstat->ErrStatus |= 1<<SB_NoDimms;
2335 pDCTstat->ErrCode = SC_StopError;
2336 }
2337
2338 print_tx("\t DIMMPresence: Status ", pDCTstat->Status);
2339 print_tx("\t DIMMPresence: ErrStatus ", pDCTstat->ErrStatus);
2340 print_tx("\t DIMMPresence: ErrCode ", pDCTstat->ErrCode);
2341 print_t("\t DIMMPresence: Done\n");
2342
2343 mctHookAfterDIMMpre();
2344
2345 return pDCTstat->ErrCode;
2346}
2347
2348
2349static u8 Sys_Capability_D(struct MCTStatStruc *pMCTstat,
2350 struct DCTStatStruc *pDCTstat, int j, int k)
2351{
2352 /* Determine if system is capable of operating at given input
2353 * parameters for CL, and T. There are three components to
2354 * determining "maximum frequency" in AUTO mode: SPD component,
2355 * Bus load component, and "Preset" max frequency component.
2356 * This procedure is used to help find the SPD component and relies
2357 * on pre-determination of the bus load component and the Preset
2358 * components. The generalized algorithm for finding maximum
2359 * frequency is structured this way so as to optimize for CAS
2360 * latency (which might get better as a result of reduced frequency).
2361 * See "Global relationship between index values and item values"
2362 * for definition of CAS latency index (j) and Frequency index (k).
2363 */
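	/* Example (values assumed for illustration): with PresetmaxFreq
	 * capped at DDR2-667, probing k = DDR2-800 fails the frequency test
	 * below, while any j outside [J_MIN, J_MAX] fails the CAS-latency
	 * test, so the caller falls back to a lower frequency or a different
	 * CL combination.
	 */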
2364 u8 freqOK, ClOK;
2365 u8 ret = 0;
2366
2367 if (Get_Fk_D(k) > pDCTstat->PresetmaxFreq)
2368 freqOK = 0;
2369 else
2370 freqOK = 1;
2371
2372 /* compare proposed CAS latency with AMD Si capabilities */
2373 if ((j < J_MIN) || (j > J_MAX))
2374 ClOK = 0;
2375 else
2376 ClOK = 1;
2377
2378 if (freqOK && ClOK)
2379 ret = 1;
2380
2381 return ret;
2382}
2383
2384
2385static u8 Get_DIMMAddress_D(struct DCTStatStruc *pDCTstat, u8 i)
2386{
2387 u8 *p;
2388
2389 p = pDCTstat->DIMMAddr;
2390 //mct_BeforeGetDIMMAddress();
2391 return p[i];
2392}
2393
2394
2395static void mct_initDCT(struct MCTStatStruc *pMCTstat,
2396 struct DCTStatStruc *pDCTstat)
2397{
2398 u32 val;
2399 u8 err_code;
2400
2401 /* Config. DCT0 for Ganged or unganged mode */
2402 print_t("\tmct_initDCT: DCTInit_D 0\n");
2403 DCTInit_D(pMCTstat, pDCTstat, 0);
2404 if (pDCTstat->ErrCode == SC_FatalErr) {
2405 // Do nothing goto exitDCTInit; /* any fatal errors? */
2406 } else {
2407 /* Configure DCT1 if unganged and enabled*/
2408 if (!pDCTstat->GangedMode) {
2409 if ( pDCTstat->DIMMValidDCT[1] > 0) {
2410 print_t("\tmct_initDCT: DCTInit_D 1\n");
2411 err_code = pDCTstat->ErrCode; /* save DCT0 errors */
2412 pDCTstat->ErrCode = 0;
2413 DCTInit_D(pMCTstat, pDCTstat, 1);
2414 if (pDCTstat->ErrCode == 2) /* DCT1 is not Running */
2415 pDCTstat->ErrCode = err_code; /* Using DCT0 Error code to update pDCTstat.ErrCode */
2416 } else {
2417 val = 1 << DisDramInterface;
2418 Set_NB32(pDCTstat->dev_dct, 0x100 + 0x94, val);
2419 }
2420 }
2421 }
2422// exitDCTInit:
2423}
2424
2425
2426static void mct_DramInit(struct MCTStatStruc *pMCTstat,
2427 struct DCTStatStruc *pDCTstat, u8 dct)
2428{
2429 u32 val;
2430
2431 mct_BeforeDramInit_Prod_D(pMCTstat, pDCTstat);
2432 // FIXME: for rev A: mct_BeforeDramInit_D(pDCTstat, dct);
2433
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002434 /* Disable auto refresh before Dram init when in ganged mode (Erratum 278) */
Zheng Bao2ca2f172011-03-28 04:29:14 +00002435 if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002436 if (pDCTstat->GangedMode) {
2437 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2438 val |= 1 << DisAutoRefresh;
2439 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
2440 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002441 }
2442
2443 mct_DramInit_Hw_D(pMCTstat, pDCTstat, dct);
2444
2445 /* Re-enable auto refresh after Dram init when in ganged mode
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002446 * to ensure both DCTs are in sync (Erratum 278)
Marc Jones8ae8c882007-12-19 01:32:08 +00002447 */
2448
Zheng Bao2ca2f172011-03-28 04:29:14 +00002449 if (pDCTstat->LogicalCPUID & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_BA)) {
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002450 if (pDCTstat->GangedMode) {
2451 do {
2452 val = Get_NB32(pDCTstat->dev_dct, 0x90 + (0x100 * dct));
2453 } while (!(val & (1 << InitDram)));
Marc Jones8ae8c882007-12-19 01:32:08 +00002454
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002455 WaitRoutine_D(50);
Marc Jones8ae8c882007-12-19 01:32:08 +00002456
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002457 val = Get_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct));
2458 val &= ~(1 << DisAutoRefresh);
Zheng Bao0c51ddd2010-09-21 02:51:31 +00002459 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002460 val |= 1 << DisAutoRefresh;
Zheng Bao0c51ddd2010-09-21 02:51:31 +00002461 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002462 val &= ~(1 << DisAutoRefresh);
Zheng Bao0c51ddd2010-09-21 02:51:31 +00002463 Set_NB32(pDCTstat->dev_dct, 0x8C + (0x100 * dct), val);
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00002464 }
Marc Jones8ae8c882007-12-19 01:32:08 +00002465 }
2466}
2467
2468
2469static u8 mct_setMode(struct MCTStatStruc *pMCTstat,
2470 struct DCTStatStruc *pDCTstat)
2471{
2472 u8 byte;
2473 u8 bytex;
2474 u32 val;
2475 u32 reg;
2476
2477 byte = bytex = pDCTstat->DIMMValid;
2478 bytex &= 0x55; /* CHA DIMM pop */
2479 pDCTstat->DIMMValidDCT[0] = bytex;
2480
 2481	byte &= 0xAA; /* CHB DIMM pop */
2482 byte >>= 1;
2483 pDCTstat->DIMMValidDCT[1] = byte;
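	/* Worked example (socket map assumed): DIMMValid = 0x0F means sockets
	 * 0-3 are populated; the 0x55 mask keeps sockets 0/2 for DCT0 and the
	 * 0xAA mask (shifted right) maps sockets 1/3 onto DCT1, giving 0x05
	 * for both channels; such a symmetric population is eligible for the
	 * ganged (128-bit) mode set up below.
	 */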
2484
2485 if (byte != bytex) {
2486 pDCTstat->ErrStatus &= ~(1 << SB_DimmMismatchO);
2487 } else {
Zheng Bao7b1a3c32010-09-28 04:43:16 +00002488 if ( mctGet_NVbits(NV_Unganged) )
2489 pDCTstat->ErrStatus |= (1 << SB_DimmMismatchO);
Marc Jones8ae8c882007-12-19 01:32:08 +00002490
2491 if (!(pDCTstat->ErrStatus & (1 << SB_DimmMismatchO))) {
2492 pDCTstat->GangedMode = 1;
2493 /* valid 128-bit mode population. */
2494 pDCTstat->Status |= 1 << SB_128bitmode;
2495 reg = 0x110;
2496 val = Get_NB32(pDCTstat->dev_dct, reg);
2497 val |= 1 << DctGangEn;
2498 Set_NB32(pDCTstat->dev_dct, reg, val);
2499 print_tx("setMode: DRAM Controller Select Low Register = ", val);
2500 }
2501 }
2502 return pDCTstat->ErrCode;
2503}
2504
2505
2506u32 Get_NB32(u32 dev, u32 reg)
2507{
Marc Jonesfd9c9b82009-09-14 17:00:04 +00002508 return pci_read_config32(dev, reg);
Marc Jones8ae8c882007-12-19 01:32:08 +00002509}
2510
2511
2512void Set_NB32(u32 dev, u32 reg, u32 val)
2513{
Marc Jonesfd9c9b82009-09-14 17:00:04 +00002514 pci_write_config32(dev, reg, val);
Marc Jones8ae8c882007-12-19 01:32:08 +00002515}
2516
2517
2518u32 Get_NB32_index(u32 dev, u32 index_reg, u32 index)
2519{
2520 u32 dword;
2521
2522 Set_NB32(dev, index_reg, index);
2523 dword = Get_NB32(dev, index_reg+0x4);
2524
2525 return dword;
2526}
2527
2528void Set_NB32_index(u32 dev, u32 index_reg, u32 index, u32 data)
2529{
2530 Set_NB32(dev, index_reg, index);
2531 Set_NB32(dev, index_reg + 0x4, data);
2532}
2533
2534
2535u32 Get_NB32_index_wait(u32 dev, u32 index_reg, u32 index)
2536{
2537
2538 u32 dword;
2539
2540
2541 index &= ~(1 << DctAccessWrite);
2542 Set_NB32(dev, index_reg, index);
2543 do {
2544 dword = Get_NB32(dev, index_reg);
2545 } while (!(dword & (1 << DctAccessDone)));
2546 dword = Get_NB32(dev, index_reg + 0x4);
2547
2548 return dword;
2549}
2550
2551
2552void Set_NB32_index_wait(u32 dev, u32 index_reg, u32 index, u32 data)
2553{
2554 u32 dword;
2555
2556
2557 Set_NB32(dev, index_reg + 0x4, data);
2558 index |= (1 << DctAccessWrite);
2559 Set_NB32(dev, index_reg, index);
2560 do {
2561 dword = Get_NB32(dev, index_reg);
2562 } while (!(dword & (1 << DctAccessDone)));
2563
2564}
2565
2566
2567static u8 mct_PlatformSpec(struct MCTStatStruc *pMCTstat,
2568 struct DCTStatStruc *pDCTstat, u8 dct)
2569{
2570 /* Get platform specific config/timing values from the interface layer
2571 * and program them into DCT.
2572 */
2573
2574 u32 dev = pDCTstat->dev_dct;
2575 u32 index_reg;
2576 u8 i, i_start, i_end;
2577
2578 if (pDCTstat->GangedMode) {
2579 SyncSetting(pDCTstat);
2580 i_start = 0;
2581 i_end = 2;
2582 } else {
2583 i_start = dct;
2584 i_end = dct + 1;
2585 }
2586 for (i=i_start; i<i_end; i++) {
2587 index_reg = 0x98 + (i * 0x100);
2588 Set_NB32_index_wait(dev, index_reg, 0x00, pDCTstat->CH_ODC_CTL[i]); /* Channel A Output Driver Compensation Control */
 2589		Set_NB32_index_wait(dev, index_reg, 0x04, pDCTstat->CH_ADDR_TMG[i]); /* Channel A Address Timing Control */
2590 }
2591
2592 return pDCTstat->ErrCode;
2593
2594}
2595
2596
2597static void mct_SyncDCTsReady(struct DCTStatStruc *pDCTstat)
2598{
2599 u32 dev;
2600 u32 val;
2601
2602 if (pDCTstat->NodePresent) {
2603 print_tx("mct_SyncDCTsReady: Node ", pDCTstat->Node_ID);
2604 dev = pDCTstat->dev_dct;
2605
2606 if ((pDCTstat->DIMMValidDCT[0] ) || (pDCTstat->DIMMValidDCT[1])) { /* This Node has dram */
2607 do {
2608 val = Get_NB32(dev, 0x110);
2609 } while (!(val & (1 << DramEnabled)));
2610 print_t("mct_SyncDCTsReady: DramEnabled\n");
2611 }
2612 } /* Node is present */
2613}
2614
2615
2616static void mct_AfterGetCLT(struct MCTStatStruc *pMCTstat,
2617 struct DCTStatStruc *pDCTstat, u8 dct)
2618{
2619 if (!pDCTstat->GangedMode) {
2620 if (dct == 0 ) {
2621 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2622 if (pDCTstat->DIMMValidDCT[dct] == 0)
2623 pDCTstat->ErrCode = SC_StopError;
2624 } else {
2625 pDCTstat->CSPresent = 0;
2626 pDCTstat->CSTestFail = 0;
2627 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[dct];
2628 if (pDCTstat->DIMMValidDCT[dct] == 0)
2629 pDCTstat->ErrCode = SC_StopError;
2630 }
2631 }
2632}
2633
2634static u8 mct_SPDCalcWidth(struct MCTStatStruc *pMCTstat,
2635 struct DCTStatStruc *pDCTstat, u8 dct)
2636{
2637 u8 ret;
2638
2639 if ( dct == 0) {
2640 SPDCalcWidth_D(pMCTstat, pDCTstat);
2641 ret = mct_setMode(pMCTstat, pDCTstat);
2642 } else {
2643 ret = pDCTstat->ErrCode;
2644 }
2645
2646 print_tx("SPDCalcWidth: Status ", pDCTstat->Status);
2647 print_tx("SPDCalcWidth: ErrStatus ", pDCTstat->ErrStatus);
2648 print_tx("SPDCalcWidth: ErrCode ", pDCTstat->ErrCode);
2649 print_t("SPDCalcWidth: Done\n");
2650
2651 return ret;
2652}
2653
2654
2655static void mct_AfterStitchMemory(struct MCTStatStruc *pMCTstat,
2656 struct DCTStatStruc *pDCTstat, u8 dct)
2657{
2658 u32 val;
2659 u32 dword;
2660 u32 dev;
2661 u32 reg;
2662 u8 _MemHoleRemap;
2663 u32 DramHoleBase;
2664
2665 _MemHoleRemap = mctGet_NVbits(NV_MemHole);
2666 DramHoleBase = mctGet_NVbits(NV_BottomIO);
2667 DramHoleBase <<= 8;
 2668	/* Scale DramHoleBase (NV_BottomIO << 8) from [31:24] to [31:16]
 2669	 * so the hole base has a granularity of 128MB, and use the
 2670	 * result as the 'effective' bottom of IO.
 2671	 */
2672 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2673
2674 /* In unganged mode, we must add DCT0 and DCT1 to DCTSysLimit */
2675 if (!pDCTstat->GangedMode) {
2676 dev = pDCTstat->dev_dct;
2677 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002678 /* if DCT0 and DCT1 both exist, set DctSelBaseAddr[47:27] to the top of DCT0 */
Marc Jones8ae8c882007-12-19 01:32:08 +00002679 if (dct == 0) {
2680 if (pDCTstat->DIMMValidDCT[1] > 0) {
2681 dword = pDCTstat->DCTSysLimit + 1;
2682 dword += pDCTstat->NodeSysBase;
2683 dword >>= 8; /* scale [39:8] to [47:27],and to F2x110[31:11] */
2684 if ((dword >= DramHoleBase) && _MemHoleRemap) {
2685 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2686 val = pMCTstat->HoleBase;
2687 val >>= 16;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002688 val = (((~val) & 0xFF) + 1);
Marc Jones8ae8c882007-12-19 01:32:08 +00002689 val <<= 8;
2690 dword += val;
2691 }
2692 reg = 0x110;
2693 val = Get_NB32(dev, reg);
2694 val &= 0x7F;
2695 val |= dword;
2696 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2697 Set_NB32(dev, reg, val);
2698 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select Low Register = ", val);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002699 print_tx("AfterStitch DCT0 and DCT1: DRAM Controller Select High Register = ", dword);
Marc Jones8ae8c882007-12-19 01:32:08 +00002700
2701 reg = 0x114;
2702 val = dword;
2703 Set_NB32(dev, reg, val);
2704 }
2705 } else {
2706 /* Program the DctSelBaseAddr value to 0
2707 if DCT 0 is disabled */
2708 if (pDCTstat->DIMMValidDCT[0] == 0) {
2709 dword = pDCTstat->NodeSysBase;
2710 dword >>= 8;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002711 if ((dword >= DramHoleBase) && _MemHoleRemap) {
Marc Jones8ae8c882007-12-19 01:32:08 +00002712 pMCTstat->HoleBase = (DramHoleBase & 0xFFFFF800) << 8;
2713 val = pMCTstat->HoleBase;
2714 val >>= 8;
2715 val &= ~(0xFFFF);
2716 val |= (((~val) & 0xFFFF) + 1);
2717 dword += val;
2718 }
2719 reg = 0x114;
2720 val = dword;
2721 Set_NB32(dev, reg, val);
2722
2723 reg = 0x110;
2724 val |= 3; /* Set F2x110[DctSelHiRngEn], F2x110[DctSelHi] */
2725 Set_NB32(dev, reg, val);
2726 print_tx("AfterStitch DCT1 only: DRAM Controller Select Low Register = ", val);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002727 print_tx("AfterStitch DCT1 only: DRAM Controller Select High Register = ", dword);
Marc Jones8ae8c882007-12-19 01:32:08 +00002728 }
2729 }
2730 } else {
2731 pDCTstat->NodeSysLimit += pDCTstat->DCTSysLimit;
2732 }
2733 print_tx("AfterStitch pDCTstat->NodeSysBase = ", pDCTstat->NodeSysBase);
2734 print_tx("mct_AfterStitchMemory: pDCTstat->NodeSysLimit ", pDCTstat->NodeSysLimit);
2735}
2736
2737
2738static u8 mct_DIMMPresence(struct MCTStatStruc *pMCTstat,
2739 struct DCTStatStruc *pDCTstat, u8 dct)
2740{
2741 u8 ret;
2742
2743 if ( dct == 0)
2744 ret = DIMMPresence_D(pMCTstat, pDCTstat);
2745 else
2746 ret = pDCTstat->ErrCode;
2747
2748 return ret;
2749}
2750
2751
2752/* mct_BeforeGetDIMMAddress inline in C */
2753
2754
2755static void mct_OtherTiming(struct MCTStatStruc *pMCTstat,
2756 struct DCTStatStruc *pDCTstatA)
2757{
2758 u8 Node;
2759
2760 for (Node = 0; Node < MAX_NODES_SUPPORTED; Node++) {
2761 struct DCTStatStruc *pDCTstat;
2762 pDCTstat = pDCTstatA + Node;
2763 if (pDCTstat->NodePresent) {
2764 if (pDCTstat->DIMMValidDCT[0]) {
2765 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[0];
2766 Set_OtherTiming(pMCTstat, pDCTstat, 0);
2767 }
2768 if (pDCTstat->DIMMValidDCT[1] && !pDCTstat->GangedMode ) {
2769 pDCTstat->DIMMValid = pDCTstat->DIMMValidDCT[1];
2770 Set_OtherTiming(pMCTstat, pDCTstat, 1);
2771 }
2772 } /* Node is present*/
2773 } /* while Node */
2774}
2775
2776
2777static void Set_OtherTiming(struct MCTStatStruc *pMCTstat,
2778 struct DCTStatStruc *pDCTstat, u8 dct)
2779{
2780 u32 reg;
2781 u32 reg_off = 0x100 * dct;
2782 u32 val;
2783 u32 dword;
2784 u32 dev = pDCTstat->dev_dct;
2785
2786 Get_Trdrd(pMCTstat, pDCTstat, dct);
2787 Get_Twrwr(pMCTstat, pDCTstat, dct);
2788 Get_Twrrd(pMCTstat, pDCTstat, dct);
2789 Get_TrwtTO(pMCTstat, pDCTstat, dct);
2790 Get_TrwtWB(pMCTstat, pDCTstat);
2791
2792 reg = 0x8C + reg_off; /* Dram Timing Hi */
2793 val = Get_NB32(dev, reg);
2794 val &= 0xffff0300;
2795 dword = pDCTstat->TrwtTO; //0x07
2796 val |= dword << 4;
2797 dword = pDCTstat->Twrrd; //0x03
2798 val |= dword << 10;
2799 dword = pDCTstat->Twrwr; //0x03
2800 val |= dword << 12;
2801 dword = pDCTstat->Trdrd; //0x03
2802 val |= dword << 14;
2803 dword = pDCTstat->TrwtWB; //0x07
2804 val |= dword;
2805 val = OtherTiming_A_D(pDCTstat, val);
2806 Set_NB32(dev, reg, val);
2807
2808}
2809
2810
2811static void Get_Trdrd(struct MCTStatStruc *pMCTstat,
2812 struct DCTStatStruc *pDCTstat, u8 dct)
2813{
2814 u8 Trdrd;
2815 u8 byte;
2816 u32 dword;
2817 u32 val;
2818 u32 index_reg = 0x98 + 0x100 * dct;
2819 u32 dev = pDCTstat->dev_dct;
2820
2821 if ((pDCTstat->Dimmx4Present != 0) && (pDCTstat->Dimmx8Present != 0)) {
2822 /* mixed (x4 or x8) DIMM types
2823 the largest DqsRcvEnGrossDelay of any DIMM minus the DqsRcvEnGrossDelay
2824 of any other DIMM is equal to the Critical Gross Delay Difference (CGDD) for Trdrd.*/
2825 byte = Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2826 if (byte == 0)
2827 Trdrd = 1;
2828 else
2829 Trdrd = 2;
2830
2831 } else {
2832 /*
 2833		Trdrd with non-mixed DIMM types:
 2834		If RdDqsTime is the same for all DIMMs and the DqsRcvEn difference
 2835		between any two DIMMs is less than half of a MEMCLK, BIOS should
 2836		program Trdrd to 0000b, else program Trdrd to 0001b.
 2837
 2838		RdDqsTime uses a single register set for DDR400~DDR667;
 2839		DDR800 has two register sets, one for DIMM0 and one for DIMM1,
 2840		which are compared below. */
2841 Trdrd = 1;
2842 if (pDCTstat->Speed > 3) {
2843 /* DIMM0+DIMM1 exist */ //NOTE it should be 5
2844 val = bsf(pDCTstat->DIMMValid);
2845 dword = bsr(pDCTstat->DIMMValid);
2846 if (dword != val && dword != 0) {
2847 /* DCT Read DQS Timing Control - DIMM0 - Low */
2848 dword = Get_NB32_index_wait(dev, index_reg, 0x05);
2849 /* DCT Read DQS Timing Control - DIMM1 - Low */
2850 val = Get_NB32_index_wait(dev, index_reg, 0x105);
2851 if (val != dword)
2852 goto Trdrd_1;
2853
2854 /* DCT Read DQS Timing Control - DIMM0 - High */
2855 dword = Get_NB32_index_wait(dev, index_reg, 0x06);
2856 /* DCT Read DQS Timing Control - DIMM1 - High */
2857 val = Get_NB32_index_wait(dev, index_reg, 0x106);
2858 if (val != dword)
2859 goto Trdrd_1;
2860 }
2861 }
2862
2863 /* DqsRcvEn difference between any two DIMMs is
2864 less than half of a MEMCLK */
2865 /* DqsRcvEn byte 1,0*/
2866 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x10))
2867 goto Trdrd_1;
2868 /* DqsRcvEn byte 3,2*/
2869 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x11))
2870 goto Trdrd_1;
2871 /* DqsRcvEn byte 5,4*/
2872 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x20))
2873 goto Trdrd_1;
2874 /* DqsRcvEn byte 7,6*/
2875 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x21))
2876 goto Trdrd_1;
2877 /* DqsRcvEn ECC*/
2878 if (Check_DqsRcvEn_Diff(pDCTstat, dct, dev, index_reg, 0x12))
2879 goto Trdrd_1;
2880 Trdrd = 0;
2881 Trdrd_1:
2882 ;
2883 }
2884 pDCTstat->Trdrd = Trdrd;
2885
2886}
2887
2888
2889static void Get_Twrwr(struct MCTStatStruc *pMCTstat,
2890 struct DCTStatStruc *pDCTstat, u8 dct)
2891{
2892 u8 Twrwr = 0;
2893 u32 index_reg = 0x98 + 0x100 * dct;
2894 u32 dev = pDCTstat->dev_dct;
2895 u32 val;
2896 u32 dword;
2897
 2898	/* WrDatGrossDlyByte uses a single register set for DDR400~DDR667;
 2899	   DDR800 has two register sets, one for DIMM0 and one for DIMM1 */
2900 if (pDCTstat->Speed > 3) {
2901 val = bsf(pDCTstat->DIMMValid);
2902 dword = bsr(pDCTstat->DIMMValid);
2903 if (dword != val && dword != 0) {
2904 /*the largest WrDatGrossDlyByte of any DIMM minus the
Zheng Bao7b1a3c32010-09-28 04:43:16 +00002905 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
Marc Jones8ae8c882007-12-19 01:32:08 +00002906 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2907 }
2908 if (val == 0)
2909 Twrwr = 2;
2910 else
2911 Twrwr = 3;
2912 }
2913 pDCTstat->Twrwr = Twrwr;
2914}
2915
2916
2917static void Get_Twrrd(struct MCTStatStruc *pMCTstat,
2918 struct DCTStatStruc *pDCTstat, u8 dct)
2919{
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002920 u8 byte, bytex, val;
Marc Jones8ae8c882007-12-19 01:32:08 +00002921 u32 index_reg = 0x98 + 0x100 * dct;
2922 u32 dev = pDCTstat->dev_dct;
2923
2924 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2925 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2926 equal to the Critical Gross Delay Difference (CGDD) for Twrrd.*/
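	/* Example of the mapping below (gross delays assumed): with
	 * WrDatGrossH = 3 and DqsRcvEnGrossL = 1 the difference is 2, so
	 * Twrrd = 2; a difference of 1 gives Twrrd = 1, and
	 * WrDatGrossH <= DqsRcvEnGrossL gives Twrrd = 0.
	 */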
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002927
 2928	/* WrDatGrossDlyByte uses a single register set for DDR400~DDR667;
 2929	   DDR800 has two register sets, one for DIMM0 and one for DIMM1 */
2930 if (pDCTstat->Speed > 3) {
2931 val = Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2932 } else {
2933 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 1); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
2934 pDCTstat->WrDatGrossH = (u8) val; /* low byte = max value */
2935 }
2936
Marc Jones8ae8c882007-12-19 01:32:08 +00002937 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00002938
Marc Jones8ae8c882007-12-19 01:32:08 +00002939 bytex = pDCTstat->DqsRcvEnGrossL;
2940 byte = pDCTstat->WrDatGrossH;
2941 if (byte > bytex) {
2942 byte -= bytex;
2943 if (byte == 1)
2944 bytex = 1;
2945 else
2946 bytex = 2;
2947 } else {
2948 bytex = 0;
2949 }
2950 pDCTstat->Twrrd = bytex;
2951}
2952
2953
2954static void Get_TrwtTO(struct MCTStatStruc *pMCTstat,
2955 struct DCTStatStruc *pDCTstat, u8 dct)
2956{
2957 u8 byte, bytex;
2958 u32 index_reg = 0x98 + 0x100 * dct;
2959 u32 dev = pDCTstat->dev_dct;
2960
2961 /* On any given byte lane, the largest WrDatGrossDlyByte delay of
2962 any DIMM minus the DqsRcvEnGrossDelay delay of any other DIMM is
2963 equal to the Critical Gross Delay Difference (CGDD) for TrwtTO. */
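	/* Example of the mapping below (gross delays assumed): if
	 * DqsRcvEnGrossL exceeds WrDatGrossH by 1 or 2 then TrwtTO = 3
	 * (4 for a larger gap); if WrDatGrossH is greater or equal, a gap
	 * of 0 or 1 gives TrwtTO = 2 and anything larger gives TrwtTO = 1.
	 */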
2964 Get_DqsRcvEnGross_Diff(pDCTstat, dev, index_reg);
2965 Get_WrDatGross_Diff(pDCTstat, dct, dev, index_reg);
2966 bytex = pDCTstat->DqsRcvEnGrossL;
2967 byte = pDCTstat->WrDatGrossH;
2968 if (bytex > byte) {
2969 bytex -= byte;
2970 if ((bytex == 1) || (bytex == 2))
2971 bytex = 3;
2972 else
2973 bytex = 4;
2974 } else {
2975 byte -= bytex;
2976 if ((byte == 0) || (byte == 1))
2977 bytex = 2;
2978 else
2979 bytex = 1;
2980 }
2981
2982 pDCTstat->TrwtTO = bytex;
2983}
2984
2985
2986static void Get_TrwtWB(struct MCTStatStruc *pMCTstat,
2987 struct DCTStatStruc *pDCTstat)
2988{
2989 /* TrwtWB ensures read-to-write data-bus turnaround.
2990 This value should be one more than the programmed TrwtTO.*/
2991 pDCTstat->TrwtWB = pDCTstat->TrwtTO + 1;
2992}
2993
2994
2995static u8 Check_DqsRcvEn_Diff(struct DCTStatStruc *pDCTstat,
2996 u8 dct, u32 dev, u32 index_reg,
2997 u32 index)
2998{
2999 u8 Smallest_0, Largest_0, Smallest_1, Largest_1;
3000 u8 i;
3001 u32 val;
3002 u8 byte;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003003 u8 ecc_reg = 0;
Marc Jones8ae8c882007-12-19 01:32:08 +00003004
3005 Smallest_0 = 0xFF;
3006 Smallest_1 = 0xFF;
3007 Largest_0 = 0;
3008 Largest_1 = 0;
3009
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003010 if (index == 0x12)
3011 ecc_reg = 1;
3012
Marc Jones8ae8c882007-12-19 01:32:08 +00003013 for (i=0; i < 8; i+=2) {
3014 if ( pDCTstat->DIMMValid & (1 << i)) {
3015 val = Get_NB32_index_wait(dev, index_reg, index);
3016 byte = val & 0xFF;
3017 if (byte < Smallest_0)
3018 Smallest_0 = byte;
3019 if (byte > Largest_0)
3020 Largest_0 = byte;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003021 if (!(ecc_reg)) {
3022 byte = (val >> 16) & 0xFF;
3023 if (byte < Smallest_1)
3024 Smallest_1 = byte;
3025 if (byte > Largest_1)
3026 Largest_1 = byte;
3027 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003028 }
3029 index += 3;
3030 } /* while ++i */
3031
3032 /* check if total DqsRcvEn delay difference between any
3033 two DIMMs is less than half of a MEMCLK */
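	/* The '> 31' comparison below reads one fine-delay step as 1/64 of a
	 * MEMCLK (an interpretation of the threshold, not a spec quote), so a
	 * spread of 32 or more steps means the DqsRcvEn delays differ by at
	 * least half a MEMCLK.
	 */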
3034 if ((Largest_0 - Smallest_0) > 31)
3035 return 1;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003036 if (!(ecc_reg))
3037 if ((Largest_1 - Smallest_1) > 31)
3038 return 1;
Marc Jones8ae8c882007-12-19 01:32:08 +00003039 return 0;
3040}
3041
3042
3043static u8 Get_DqsRcvEnGross_Diff(struct DCTStatStruc *pDCTstat,
3044 u32 dev, u32 index_reg)
3045{
3046 u8 Smallest, Largest;
3047 u32 val;
3048 u8 byte, bytex;
3049
3050 /* The largest DqsRcvEnGrossDelay of any DIMM minus the
3051 DqsRcvEnGrossDelay of any other DIMM is equal to the Critical
3052 Gross Delay Difference (CGDD) */
3053 /* DqsRcvEn byte 1,0 */
3054 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x10);
3055 Largest = val & 0xFF;
3056 Smallest = (val >> 8) & 0xFF;
3057
3058 /* DqsRcvEn byte 3,2 */
3059 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x11);
3060 byte = val & 0xFF;
3061 bytex = (val >> 8) & 0xFF;
3062 if (bytex < Smallest)
3063 Smallest = bytex;
3064 if (byte > Largest)
3065 Largest = byte;
3066
3067 /* DqsRcvEn byte 5,4 */
3068 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x20);
3069 byte = val & 0xFF;
3070 bytex = (val >> 8) & 0xFF;
3071 if (bytex < Smallest)
3072 Smallest = bytex;
3073 if (byte > Largest)
3074 Largest = byte;
3075
3076 /* DqsRcvEn byte 7,6 */
3077 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x21);
3078 byte = val & 0xFF;
3079 bytex = (val >> 8) & 0xFF;
3080 if (bytex < Smallest)
3081 Smallest = bytex;
3082 if (byte > Largest)
3083 Largest = byte;
3084
 3085	if (pDCTstat->DimmECCPresent > 0) {
3086 /*DqsRcvEn Ecc */
3087 val = Get_DqsRcvEnGross_MaxMin(pDCTstat, dev, index_reg, 0x12);
3088 byte = val & 0xFF;
3089 bytex = (val >> 8) & 0xFF;
3090 if (bytex < Smallest)
3091 Smallest = bytex;
3092 if (byte > Largest)
3093 Largest = byte;
3094 }
3095
3096 pDCTstat->DqsRcvEnGrossL = Largest;
3097 return Largest - Smallest;
3098}
3099
3100
3101static u8 Get_WrDatGross_Diff(struct DCTStatStruc *pDCTstat,
3102 u8 dct, u32 dev, u32 index_reg)
3103{
3104 u8 Smallest, Largest;
3105 u32 val;
3106 u8 byte, bytex;
3107
3108 /* The largest WrDatGrossDlyByte of any DIMM minus the
3109 WrDatGrossDlyByte of any other DIMM is equal to CGDD */
3110 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x01); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM0 */
3111 Largest = val & 0xFF;
3112 Smallest = (val >> 8) & 0xFF;
3113 val = Get_WrDatGross_MaxMin(pDCTstat, dct, dev, index_reg, 0x101); /* WrDatGrossDlyByte byte 0,1,2,3 for DIMM1 */
3114 byte = val & 0xFF;
3115 bytex = (val >> 8) & 0xFF;
3116 if (bytex < Smallest)
3117 Smallest = bytex;
3118 if (byte > Largest)
3119 Largest = byte;
3120
3121 // FIXME: Add Cx support.
3122
3123 pDCTstat->WrDatGrossH = Largest;
3124 return Largest - Smallest;
3125}
3126
3127static u16 Get_DqsRcvEnGross_MaxMin(struct DCTStatStruc *pDCTstat,
3128 u32 dev, u32 index_reg,
3129 u32 index)
3130{
3131 u8 Smallest, Largest;
3132 u8 i;
3133 u8 byte;
3134 u32 val;
3135 u16 word;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003136 u8 ecc_reg = 0;
Marc Jones8ae8c882007-12-19 01:32:08 +00003137
3138 Smallest = 7;
3139 Largest = 0;
3140
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003141 if (index == 0x12)
3142 ecc_reg = 1;
3143
Marc Jones8ae8c882007-12-19 01:32:08 +00003144 for (i=0; i < 8; i+=2) {
3145 if ( pDCTstat->DIMMValid & (1 << i)) {
3146 val = Get_NB32_index_wait(dev, index_reg, index);
3147 val &= 0x00E000E0;
3148 byte = (val >> 5) & 0xFF;
3149 if (byte < Smallest)
3150 Smallest = byte;
3151 if (byte > Largest)
3152 Largest = byte;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003153 if (!(ecc_reg)) {
3154 byte = (val >> (16 + 5)) & 0xFF;
3155 if (byte < Smallest)
3156 Smallest = byte;
3157 if (byte > Largest)
3158 Largest = byte;
3159 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003160 }
Zheng Bao7b1a3c32010-09-28 04:43:16 +00003161 index += 3;
Marc Jones8ae8c882007-12-19 01:32:08 +00003162 } /* while ++i */
3163
3164 word = Smallest;
3165 word <<= 8;
3166 word |= Largest;
3167
3168 return word;
3169}
3170
3171static u16 Get_WrDatGross_MaxMin(struct DCTStatStruc *pDCTstat,
3172 u8 dct, u32 dev, u32 index_reg,
3173 u32 index)
3174{
3175 u8 Smallest, Largest;
3176 u8 i, j;
3177 u32 val;
3178 u8 byte;
3179 u16 word;
3180
3181 Smallest = 3;
3182 Largest = 0;
3183 for (i=0; i < 2; i++) {
3184 val = Get_NB32_index_wait(dev, index_reg, index);
3185 val &= 0x60606060;
3186 val >>= 5;
3187 for (j=0; j < 4; j++) {
3188 byte = val & 0xFF;
3189 if (byte < Smallest)
3190 Smallest = byte;
3191 if (byte > Largest)
3192 Largest = byte;
3193 val >>= 8;
3194 } /* while ++j */
3195 index++;
3196 } /*while ++i*/
3197
3198 if (pDCTstat->DimmECCPresent > 0) {
3199 index++;
3200 val = Get_NB32_index_wait(dev, index_reg, index);
3201 val &= 0x00000060;
3202 val >>= 5;
3203 byte = val & 0xFF;
3204 if (byte < Smallest)
3205 Smallest = byte;
3206 if (byte > Largest)
3207 Largest = byte;
3208 }
3209
3210 word = Smallest;
3211 word <<= 8;
3212 word |= Largest;
3213
3214 return word;
3215}
3216
3217
3218
3219static void mct_FinalMCT_D(struct MCTStatStruc *pMCTstat,
3220 struct DCTStatStruc *pDCTstat)
3221{
3222 print_t("\tmct_FinalMCT_D: Clr Cl, Wb\n");
3223
3224
Arne Georg Gleditsche150e9a2010-09-09 10:35:52 +00003225 /* ClrClToNB_D postponed until we're done executing from ROM */
Marc Jones8ae8c882007-12-19 01:32:08 +00003226 mct_ClrWbEnhWsbDis_D(pMCTstat, pDCTstat);
3227}
3228
3229
3230static void mct_InitialMCT_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat)
3231{
3232 print_t("\tmct_InitialMCT_D: Set Cl, Wb\n");
3233 mct_SetClToNB_D(pMCTstat, pDCTstat);
3234 mct_SetWbEnhWsbDis_D(pMCTstat, pDCTstat);
3235}
3236
3237
3238static u32 mct_NodePresent_D(void)
3239{
3240 u32 val;
3241 val = 0x12001022;
3242 return val;
3243}
3244
3245
3246static void mct_init(struct MCTStatStruc *pMCTstat,
3247 struct DCTStatStruc *pDCTstat)
3248{
3249 u32 lo, hi;
3250 u32 addr;
3251
3252 pDCTstat->GangedMode = 0;
3253 pDCTstat->DRPresent = 1;
3254
3255 /* enable extend PCI configuration access */
3256 addr = 0xC001001F;
3257 _RDMSR(addr, &lo, &hi);
3258 if (hi & (1 << (46-32))) {
3259 pDCTstat->Status |= 1 << SB_ExtConfig;
3260 } else {
3261 hi |= 1 << (46-32);
3262 _WRMSR(addr, lo, hi);
3263 }
3264}
3265
3266
3267static void clear_legacy_Mode(struct MCTStatStruc *pMCTstat,
3268 struct DCTStatStruc *pDCTstat)
3269{
3270 u32 reg;
3271 u32 val;
3272 u32 dev = pDCTstat->dev_dct;
3273
3274 /* Clear Legacy BIOS Mode bit */
3275 reg = 0x94;
3276 val = Get_NB32(dev, reg);
3277 val &= ~(1<<LegacyBiosMode);
3278 Set_NB32(dev, reg, val);
3279}
3280
3281
3282static void mct_HTMemMapExt(struct MCTStatStruc *pMCTstat,
3283 struct DCTStatStruc *pDCTstatA)
3284{
3285 u8 Node;
3286 u32 Drambase, Dramlimit;
3287 u32 val;
3288 u32 reg;
3289 u32 dev;
3290 u32 devx;
3291 u32 dword;
3292 struct DCTStatStruc *pDCTstat;
3293
3294 pDCTstat = pDCTstatA + 0;
3295 dev = pDCTstat->dev_map;
3296
3297 /* Copy dram map from F1x40/44,F1x48/4c,
Zheng Bao7b1a3c32010-09-28 04:43:16 +00003298 to F1x120/124(Node0),F1x120/124(Node1),...*/
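	/* Field scaling note (register layout as used below): F1x40/F1x44
	 * carry DramBase/DramLimit[39:24] in bits [31:16], so the >> (16+3)
	 * shift re-expresses them as [39:27] values ready to be merged into
	 * the F1x120/F1x124 extended map registers, which hold bits [47:27].
	 */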
Marc Jones8ae8c882007-12-19 01:32:08 +00003299 for (Node=0; Node < MAX_NODES_SUPPORTED; Node++) {
3300 pDCTstat = pDCTstatA + Node;
3301 devx = pDCTstat->dev_map;
3302
3303 /* get base/limit from Node0 */
3304 reg = 0x40 + (Node << 3); /* Node0/Dram Base 0 */
3305 val = Get_NB32(dev, reg);
3306 Drambase = val >> ( 16 + 3);
3307
 3308		reg = 0x44 + (Node << 3); /* Node0/Dram Limit 0 */
3309 val = Get_NB32(dev, reg);
3310 Dramlimit = val >> (16 + 3);
3311
3312 /* set base/limit to F1x120/124 per Node */
3313 if (pDCTstat->NodePresent) {
3314 reg = 0x120; /* F1x120,DramBase[47:27] */
3315 val = Get_NB32(devx, reg);
3316 val &= 0xFFE00000;
3317 val |= Drambase;
3318 Set_NB32(devx, reg, val);
3319
3320 reg = 0x124;
3321 val = Get_NB32(devx, reg);
3322 val &= 0xFFE00000;
3323 val |= Dramlimit;
3324 Set_NB32(devx, reg, val);
3325
3326 if ( pMCTstat->GStatus & ( 1 << GSB_HWHole)) {
3327 reg = 0xF0;
3328 val = Get_NB32(devx, reg);
3329 val |= (1 << DramMemHoistValid);
3330 val &= ~(0xFF << 24);
3331 dword = (pMCTstat->HoleBase >> (24 - 8)) & 0xFF;
3332 dword <<= 24;
3333 val |= dword;
3334 Set_NB32(devx, reg, val);
3335 }
3336
3337 }
3338 }
3339}
3340
3341static void SetCSTriState(struct MCTStatStruc *pMCTstat,
3342 struct DCTStatStruc *pDCTstat, u8 dct)
3343{
3344 u32 val;
3345 u32 dev = pDCTstat->dev_dct;
3346 u32 index_reg = 0x98 + 0x100 * dct;
3347 u8 cs;
3348 u32 index;
3349 u16 word;
3350
3351 /* Tri-state unused chipselects when motherboard
3352 termination is available */
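	/* Example (CS population assumed): with CSPresent = 0x0F (four ranks
	 * on CS0-CS3) the inverted map is 0xF0, so only CS4-CS7 are
	 * tri-stated; for registered DIMMs the loop below first marks the odd
	 * partner of each populated even CS as used so its pin is not floated.
	 */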
3353
3354 // FIXME: skip for Ax
3355
3356 word = pDCTstat->CSPresent;
3357 if (pDCTstat->Status & (1 << SB_Registered)) {
3358 for (cs = 0; cs < 8; cs++) {
3359 if (word & (1 << cs)) {
3360 if (!(cs & 1))
3361 word |= 1 << (cs + 1);
3362 }
3363 }
3364 }
3365 word = (~word) & 0xFF;
3366 index = 0x0c;
3367 val = Get_NB32_index_wait(dev, index_reg, index);
3368 val |= word;
3369 Set_NB32_index_wait(dev, index_reg, index, val);
3370}
3371
3372
Stefan Reinauerd6532112010-04-16 00:31:44 +00003373#ifdef UNUSED_CODE
Marc Jones8ae8c882007-12-19 01:32:08 +00003374static void SetCKETriState(struct MCTStatStruc *pMCTstat,
3375 struct DCTStatStruc *pDCTstat, u8 dct)
3376{
3377 u32 val;
3378 u32 dev;
3379 u32 index_reg = 0x98 + 0x100 * dct;
3380 u8 cs;
3381 u32 index;
3382 u16 word;
3383
3384 /* Tri-state unused CKEs when motherboard termination is available */
3385
3386 // FIXME: skip for Ax
3387
3388 dev = pDCTstat->dev_dct;
3389 word = 0x101;
3390 for (cs = 0; cs < 8; cs++) {
3391 if (pDCTstat->CSPresent & (1 << cs)) {
3392 if (!(cs & 1))
3393 word &= 0xFF00;
3394 else
3395 word &= 0x00FF;
3396 }
3397 }
3398
3399 index = 0x0c;
3400 val = Get_NB32_index_wait(dev, index_reg, index);
3401 if ((word & 0x00FF) == 1)
3402 val |= 1 << 12;
3403 else
3404 val &= ~(1 << 12);
3405
3406 if ((word >> 8) == 1)
3407 val |= 1 << 13;
3408 else
3409 val &= ~(1 << 13);
3410
3411 Set_NB32_index_wait(dev, index_reg, index, val);
3412}
Stefan Reinauerd6532112010-04-16 00:31:44 +00003413#endif
Marc Jones8ae8c882007-12-19 01:32:08 +00003414
3415static void SetODTTriState(struct MCTStatStruc *pMCTstat,
3416 struct DCTStatStruc *pDCTstat, u8 dct)
3417{
3418 u32 val;
3419 u32 dev;
3420 u32 index_reg = 0x98 + 0x100 * dct;
3421 u8 cs;
3422 u32 index;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003423 u8 odt;
3424 u8 max_dimms;
Marc Jones8ae8c882007-12-19 01:32:08 +00003425
3426 // FIXME: skip for Ax
Stefan Reinauer14e22772010-04-27 06:56:47 +00003427
Stefan Reinauerce00f1d2008-12-05 22:38:18 +00003428 dev = pDCTstat->dev_dct;
Marc Jones8ae8c882007-12-19 01:32:08 +00003429
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003430 /* Tri-state unused ODTs when motherboard termination is available */
3431 max_dimms = (u8) mctGet_NVbits(NV_MAX_DIMMS);
3432 odt = 0x0F; /* tristate all the pins then clear the used ones. */
3433
Marc Jones8ae8c882007-12-19 01:32:08 +00003434 for (cs = 0; cs < 8; cs += 2) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003435 if (pDCTstat->CSPresent & (1 << cs)) {
3436 odt &= ~(1 << (cs / 2));
3437
Zheng Baoc3af12f2010-10-08 05:08:47 +00003438 /* if quad-rank capable platform clear additional pins */
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003439 if (max_dimms != MAX_CS_SUPPORTED) {
3440 if (pDCTstat->CSPresent & (1 << (cs + 1)))
3441 odt &= ~(4 << (cs / 2));
3442 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003443 }
3444 }
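	/* Example (population assumed): on a board that is not quad-rank
	 * capable, two dual-rank DIMMs on CS0/1 and CS2/3 clear mask bits 0
	 * and 1, leaving odt = 0x0C so only the unused ODT pins stay
	 * tri-stated; quad-rank capable boards additionally clear
	 * 4 << (cs / 2) for a populated second rank pair.
	 */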
3445
3446 index = 0x0C;
3447 val = Get_NB32_index_wait(dev, index_reg, index);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003448 val |= (odt << 8);
Marc Jones8ae8c882007-12-19 01:32:08 +00003449 Set_NB32_index_wait(dev, index_reg, index, val);
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003450
Marc Jones8ae8c882007-12-19 01:32:08 +00003451}
3452
3453
3454static void InitPhyCompensation(struct MCTStatStruc *pMCTstat,
3455 struct DCTStatStruc *pDCTstat, u8 dct)
3456{
3457 u8 i;
3458 u32 index_reg = 0x98 + 0x100 * dct;
3459 u32 dev = pDCTstat->dev_dct;
3460 u32 val;
3461 u32 valx = 0;
3462 u32 dword;
3463 const u8 *p;
3464
3465 val = Get_NB32_index_wait(dev, index_reg, 0x00);
3466 dword = 0;
3467 for (i=0; i < 6; i++) {
3468 switch (i) {
3469 case 0:
3470 case 4:
3471 p = Table_Comp_Rise_Slew_15x;
3472 valx = p[(val >> 16) & 3];
3473 break;
3474 case 1:
3475 case 5:
3476 p = Table_Comp_Fall_Slew_15x;
3477 valx = p[(val >> 16) & 3];
3478 break;
3479 case 2:
3480 p = Table_Comp_Rise_Slew_20x;
3481 valx = p[(val >> 8) & 3];
3482 break;
3483 case 3:
3484 p = Table_Comp_Fall_Slew_20x;
3485 valx = p[(val >> 8) & 3];
3486 break;
3487
3488 }
3489 dword |= valx << (5 * i);
3490 }
3491
3492 /* Override/Exception */
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003493 if (!pDCTstat->GangedMode) {
3494 i = 0; /* use i for the dct setting required */
3495 if (pDCTstat->MAdimms[0] < 4)
3496 i = 1;
Kerry She08c92e02010-09-04 06:13:02 +00003497 if (((pDCTstat->Speed == 2) || (pDCTstat->Speed == 3)) && (pDCTstat->MAdimms[i] == 4)) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003498 dword &= 0xF18FFF18;
3499 index_reg = 0x98; /* force dct = 0 */
Kerry She08c92e02010-09-04 06:13:02 +00003500 }
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003501 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003502
3503 Set_NB32_index_wait(dev, index_reg, 0x0a, dword);
3504}
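/* Layout note (sketch, inferred from the loop above): the dword written to
 * F2x[1, 0]9C_x0A packs six 5-bit fields, field i occupying bits
 * [5*i + 4 : 5*i]. Fields 0/4 and 1/5 come from Table_Comp_Rise_Slew_15x /
 * Table_Comp_Fall_Slew_15x indexed by bits [17:16] of F2x[1, 0]9C_x00;
 * fields 2 and 3 come from the corresponding _20x tables indexed by
 * bits [9:8].
 */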
3505
3506
3507static void WaitRoutine_D(u32 time)
3508{
3509 while(time) {
3510 _EXECFENCE;
3511 time--;
3512 }
3513}
3514
3515
3516static void mct_EarlyArbEn_D(struct MCTStatStruc *pMCTstat,
3517 struct DCTStatStruc *pDCTstat)
3518{
3519 u32 reg;
3520 u32 val;
3521 u32 dev = pDCTstat->dev_dct;
3522
3523 /* GhEnhancement #18429 modified by askar: For low NB CLK :
3524 * Memclk ratio, the DCT may need to arbitrate early to avoid
3525 * unnecessary bubbles.
3526	 * Set bit 19 (EarlyArbEn) of F2x[1,0]78 Dram Control Register only when
3527	 * the NB CLK : Memclk ratio is between 3:1 and 4.5:1 (inclusive).
3528 */
3529
3530 reg = 0x78;
3531 val = Get_NB32(dev, reg);
3532
3533 //FIXME: check for Cx
3534 if (CheckNBCOFEarlyArbEn(pMCTstat, pDCTstat))
3535 val |= (1 << EarlyArbEn);
3536
3537 Set_NB32(dev, reg, val);
3538
3539}
3540
3541
3542static u8 CheckNBCOFEarlyArbEn(struct MCTStatStruc *pMCTstat,
3543 struct DCTStatStruc *pDCTstat)
3544{
3545 u32 reg;
3546 u32 val;
3547 u32 tmp;
3548 u32 rem;
3549 u32 dev = pDCTstat->dev_dct;
3550 u32 hi, lo;
3551 u8 NbDid = 0;
3552
3553	/* Check whether the NClk : MemClk ratio is within 3:1 to 4.5:1 (inclusive);
3554	 * return 1 if so (early DCT arbitration should be enabled), else 0. */
3555
3556 /* 3*(Fn2xD4[NBFid]+4)/(2^NbDid)/(3+Fn2x94[MemClkFreq]) */
3557 _RDMSR(0xC0010071, &lo, &hi);
3558 if (lo & (1 << 22))
3559 NbDid |= 1;
3560
3561
3562 reg = 0x94;
3563 val = Get_NB32(dev, reg);
3564 if (!(val & (1 << MemClkFreqVal)))
3565		val = Get_NB32(dev, reg + 0x100); /* get the DCT1 value */
3566
3567 val &= 0x07;
3568 val += 3;
3569 if (NbDid)
3570 val <<= 1;
3571 tmp = val;
3572
3573 dev = pDCTstat->dev_nbmisc;
3574 reg = 0xD4;
3575 val = Get_NB32(dev, reg);
3576 val &= 0x1F;
3577 val += 3;
3578 val *= 3;
3579 val = val / tmp;
3580 rem = val % tmp;
3581 tmp >>= 1;
3582
3583 // Yes this could be nicer but this was how the asm was....
3584 if (val < 3) { /* NClk:MemClk < 3:1 */
3585 return 0;
3586 } else if (val > 4) { /* NClk:MemClk >= 5:1 */
3587 return 0;
3588 } else if ((val == 4) && (rem > tmp)) { /* NClk:MemClk > 4.5:1 */
3589 return 0;
3590 } else {
3591 return 1; /* 3:1 <= NClk:MemClk <= 4.5:1*/
3592 }
3593}
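/* Worked example (sketch, register values assumed for illustration): with
 * F3xD4[4:0] = 5, NbDid = 0 and F2x94[MemClkFreq] = 3, the code above gives
 *   tmp = 3 + 3 = 6,  val = (5 + 3) * 3 = 24,  24 / 6 = 4, remainder 0,
 * i.e. an NClk : MemClk ratio of 4:1, inside 3:1 .. 4.5:1, so the function
 * returns 1. With NbDid = 1 the divisor doubles to 12, the ratio drops to
 * 2:1 and the function returns 0.
 */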
3594
3595
3596static void mct_ResetDataStruct_D(struct MCTStatStruc *pMCTstat,
3597 struct DCTStatStruc *pDCTstatA)
3598{
3599 u8 Node;
3600 u32 i;
3601 struct DCTStatStruc *pDCTstat;
Myles Watson075fbe82010-04-15 05:19:29 +00003602 u32 start, stop;
Marc Jones8ae8c882007-12-19 01:32:08 +00003603 u8 *p;
3604 u16 host_serv1, host_serv2;
3605
3606 /* Initialize Data structures by clearing all entries to 0 */
3607 p = (u8 *) pMCTstat;
3608 for (i = 0; i < sizeof(struct MCTStatStruc); i++) {
3609 p[i] = 0;
3610 }
3611
3612 for (Node = 0; Node < 8; Node++) {
3613 pDCTstat = pDCTstatA + Node;
3614 host_serv1 = pDCTstat->HostBiosSrvc1;
3615 host_serv2 = pDCTstat->HostBiosSrvc2;
3616
3617 p = (u8 *) pDCTstat;
3618 start = 0;
Myles Watson075fbe82010-04-15 05:19:29 +00003619 stop = (u32)(&((struct DCTStatStruc *)0)->CH_MaxRdLat[2]);
Marc Jones8ae8c882007-12-19 01:32:08 +00003620 for (i = start; i < stop ; i++) {
3621 p[i] = 0;
3622 }
3623
Myles Watson075fbe82010-04-15 05:19:29 +00003624 start = (u32)(&((struct DCTStatStruc *)0)->CH_D_BC_RCVRDLY[2][4]);
Marc Jones8ae8c882007-12-19 01:32:08 +00003625 stop = sizeof(struct DCTStatStruc);
3626 for (i = start; i < stop; i++) {
3627 p[i] = 0;
3628 }
3629 pDCTstat->HostBiosSrvc1 = host_serv1;
3630 pDCTstat->HostBiosSrvc2 = host_serv2;
3631 }
3632}
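/* Note (sketch of the intent above): the (u32)(&((struct DCTStatStruc *)0)->X)
 * expressions are the classic offsetof() idiom. Everything before
 * CH_MaxRdLat[2] and everything from CH_D_BC_RCVRDLY[2][4] onward is zeroed;
 * the bytes in between are deliberately left untouched, and HostBiosSrvc1/2
 * are saved and restored around the clear.
 */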
3633
3634
3635static void mct_BeforeDramInit_Prod_D(struct MCTStatStruc *pMCTstat,
3636 struct DCTStatStruc *pDCTstat)
3637{
3638 u8 i;
3639 u32 reg_off;
3640 u32 dev = pDCTstat->dev_dct;
3641
3642 // FIXME: skip for Ax
Elyes HAOUAS0f92f632014-07-27 19:37:31 +02003643 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
Marc Jones8ae8c882007-12-19 01:32:08 +00003644 for (i=0; i < 2; i++) {
3645 reg_off = 0x100 * i;
3646 Set_NB32(dev, 0x98 + reg_off, 0x0D000030);
3647 Set_NB32(dev, 0x9C + reg_off, 0x00000806);
3648 Set_NB32(dev, 0x98 + reg_off, 0x4D040F30);
3649 }
3650 }
3651}
3652
3653
Myles Watson075fbe82010-04-15 05:19:29 +00003654static void mct_AdjustDelayRange_D(struct MCTStatStruc *pMCTstat,
Marc Jones8ae8c882007-12-19 01:32:08 +00003655 struct DCTStatStruc *pDCTstat, u8 *dqs_pos)
3656{
3657 // FIXME: Skip for Ax
Elyes HAOUAS0f92f632014-07-27 19:37:31 +02003658 if ((pDCTstat->Speed == 3) || ( pDCTstat->Speed == 2)) { // MemClkFreq = 667MHz or 533MHz
Marc Jones8ae8c882007-12-19 01:32:08 +00003659 *dqs_pos = 32;
3660 }
3661}
3662
Zheng Bao69436e12011-01-06 02:18:12 +00003663static u32 mct_DisDllShutdownSR(struct MCTStatStruc *pMCTstat,
3664 struct DCTStatStruc *pDCTstat, u32 DramConfigLo, u8 dct)
3665{
3666 u32 reg_off = 0x100 * dct;
3667 u32 dev = pDCTstat->dev_dct;
3668
3669 /* Write 0000_07D0h to register F2x[1, 0]98_x4D0FE006 */
3670 if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
3671 Set_NB32(dev, 0x9C + reg_off, 0x7D0);
3672 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
3673 Set_NB32(dev, 0x9C + reg_off, 0x190);
3674 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
3675 }
3676
3677 return DramConfigLo | /* DisDllShutdownSR */ 1 << 27;
3678}
3679
3680static void mct_EnDllShutdownSR(struct MCTStatStruc *pMCTstat,
3681 struct DCTStatStruc *pDCTstat, u8 dct)
3682{
3683 u32 reg_off = 0x100 * dct;
3684 u32 dev = pDCTstat->dev_dct, val;
3685
3686 /* Write 0000_07D0h to register F2x[1, 0]98_x4D0FE006 */
3687 if (pDCTstat->LogicalCPUID & (AMD_DA_C2 | AMD_RB_C3)) {
3688 Set_NB32(dev, 0x9C + reg_off, 0x1C);
3689 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE006);
3690 Set_NB32(dev, 0x9C + reg_off, 0x13D);
3691 Set_NB32(dev, 0x98 + reg_off, 0x4D0FE007);
3692
3693 val = Get_NB32(dev, 0x90 + reg_off);
3694 val &= ~(1 << 27/* DisDllShutdownSR */);
3695 Set_NB32(dev, 0x90 + reg_off, val);
3696 }
3697}
Marc Jones8ae8c882007-12-19 01:32:08 +00003698
3699void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat,
3700 struct DCTStatStruc *pDCTstat)
3701{
3702 u32 lo, hi;
3703 u32 msr;
3704
3705 // FIXME: Maybe check the CPUID? - not for now.
3706 // pDCTstat->LogicalCPUID;
3707
3708 msr = BU_CFG2;
3709 _RDMSR(msr, &lo, &hi);
3710 lo |= 1 << ClLinesToNbDis;
3711 _WRMSR(msr, lo, hi);
3712}
3713
3714
3715void mct_ClrClToNB_D(struct MCTStatStruc *pMCTstat,
3716 struct DCTStatStruc *pDCTstat)
3717{
3718
3719 u32 lo, hi;
3720 u32 msr;
3721
3722 // FIXME: Maybe check the CPUID? - not for now.
3723 // pDCTstat->LogicalCPUID;
3724
3725 msr = BU_CFG2;
3726 _RDMSR(msr, &lo, &hi);
3727 if (!pDCTstat->ClToNB_flag)
3728 lo &= ~(1<<ClLinesToNbDis);
3729 _WRMSR(msr, lo, hi);
3730
3731}
3732
3733
3734void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3735 struct DCTStatStruc *pDCTstat)
3736{
3737 u32 lo, hi;
3738 u32 msr;
3739
3740 // FIXME: Maybe check the CPUID? - not for now.
3741 // pDCTstat->LogicalCPUID;
3742
3743 msr = BU_CFG;
3744 _RDMSR(msr, &lo, &hi);
3745 hi |= (1 << WbEnhWsbDis_D);
3746 _WRMSR(msr, lo, hi);
3747}
3748
3749
3750void mct_ClrWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat,
3751 struct DCTStatStruc *pDCTstat)
3752{
3753 u32 lo, hi;
3754 u32 msr;
3755
3756 // FIXME: Maybe check the CPUID? - not for now.
3757 // pDCTstat->LogicalCPUID;
3758
3759 msr = BU_CFG;
3760 _RDMSR(msr, &lo, &hi);
3761 hi &= ~(1 << WbEnhWsbDis_D);
3762 _WRMSR(msr, lo, hi);
3763}
3764
3765
3766void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct,
3767 u32 DramConfigHi)
3768{
3769 /* Bug#15114: Comp. update interrupted by Freq. change can cause
3770 * subsequent update to be invalid during any MemClk frequency change:
3771 * Solution: From the bug report:
3772 * 1. A software-initiated frequency change should be wrapped into the
3773 * following sequence :
3774	 *    a) Disable Compensation (F2x[1, 0]9C_x08[30])
3775 * b) Reset the Begin Compensation bit (D3CMP->COMP_CONFIG[0]) in all the compensation engines
3776 * c) Do frequency change
3777	 *    d) Enable Compensation (F2x[1, 0]9C_x08[30])
3778 * 2. A software-initiated Disable Compensation should always be
3779 * followed by step b) of the above steps.
3780 * Silicon Status: Fixed In Rev B0
3781 *
3782 * Errata#177: DRAM Phy Automatic Compensation Updates May Be Invalid
3783 * Solution: BIOS should disable the phy automatic compensation prior
3784 * to initiating a memory clock frequency change as follows:
3785 * 1. Disable PhyAutoComp by writing 1'b1 to F2x[1, 0]9C_x08[30]
3786 * 2. Reset the Begin Compensation bits by writing 32'h0 to
3787 * F2x[1, 0]9C_x4D004F00
3788 * 3. Perform frequency change
3789	 * 4. Enable PhyAutoComp by writing 1'b0 to F2x[1, 0]9C_x08[30]
3790 * In addition, any time software disables the automatic phy
3791 * compensation it should reset the begin compensation bit per step 2.
3792 * Silicon Status: Fixed in DR-B0
3793 */
3794
3795 u32 dev = pDCTstat->dev_dct;
3796 u32 index_reg = 0x98 + 0x100 * dct;
3797 u32 index;
3798
3799 u32 val;
3800
3801 index = 0x08;
3802 val = Get_NB32_index_wait(dev, index_reg, index);
3803 Set_NB32_index_wait(dev, index_reg, index, val | (1 << DisAutoComp));
3804
3805 //FIXME: check for Bx Cx CPU
Zheng Bao7b1a3c32010-09-28 04:43:16 +00003806 // if Ax mct_SetDramConfigHi_Samp_D
Marc Jones8ae8c882007-12-19 01:32:08 +00003807
3808 /* errata#177 */
3809 index = 0x4D014F00; /* F2x[1, 0]9C_x[D0FFFFF:D000000] DRAM Phy Debug Registers */
3810 index |= 1 << DctAccessWrite;
3811 val = 0;
3812 Set_NB32_index_wait(dev, index_reg, index, val);
3813
3814 Set_NB32(dev, 0x94 + 0x100 * dct, DramConfigHi);
3815
3816 index = 0x08;
3817 val = Get_NB32_index_wait(dev, index_reg, index);
3818 Set_NB32_index_wait(dev, index_reg, index, val & (~(1 << DisAutoComp)));
3819}
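/* Mapping of the numbered errata steps in the comment above onto the
 * register accesses performed by mct_SetDramConfigHi_D (sketch):
 *   step 1 -> set DisAutoComp in F2x[1, 0]9C_x08
 *   step 2 -> write 0 through phy debug index 0x4D014F00 with DctAccessWrite
 *   step 3 -> write DramConfigHi to F2x[1, 0]94
 *   step 4 -> clear DisAutoComp in F2x[1, 0]9C_x08
 */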
3820
3821static void mct_BeforeDQSTrain_D(struct MCTStatStruc *pMCTstat,
3822 struct DCTStatStruc *pDCTstatA)
3823{
3824 u8 Node;
3825 struct DCTStatStruc *pDCTstat;
3826
3827 /* Errata 178
3828 *
3829 * Bug#15115: Uncertainty In The Sync Chain Leads To Setup Violations
3830 * In TX FIFO
3831 * Solution: BIOS should program DRAM Control Register[RdPtrInit] =
3832 * 5h, (F2x[1, 0]78[3:0] = 5h).
3833 * Silicon Status: Fixed In Rev B0
3834 *
3835 * Bug#15880: Determine validity of reset settings for DDR PHY timing.
Zheng Baoc3af12f2010-10-08 05:08:47 +00003836	 * Solution: At a minimum, set the WrDqs fine delay to 0 for DDR2 training.
Marc Jones8ae8c882007-12-19 01:32:08 +00003837 */
3838
3839 for (Node = 0; Node < 8; Node++) {
3840 pDCTstat = pDCTstatA + Node;
3841
Xavi Drudis Ferran7cdf1ec2010-09-27 21:08:40 +00003842 if (pDCTstat->NodePresent) {
Marc Jones8ae8c882007-12-19 01:32:08 +00003843 mct_BeforeDQSTrain_Samp_D(pMCTstat, pDCTstat);
3844 mct_ResetDLL_D(pMCTstat, pDCTstat, 0);
3845 mct_ResetDLL_D(pMCTstat, pDCTstat, 1);
Xavi Drudis Ferran7cdf1ec2010-09-27 21:08:40 +00003846 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003847 }
3848}
3849
3850static void mct_ResetDLL_D(struct MCTStatStruc *pMCTstat,
3851 struct DCTStatStruc *pDCTstat, u8 dct)
3852{
3853 u8 Receiver;
Marc Jones8ae8c882007-12-19 01:32:08 +00003854 u32 dev = pDCTstat->dev_dct;
3855 u32 reg_off = 0x100 * dct;
3856 u32 addr;
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003857 u32 lo, hi;
3858 u8 wrap32dis = 0;
Marc Jones8ae8c882007-12-19 01:32:08 +00003859 u8 valid = 0;
3860
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00003861 /* Skip reset DLL for B3 */
3862 if (pDCTstat->LogicalCPUID & AMD_DR_B3) {
3863 return;
3864 }
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003865
3866 addr = HWCR;
3867 _RDMSR(addr, &lo, &hi);
3868 if(lo & (1<<17)) { /* save the old value */
3869 wrap32dis = 1;
3870 }
3871 lo |= (1<<17); /* HWCR.wrap32dis */
3872 lo &= ~(1<<15); /* SSEDIS */
3873 /* Setting wrap32dis allows 64-bit memory references in 32bit mode */
3874 _WRMSR(addr, lo, hi);
3875
3876
Marc Jones8ae8c882007-12-19 01:32:08 +00003877 pDCTstat->Channel = dct;
3878 Receiver = mct_InitReceiver_D(pDCTstat, dct);
3879 /* there are four receiver pairs, loosely associated with chipselects.*/
3880 for (; Receiver < 8; Receiver += 2) {
3881 if (mct_RcvrRankEnabled_D(pMCTstat, pDCTstat, dct, Receiver)) {
3882 addr = mct_GetRcvrSysAddr_D(pMCTstat, pDCTstat, dct, Receiver, &valid);
3883 if (valid) {
3884 mct_Read1LTestPattern_D(pMCTstat, pDCTstat, addr); /* cache fills */
Marc Jones8ae8c882007-12-19 01:32:08 +00003885
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003886 /* Write 0000_8000h to register F2x[1,0]9C_xD080F0C */
3887 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00008000);
3888 mct_Wait(80); /* wait >= 300ns */
3889
3890 /* Write 0000_0000h to register F2x[1,0]9C_xD080F0C */
3891 Set_NB32_index_wait(dev, 0x98 + reg_off, 0x4D080F0C, 0x00000000);
3892 mct_Wait(800); /* wait >= 2us */
Marc Jones8ae8c882007-12-19 01:32:08 +00003893 break;
3894 }
3895 }
3896 }
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003897 if(!wrap32dis) {
3898 addr = HWCR;
3899 _RDMSR(addr, &lo, &hi);
3900 lo &= ~(1<<17); /* restore HWCR.wrap32dis */
3901 _WRMSR(addr, lo, hi);
3902 }
Marc Jones8ae8c882007-12-19 01:32:08 +00003903}
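/* Note (sketch): HWCR is MSR C001_0015h. Bit 17 (wrap32dis) is set for the
 * duration of the DLL reset so that the 64-bit receiver system addresses can
 * be read from 32-bit mode, and it is restored afterwards only if it was
 * clear on entry; bit 15 (SSEDIS) is cleared unconditionally.
 */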
3904
3905
3906static void mct_EnableDatIntlv_D(struct MCTStatStruc *pMCTstat,
3907 struct DCTStatStruc *pDCTstat)
3908{
3909 u32 dev = pDCTstat->dev_dct;
3910 u32 val;
3911
3912 /* Enable F2x110[DctDatIntlv] */
3913 // Call back not required mctHookBeforeDatIntlv_D()
3914 // FIXME Skip for Ax
3915 if (!pDCTstat->GangedMode) {
3916 val = Get_NB32(dev, 0x110);
3917 val |= 1 << 5; // DctDatIntlv
3918 Set_NB32(dev, 0x110, val);
3919
3920 // FIXME Skip for Cx
3921 dev = pDCTstat->dev_nbmisc;
3922 val = Get_NB32(dev, 0x8C); // NB Configuration Hi
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003923 val |= 1 << (36-32); // DisDatMask
Marc Jones8ae8c882007-12-19 01:32:08 +00003924 Set_NB32(dev, 0x8C, val);
3925 }
3926}
3927
Stefan Reinauerd6532112010-04-16 00:31:44 +00003928#ifdef UNUSED_CODE
Marc Jones8ae8c882007-12-19 01:32:08 +00003929static void mct_SetupSync_D(struct MCTStatStruc *pMCTstat,
3930 struct DCTStatStruc *pDCTstat)
3931{
3932 /* set F2x78[ChSetupSync] when F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup,
3933 * CkeSetup] setups for one DCT are all 0s and at least one of the setups,
3934 * F2x[1, 0]9C_x04[AddrCmdSetup, CsOdtSetup, CkeSetup], of the other
3935 * controller is 1
3936 */
3937 u32 cha, chb;
3938 u32 dev = pDCTstat->dev_dct;
3939 u32 val;
3940
3941 cha = pDCTstat->CH_ADDR_TMG[0] & 0x0202020;
3942 chb = pDCTstat->CH_ADDR_TMG[1] & 0x0202020;
3943
3944 if ((cha != chb) && ((cha == 0) || (chb == 0))) {
3945 val = Get_NB32(dev, 0x78);
3946 val |= ChSetupSync;
3947 Set_NB32(dev, 0x78, val);
3948 }
3949}
Stefan Reinauerd6532112010-04-16 00:31:44 +00003950#endif
Marc Jones8ae8c882007-12-19 01:32:08 +00003951
3952 static void AfterDramInit_D(struct DCTStatStruc *pDCTstat, u8 dct)
3953{
3954 u32 val;
3955 u32 reg_off = 0x100 * dct;
3956 u32 dev = pDCTstat->dev_dct;
3957
Marc Jonesc3ec1ac2008-07-23 21:04:03 +00003958 if (pDCTstat->LogicalCPUID & (AMD_DR_B2 | AMD_DR_B3)) {
Marc Jones (marc.jonese3aeb932008-04-11 03:20:28 +00003959 mct_Wait(10000); /* Wait 50 us*/
Marc Jones8ae8c882007-12-19 01:32:08 +00003960 val = Get_NB32(dev, 0x110);
3961		if (!(val & (1 << DramEnabled))) {
3962			/* If 50 us expires while DramEnabled = 0 then do the following */
3963 val = Get_NB32(dev, 0x90 + reg_off);
3964 val &= ~(1 << Width128); /* Program Width128 = 0 */
3965 Set_NB32(dev, 0x90 + reg_off, val);
3966
3967 val = Get_NB32_index_wait(dev, 0x98 + reg_off, 0x05); /* Perform dummy CSR read to F2x09C_x05 */
3968
3969 if (pDCTstat->GangedMode) {
3970 val = Get_NB32(dev, 0x90 + reg_off);
3971				val |= 1 << Width128;	/* Restore Width128 = 1 (ganged mode) */
3972 Set_NB32(dev, 0x90 + reg_off, val);
3973 }
3974 }
3975 }
3976}
3977
3978
3979/* ==========================================================
3980 * 6-bit Bank Addressing Table
3981 * RR=rows-13 binary
3982 * B=Banks-2 binary
3983 * CCC=Columns-9 binary
3984 * ==========================================================
3985 * DCT CCCBRR Rows Banks Columns 64-bit CS Size
3986 * Encoding
3987 * 0000 000000 13 2 9 128MB
3988 * 0001 001000 13 2 10 256MB
3989 * 0010 001001 14 2 10 512MB
3990 * 0011 010000 13 2 11 512MB
3991 * 0100 001100 13 3 10 512MB
3992 * 0101 001101 14 3 10 1GB
3993 * 0110 010001 14 2 11 1GB
3994 * 0111 001110 15 3 10 2GB
3995 * 1000 010101 14 3 11 2GB
3996 * 1001 010110 15 3 11 4GB
3997 * 1010 001111 16 3 10 4GB
3998 * 1011 010111 16 3 11 8GB
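 *
 * Size relation implied by the table (sketch, not from the original text):
 * 64-bit CS size = 8 bytes * 2^(rows + columns + bank-address bits),
 * e.g. 14 rows + 10 columns + 3 bank bits -> 8 * 2^27 = 1GB.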
3999 */